Dataset columns (value type and min/max size across rows):

| Column | Type | Min | Max |
| --- | --- | --- | --- |
| in_source_id | string (length) | 13 | 58 |
| issue | string (length) | 3 | 241k |
| before_files | list (length) | 0 | 3 |
| after_files | list (length) | 0 | 3 |
| pr_diff | string (length) | 109 | 107M |
google__timesketch-949
Yeti analyzer crash From the celery log: ``` [2019-07-17 09:11:37,463: ERROR/ForkPoolWorker-1] Task timesketch.lib.tasks.run_sketch_analyzer[46d24990-12df-4c88-a79b-a3b5f1c04b01] raised unexpected: TypeError("unsupported operand type(s) for +: 'NoneType' and 'unicode'",) Traceback (most recent call last): File "/opt/timesketch/env/local/lib/python2.7/site-packages/celery/app/trace.py", line 374, in trace_task R = retval = fun(*args, **kwargs) File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/__init__.py", line 181, in __call__ return TaskBase.__call__(self, *args, **kwargs) File "/opt/timesketch/env/local/lib/python2.7/site-packages/celery/app/trace.py", line 629, in __protected_call__ return self.run(*args, **kwargs) File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/tasks.py", line 339, in run_sketch_analyzer result = analyzer.run_wrapper() File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/interface.py", line 37, in wrapper func_return = func(self, *args, **kwargs) File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/interface.py", line 403, in run_wrapper result = self.run() File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/yetiindicators.py", line 92, in run self.get_intrusion_sets() File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/yetiindicators.py", line 75, in get_intrusion_sets self.yeti_api_root + '/entities/filter/', TypeError: unsupported operand type(s) for +: 'NoneType' and 'unicode' ```
[ { "content": "\"\"\"Index analyzer plugin for Yeti indicators.\"\"\"\nfrom __future__ import unicode_literals\n\nfrom flask import current_app\nimport requests\n\nfrom timesketch.lib.analyzers import interface\nfrom timesketch.lib.analyzers import manager\nfrom timesketch.lib import emojis\n\n\ndef build_query_for_indicators(indicators):\n \"\"\"Builds an Elasticsearch query for Yeti indicator patterns.\n\n Prepends and appends .* to the regex to be able to search within a field.\n\n Returns:\n The resulting ES query string.\n \"\"\"\n query = []\n for domain in indicators:\n query.append('domain:/.*{0:s}.*/'.format(domain['pattern']))\n return ' OR '.join(query)\n\n\nclass YetiIndicators(interface.BaseSketchAnalyzer):\n \"\"\"Index analyzer for Yeti threat intel indicators.\"\"\"\n\n NAME = 'yetiindicators'\n DEPENDENCIES = frozenset(['domain'])\n\n def __init__(self, index_name, sketch_id):\n \"\"\"Initialize the Index Analyzer.\n\n Args:\n index_name: Elasticsearch index name\n \"\"\"\n super(YetiIndicators, self).__init__(index_name, sketch_id)\n self.intel = {}\n self.yeti_api_root = current_app.config.get('YETI_API_ROOT')\n self.yeti_api_key = current_app.config.get('YETI_API_KEY')\n self.yeti_indicator_labels = current_app.config.get(\n 'YETI_INDICATOR_LABELS', [])\n\n def get_bad_domain_indicators(self, entity_id):\n \"\"\"Retrieves a list of indicators associated to a given entity.\n\n Args:\n entity_id (str): STIX ID of the entity to get associated inticators\n from. (typically an Intrusion Set)\n\n Returns:\n A list of JSON objects describing a Yeti Indicator.\n \"\"\"\n results = requests.post(\n self.yeti_api_root + '/entities/{0:s}/neighbors/'.format(entity_id),\n headers={'X-Yeti-API': self.yeti_api_key},\n )\n if results.status_code != 200:\n return []\n domain_indicators = []\n for neighbor in results.json().get('vertices', {}).values():\n if neighbor['type'] == 'x-regex' and \\\n set(self.yeti_indicator_labels) <= set(neighbor['labels']):\n domain_indicators.append(neighbor)\n\n return domain_indicators\n\n def get_intrusion_sets(self):\n \"\"\"Populates the intel attribute with data from Yeti.\n\n Retrieved intel consists of Intrusion sets and associated Indicators.\n \"\"\"\n results = requests.post(\n self.yeti_api_root + '/entities/filter/',\n json={'name': '', 'type': 'intrusion-set'},\n headers={'X-Yeti-API': self.yeti_api_key},\n )\n if results.status_code != 200:\n return\n self.intel = {item['id']: item for item in results.json()}\n for _id in self.intel:\n self.intel[_id]['indicators'] = self.get_bad_domain_indicators(_id)\n\n def run(self):\n \"\"\"Entry point for the analyzer.\n\n Returns:\n String with summary of the analyzer result\n \"\"\"\n\n self.get_intrusion_sets()\n actors_found = []\n for intrusion_set in self.intel.values():\n if not intrusion_set['indicators']:\n continue\n\n found = False\n\n for indicator in intrusion_set['indicators']:\n query = build_query_for_indicators([indicator])\n\n events = self.event_stream(query_string=query,\n return_fields=[])\n\n name = intrusion_set['name']\n for event in events:\n found = True\n event.add_emojis([emojis.get_emoji('SKULL')])\n event.add_tags([name])\n event.commit()\n event.add_comment(\n 'Indicator \"{0:s}\" found for actor \"{1:s}\"'.format(\n indicator['name'], name))\n\n if found:\n actors_found.append(name)\n self.sketch.add_view(\n 'Domain activity for actor {0:s}'.format(name),\n self.NAME,\n query_string=query)\n\n if actors_found:\n return '{0:d} actors were found! 
[{1:s}]'.format(\n len(actors_found), ', '.join(actors_found))\n return 'No indicators were found in the timeline.'\n\n\nmanager.AnalysisManager.register_analyzer(YetiIndicators)\n", "path": "timesketch/lib/analyzers/yetiindicators.py" } ]
[ { "content": "\"\"\"Index analyzer plugin for Yeti indicators.\"\"\"\nfrom __future__ import unicode_literals\n\nfrom flask import current_app\nimport requests\n\nfrom timesketch.lib.analyzers import interface\nfrom timesketch.lib.analyzers import manager\nfrom timesketch.lib import emojis\n\n\ndef build_query_for_indicators(indicators):\n \"\"\"Builds an Elasticsearch query for Yeti indicator patterns.\n\n Prepends and appends .* to the regex to be able to search within a field.\n\n Returns:\n The resulting ES query string.\n \"\"\"\n query = []\n for domain in indicators:\n query.append('domain:/.*{0:s}.*/'.format(domain['pattern']))\n return ' OR '.join(query)\n\n\nclass YetiIndicators(interface.BaseSketchAnalyzer):\n \"\"\"Index analyzer for Yeti threat intel indicators.\"\"\"\n\n NAME = 'yetiindicators'\n DEPENDENCIES = frozenset(['domain'])\n\n def __init__(self, index_name, sketch_id):\n \"\"\"Initialize the Index Analyzer.\n\n Args:\n index_name: Elasticsearch index name\n \"\"\"\n super(YetiIndicators, self).__init__(index_name, sketch_id)\n self.intel = {}\n self.yeti_api_root = current_app.config.get('YETI_API_ROOT')\n self.yeti_api_key = current_app.config.get('YETI_API_KEY')\n self.yeti_indicator_labels = current_app.config.get(\n 'YETI_INDICATOR_LABELS', [])\n\n def get_bad_domain_indicators(self, entity_id):\n \"\"\"Retrieves a list of indicators associated to a given entity.\n\n Args:\n entity_id (str): STIX ID of the entity to get associated inticators\n from. (typically an Intrusion Set)\n\n Returns:\n A list of JSON objects describing a Yeti Indicator.\n \"\"\"\n results = requests.post(\n self.yeti_api_root + '/entities/{0:s}/neighbors/'.format(entity_id),\n headers={'X-Yeti-API': self.yeti_api_key},\n )\n if results.status_code != 200:\n return []\n domain_indicators = []\n for neighbor in results.json().get('vertices', {}).values():\n if neighbor['type'] == 'x-regex' and \\\n set(self.yeti_indicator_labels) <= set(neighbor['labels']):\n domain_indicators.append(neighbor)\n\n return domain_indicators\n\n def get_intrusion_sets(self):\n \"\"\"Populates the intel attribute with data from Yeti.\n\n Retrieved intel consists of Intrusion sets and associated Indicators.\n \"\"\"\n results = requests.post(\n self.yeti_api_root + '/entities/filter/',\n json={'name': '', 'type': 'intrusion-set'},\n headers={'X-Yeti-API': self.yeti_api_key},\n )\n if results.status_code != 200:\n return\n self.intel = {item['id']: item for item in results.json()}\n for _id in self.intel:\n self.intel[_id]['indicators'] = self.get_bad_domain_indicators(_id)\n\n def run(self):\n \"\"\"Entry point for the analyzer.\n\n Returns:\n String with summary of the analyzer result\n \"\"\"\n if not self.yeti_api_root or not self.yeti_api_key:\n return 'No Yeti configuration settings found, aborting.'\n\n self.get_intrusion_sets()\n actors_found = []\n for intrusion_set in self.intel.values():\n if not intrusion_set['indicators']:\n continue\n\n found = False\n\n for indicator in intrusion_set['indicators']:\n query = build_query_for_indicators([indicator])\n\n events = self.event_stream(query_string=query,\n return_fields=[])\n\n name = intrusion_set['name']\n for event in events:\n found = True\n event.add_emojis([emojis.get_emoji('SKULL')])\n event.add_tags([name])\n event.commit()\n event.add_comment(\n 'Indicator \"{0:s}\" found for actor \"{1:s}\"'.format(\n indicator['name'], name))\n\n if found:\n actors_found.append(name)\n self.sketch.add_view(\n 'Domain activity for actor {0:s}'.format(name),\n 
self.NAME,\n query_string=query)\n\n if actors_found:\n return '{0:d} actors were found! [{1:s}]'.format(\n len(actors_found), ', '.join(actors_found))\n return 'No indicators were found in the timeline.'\n\n\nmanager.AnalysisManager.register_analyzer(YetiIndicators)\n", "path": "timesketch/lib/analyzers/yetiindicators.py" } ]
diff --git a/timesketch/lib/analyzers/yetiindicators.py b/timesketch/lib/analyzers/yetiindicators.py index 16b1afe24e..5c8a0347d5 100644 --- a/timesketch/lib/analyzers/yetiindicators.py +++ b/timesketch/lib/analyzers/yetiindicators.py @@ -88,6 +88,8 @@ def run(self): Returns: String with summary of the analyzer result """ + if not self.yeti_api_root or not self.yeti_api_key: + return 'No Yeti configuration settings found, aborting.' self.get_intrusion_sets() actors_found = []
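The root cause in this record is a plain `None + str` concatenation: `YETI_API_ROOT` was unset, so building the request URL failed. A minimal, standalone sketch of the guard the diff adds (hypothetical class and config dict, not the real analyzer) is:

```python
# Sketch of the early-return guard from the diff: abort run() when the Yeti
# endpoint or API key is missing instead of concatenating None with a string.
# The config keys mirror YETI_API_ROOT / YETI_API_KEY; the rest is a stub.
class YetiIndicatorsSketch:
    def __init__(self, config):
        self.yeti_api_root = config.get("YETI_API_ROOT")  # may be None
        self.yeti_api_key = config.get("YETI_API_KEY")    # may be None

    def run(self):
        if not self.yeti_api_root or not self.yeti_api_key:
            return "No Yeti configuration settings found, aborting."
        # Safe to build URLs now that both settings are known to be set.
        url = self.yeti_api_root + "/entities/filter/"
        return "would query " + url


print(YetiIndicatorsSketch({}).run())  # aborts cleanly instead of raising TypeError
print(YetiIndicatorsSketch({"YETI_API_ROOT": "http://yeti", "YETI_API_KEY": "k"}).run())
```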
ivy-llc__ivy-18208
expand
[ { "content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape):\n return ivy.reshape(x, shape)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n", "path": "ivy/functional/frontends/paddle/tensor/manipulation.py" } ]
[ { "content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape):\n return ivy.reshape(x, shape)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef expand(x, shape, name=None):\n return ivy.expand(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n", "path": "ivy/functional/frontends/paddle/tensor/manipulation.py" } ]
diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py index adb24100b6e7c..cb68d6878a4aa 100644 --- a/ivy/functional/frontends/paddle/tensor/manipulation.py +++ b/ivy/functional/frontends/paddle/tensor/manipulation.py @@ -61,6 +61,15 @@ def squeeze(x, axis=None, name=None): return ivy.squeeze(x, axis=axis) +@with_supported_dtypes( + {"2.5.0 and below": ("bool", "float32", "float64", "int32", "int64")}, + "paddle", +) +@to_ivy_arrays_and_back +def expand(x, shape, name=None): + return ivy.expand(x, shape) + + @with_supported_dtypes( { "2.5.0 and below": ( diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_manipulation.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_manipulation.py index 38fd74caf3c2f..0567f82cc01fa 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_manipulation.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_manipulation.py @@ -336,6 +336,51 @@ def test_paddle_squeeze( ) +# expand [email protected] +def _expand_helper(draw): + dtype_and_x = draw( + helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + min_num_dims=1, + max_num_dims=6, + ) + ) + + dtype, x = dtype_and_x + input_shape = x[0].shape + + max_num_dims = 6 - len(input_shape) + shape = draw(helpers.get_shape(max_num_dims=max_num_dims)) + input_shape + + return dtype, x, shape + + +@handle_frontend_test( + fn_tree="paddle.expand", + dtype_x_and_shape=_expand_helper(), +) +def test_paddle_expand( + *, + dtype_x_and_shape, + on_device, + fn_tree, + frontend, + test_flags, +): + input_dtype, x, shape = dtype_x_and_shape + helpers.test_frontend_function( + input_dtypes=input_dtype, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + shape=shape, + ) + + +# cast @handle_frontend_test( fn_tree="paddle.cast", dtype_and_x=helpers.dtype_and_values(
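For readers unfamiliar with the operation being added, the following is an illustration only (plain NumPy, not part of the PR and not ivy's or paddle's API) of the broadcasting behaviour that `paddle.expand` / `ivy.expand` provide: size-1 dimensions, and missing leading dimensions, are stretched to the requested shape.

```python
# Illustration of expand-style broadcasting using numpy.broadcast_to.
# The result is a read-only view with the requested shape.
import numpy as np

x = np.array([[1], [2], [3]])          # shape (3, 1)
expanded = np.broadcast_to(x, (3, 4))  # shape (3, 4)
print(expanded)
# [[1 1 1 1]
#  [2 2 2 2]
#  [3 3 3 3]]
```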
encode__uvicorn-2061
WebSockets 11.0 broke Uvicorn test suite As the title says... Changelog: https://websockets.readthedocs.io/en/stable/project/changelog.html
[ { "content": "import asyncio\nimport http\nimport logging\nfrom typing import (\n Any,\n Dict,\n List,\n Literal,\n Optional,\n Sequence,\n Tuple,\n Union,\n cast,\n)\nfrom urllib.parse import unquote\n\nimport websockets\nfrom websockets.datastructures import Headers\nfrom websockets.exceptions import ConnectionClosed\nfrom websockets.extensions.permessage_deflate import ServerPerMessageDeflateFactory\nfrom websockets.legacy.server import HTTPResponse\nfrom websockets.server import WebSocketServerProtocol\nfrom websockets.typing import Subprotocol\n\nfrom uvicorn._types import (\n ASGISendEvent,\n WebSocketAcceptEvent,\n WebSocketCloseEvent,\n WebSocketConnectEvent,\n WebSocketDisconnectEvent,\n WebSocketReceiveEvent,\n WebSocketScope,\n WebSocketSendEvent,\n)\nfrom uvicorn.config import Config\nfrom uvicorn.logging import TRACE_LOG_LEVEL\nfrom uvicorn.protocols.utils import (\n get_local_addr,\n get_path_with_query_string,\n get_remote_addr,\n is_ssl,\n)\nfrom uvicorn.server import ServerState\n\n\nclass Server:\n closing = False\n\n def register(self, ws: WebSocketServerProtocol) -> None:\n pass\n\n def unregister(self, ws: WebSocketServerProtocol) -> None:\n pass\n\n def is_serving(self) -> bool:\n return not self.closing\n\n\nclass WebSocketProtocol(WebSocketServerProtocol):\n extra_headers: List[Tuple[str, str]]\n\n def __init__(\n self,\n config: Config,\n server_state: ServerState,\n app_state: Dict[str, Any],\n _loop: Optional[asyncio.AbstractEventLoop] = None,\n ):\n if not config.loaded:\n config.load()\n\n self.config = config\n self.app = config.loaded_app\n self.loop = _loop or asyncio.get_event_loop()\n self.root_path = config.root_path\n self.app_state = app_state\n\n # Shared server state\n self.connections = server_state.connections\n self.tasks = server_state.tasks\n\n # Connection state\n self.transport: asyncio.Transport = None # type: ignore[assignment]\n self.server: Optional[Tuple[str, int]] = None\n self.client: Optional[Tuple[str, int]] = None\n self.scheme: Literal[\"wss\", \"ws\"] = None # type: ignore[assignment]\n\n # Connection events\n self.scope: WebSocketScope = None # type: ignore[assignment]\n self.handshake_started_event = asyncio.Event()\n self.handshake_completed_event = asyncio.Event()\n self.closed_event = asyncio.Event()\n self.initial_response: Optional[HTTPResponse] = None\n self.connect_sent = False\n self.lost_connection_before_handshake = False\n self.accepted_subprotocol: Optional[Subprotocol] = None\n\n self.ws_server: Server = Server() # type: ignore[assignment]\n\n extensions = []\n if self.config.ws_per_message_deflate:\n extensions.append(ServerPerMessageDeflateFactory())\n\n super().__init__(\n ws_handler=self.ws_handler,\n ws_server=self.ws_server, # type: ignore[arg-type]\n max_size=self.config.ws_max_size,\n max_queue=self.config.ws_max_queue,\n ping_interval=self.config.ws_ping_interval,\n ping_timeout=self.config.ws_ping_timeout,\n extensions=extensions,\n logger=logging.getLogger(\"uvicorn.error\"),\n )\n self.server_header = None\n self.extra_headers = [\n (name.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for name, value in server_state.default_headers\n ]\n\n def connection_made( # type: ignore[override]\n self, transport: asyncio.Transport\n ) -> None:\n self.connections.add(self)\n self.transport = transport\n self.server = get_local_addr(transport)\n self.client = get_remote_addr(transport)\n self.scheme = \"wss\" if is_ssl(transport) else \"ws\"\n\n if self.logger.isEnabledFor(TRACE_LOG_LEVEL):\n prefix = \"%s:%d - 
\" % self.client if self.client else \"\"\n self.logger.log(TRACE_LOG_LEVEL, \"%sWebSocket connection made\", prefix)\n\n super().connection_made(transport)\n\n def connection_lost(self, exc: Optional[Exception]) -> None:\n self.connections.remove(self)\n\n if self.logger.isEnabledFor(TRACE_LOG_LEVEL):\n prefix = \"%s:%d - \" % self.client if self.client else \"\"\n self.logger.log(TRACE_LOG_LEVEL, \"%sWebSocket connection lost\", prefix)\n\n self.lost_connection_before_handshake = (\n not self.handshake_completed_event.is_set()\n )\n self.handshake_completed_event.set()\n super().connection_lost(exc)\n if exc is None:\n self.transport.close()\n\n def shutdown(self) -> None:\n self.ws_server.closing = True\n if self.handshake_completed_event.is_set():\n self.fail_connection(1012)\n else:\n self.send_500_response()\n self.transport.close()\n\n def on_task_complete(self, task: asyncio.Task) -> None:\n self.tasks.discard(task)\n\n async def process_request(\n self, path: str, headers: Headers\n ) -> Optional[HTTPResponse]:\n \"\"\"\n This hook is called to determine if the websocket should return\n an HTTP response and close.\n\n Our behavior here is to start the ASGI application, and then wait\n for either `accept` or `close` in order to determine if we should\n close the connection.\n \"\"\"\n path_portion, _, query_string = path.partition(\"?\")\n\n websockets.legacy.handshake.check_request(headers)\n\n subprotocols = []\n for header in headers.get_all(\"Sec-WebSocket-Protocol\"):\n subprotocols.extend([token.strip() for token in header.split(\",\")])\n\n asgi_headers = [\n (name.encode(\"ascii\"), value.encode(\"ascii\", errors=\"surrogateescape\"))\n for name, value in headers.raw_items()\n ]\n\n self.scope = {\n \"type\": \"websocket\",\n \"asgi\": {\"version\": self.config.asgi_version, \"spec_version\": \"2.3\"},\n \"http_version\": \"1.1\",\n \"scheme\": self.scheme,\n \"server\": self.server,\n \"client\": self.client,\n \"root_path\": self.root_path,\n \"path\": unquote(path_portion),\n \"raw_path\": path_portion.encode(\"ascii\"),\n \"query_string\": query_string.encode(\"ascii\"),\n \"headers\": asgi_headers,\n \"subprotocols\": subprotocols,\n \"state\": self.app_state.copy(),\n }\n task = self.loop.create_task(self.run_asgi())\n task.add_done_callback(self.on_task_complete)\n self.tasks.add(task)\n await self.handshake_started_event.wait()\n return self.initial_response\n\n def process_subprotocol(\n self, headers: Headers, available_subprotocols: Optional[Sequence[Subprotocol]]\n ) -> Optional[Subprotocol]:\n \"\"\"\n We override the standard 'process_subprotocol' behavior here so that\n we return whatever subprotocol is sent in the 'accept' message.\n \"\"\"\n return self.accepted_subprotocol\n\n def send_500_response(self) -> None:\n msg = b\"Internal Server Error\"\n content = [\n b\"HTTP/1.1 500 Internal Server Error\\r\\n\"\n b\"content-type: text/plain; charset=utf-8\\r\\n\",\n b\"content-length: \" + str(len(msg)).encode(\"ascii\") + b\"\\r\\n\",\n b\"connection: close\\r\\n\",\n b\"\\r\\n\",\n msg,\n ]\n self.transport.write(b\"\".join(content))\n # Allow handler task to terminate cleanly, as websockets doesn't cancel it by\n # itself (see https://github.com/encode/uvicorn/issues/920)\n self.handshake_started_event.set()\n\n async def ws_handler( # type: ignore[override]\n self, protocol: WebSocketServerProtocol, path: str\n ) -> Any:\n \"\"\"\n This is the main handler function for the 'websockets' implementation\n to call into. 
We just wait for close then return, and instead allow\n 'send' and 'receive' events to drive the flow.\n \"\"\"\n self.handshake_completed_event.set()\n await self.closed_event.wait()\n\n async def run_asgi(self) -> None:\n \"\"\"\n Wrapper around the ASGI callable, handling exceptions and unexpected\n termination states.\n \"\"\"\n try:\n result = await self.app(self.scope, self.asgi_receive, self.asgi_send)\n except BaseException as exc:\n self.closed_event.set()\n msg = \"Exception in ASGI application\\n\"\n self.logger.error(msg, exc_info=exc)\n if not self.handshake_started_event.is_set():\n self.send_500_response()\n else:\n await self.handshake_completed_event.wait()\n self.transport.close()\n else:\n self.closed_event.set()\n if not self.handshake_started_event.is_set():\n msg = \"ASGI callable returned without sending handshake.\"\n self.logger.error(msg)\n self.send_500_response()\n self.transport.close()\n elif result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.logger.error(msg, result)\n await self.handshake_completed_event.wait()\n self.transport.close()\n\n async def asgi_send(self, message: \"ASGISendEvent\") -> None:\n message_type = message[\"type\"]\n\n if not self.handshake_started_event.is_set():\n if message_type == \"websocket.accept\":\n message = cast(\"WebSocketAcceptEvent\", message)\n self.logger.info(\n '%s - \"WebSocket %s\" [accepted]',\n self.scope[\"client\"],\n get_path_with_query_string(self.scope),\n )\n self.initial_response = None\n self.accepted_subprotocol = cast(\n Optional[Subprotocol], message.get(\"subprotocol\")\n )\n if \"headers\" in message:\n self.extra_headers.extend(\n # ASGI spec requires bytes\n # But for compatibility we need to convert it to strings\n (name.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for name, value in message[\"headers\"]\n )\n self.handshake_started_event.set()\n\n elif message_type == \"websocket.close\":\n message = cast(\"WebSocketCloseEvent\", message)\n self.logger.info(\n '%s - \"WebSocket %s\" 403',\n self.scope[\"client\"],\n get_path_with_query_string(self.scope),\n )\n self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b\"\")\n self.handshake_started_event.set()\n self.closed_event.set()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.accept' or 'websocket.close', \"\n \"but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n elif not self.closed_event.is_set():\n await self.handshake_completed_event.wait()\n\n if message_type == \"websocket.send\":\n message = cast(\"WebSocketSendEvent\", message)\n bytes_data = message.get(\"bytes\")\n text_data = message.get(\"text\")\n data = text_data if bytes_data is None else bytes_data\n await self.send(data) # type: ignore[arg-type]\n\n elif message_type == \"websocket.close\":\n message = cast(\"WebSocketCloseEvent\", message)\n code = message.get(\"code\", 1000)\n reason = message.get(\"reason\", \"\") or \"\"\n await self.close(code, reason)\n self.closed_event.set()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.send' or 'websocket.close',\"\n \" but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n else:\n msg = \"Unexpected ASGI message '%s', after sending 'websocket.close'.\"\n raise RuntimeError(msg % message_type)\n\n async def asgi_receive(\n self,\n ) -> Union[\n \"WebSocketDisconnectEvent\", \"WebSocketConnectEvent\", \"WebSocketReceiveEvent\"\n ]:\n if not self.connect_sent:\n self.connect_sent = True\n return {\"type\": \"websocket.connect\"}\n\n await 
self.handshake_completed_event.wait()\n\n if self.lost_connection_before_handshake:\n # If the handshake failed or the app closed before handshake completion,\n # use 1006 Abnormal Closure.\n return {\"type\": \"websocket.disconnect\", \"code\": 1006}\n\n if self.closed_event.is_set():\n return {\"type\": \"websocket.disconnect\", \"code\": 1005}\n\n try:\n data = await self.recv()\n except ConnectionClosed as exc:\n self.closed_event.set()\n if self.ws_server.closing:\n return {\"type\": \"websocket.disconnect\", \"code\": 1012}\n return {\"type\": \"websocket.disconnect\", \"code\": exc.code}\n\n msg: WebSocketReceiveEvent = { # type: ignore[typeddict-item]\n \"type\": \"websocket.receive\"\n }\n\n if isinstance(data, str):\n msg[\"text\"] = data\n else:\n msg[\"bytes\"] = data\n\n return msg\n", "path": "uvicorn/protocols/websockets/websockets_impl.py" } ]
[ { "content": "import asyncio\nimport http\nimport logging\nfrom typing import (\n Any,\n Dict,\n List,\n Literal,\n Optional,\n Sequence,\n Tuple,\n Union,\n cast,\n)\nfrom urllib.parse import unquote\n\nimport websockets\nfrom websockets.datastructures import Headers\nfrom websockets.exceptions import ConnectionClosed\nfrom websockets.extensions.permessage_deflate import ServerPerMessageDeflateFactory\nfrom websockets.legacy.server import HTTPResponse\nfrom websockets.server import WebSocketServerProtocol\nfrom websockets.typing import Subprotocol\n\nfrom uvicorn._types import (\n ASGISendEvent,\n WebSocketAcceptEvent,\n WebSocketCloseEvent,\n WebSocketConnectEvent,\n WebSocketDisconnectEvent,\n WebSocketReceiveEvent,\n WebSocketScope,\n WebSocketSendEvent,\n)\nfrom uvicorn.config import Config\nfrom uvicorn.logging import TRACE_LOG_LEVEL\nfrom uvicorn.protocols.utils import (\n get_local_addr,\n get_path_with_query_string,\n get_remote_addr,\n is_ssl,\n)\nfrom uvicorn.server import ServerState\n\n\nclass Server:\n closing = False\n\n def register(self, ws: WebSocketServerProtocol) -> None:\n pass\n\n def unregister(self, ws: WebSocketServerProtocol) -> None:\n pass\n\n def is_serving(self) -> bool:\n return not self.closing\n\n\nclass WebSocketProtocol(WebSocketServerProtocol):\n extra_headers: List[Tuple[str, str]]\n\n def __init__(\n self,\n config: Config,\n server_state: ServerState,\n app_state: Dict[str, Any],\n _loop: Optional[asyncio.AbstractEventLoop] = None,\n ):\n if not config.loaded:\n config.load()\n\n self.config = config\n self.app = config.loaded_app\n self.loop = _loop or asyncio.get_event_loop()\n self.root_path = config.root_path\n self.app_state = app_state\n\n # Shared server state\n self.connections = server_state.connections\n self.tasks = server_state.tasks\n\n # Connection state\n self.transport: asyncio.Transport = None # type: ignore[assignment]\n self.server: Optional[Tuple[str, int]] = None\n self.client: Optional[Tuple[str, int]] = None\n self.scheme: Literal[\"wss\", \"ws\"] = None # type: ignore[assignment]\n\n # Connection events\n self.scope: WebSocketScope = None # type: ignore[assignment]\n self.handshake_started_event = asyncio.Event()\n self.handshake_completed_event = asyncio.Event()\n self.closed_event = asyncio.Event()\n self.initial_response: Optional[HTTPResponse] = None\n self.connect_sent = False\n self.lost_connection_before_handshake = False\n self.accepted_subprotocol: Optional[Subprotocol] = None\n\n self.ws_server: Server = Server() # type: ignore[assignment]\n\n extensions = []\n if self.config.ws_per_message_deflate:\n extensions.append(ServerPerMessageDeflateFactory())\n\n super().__init__(\n ws_handler=self.ws_handler,\n ws_server=self.ws_server, # type: ignore[arg-type]\n max_size=self.config.ws_max_size,\n max_queue=self.config.ws_max_queue,\n ping_interval=self.config.ws_ping_interval,\n ping_timeout=self.config.ws_ping_timeout,\n extensions=extensions,\n logger=logging.getLogger(\"uvicorn.error\"),\n )\n self.server_header = None\n self.extra_headers = [\n (name.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for name, value in server_state.default_headers\n ]\n\n def connection_made( # type: ignore[override]\n self, transport: asyncio.Transport\n ) -> None:\n self.connections.add(self)\n self.transport = transport\n self.server = get_local_addr(transport)\n self.client = get_remote_addr(transport)\n self.scheme = \"wss\" if is_ssl(transport) else \"ws\"\n\n if self.logger.isEnabledFor(TRACE_LOG_LEVEL):\n prefix = \"%s:%d - 
\" % self.client if self.client else \"\"\n self.logger.log(TRACE_LOG_LEVEL, \"%sWebSocket connection made\", prefix)\n\n super().connection_made(transport)\n\n def connection_lost(self, exc: Optional[Exception]) -> None:\n self.connections.remove(self)\n\n if self.logger.isEnabledFor(TRACE_LOG_LEVEL):\n prefix = \"%s:%d - \" % self.client if self.client else \"\"\n self.logger.log(TRACE_LOG_LEVEL, \"%sWebSocket connection lost\", prefix)\n\n self.lost_connection_before_handshake = (\n not self.handshake_completed_event.is_set()\n )\n self.handshake_completed_event.set()\n super().connection_lost(exc)\n if exc is None:\n self.transport.close()\n\n def shutdown(self) -> None:\n self.ws_server.closing = True\n if self.handshake_completed_event.is_set():\n self.fail_connection(1012)\n else:\n self.send_500_response()\n self.transport.close()\n\n def on_task_complete(self, task: asyncio.Task) -> None:\n self.tasks.discard(task)\n\n async def process_request(\n self, path: str, headers: Headers\n ) -> Optional[HTTPResponse]:\n \"\"\"\n This hook is called to determine if the websocket should return\n an HTTP response and close.\n\n Our behavior here is to start the ASGI application, and then wait\n for either `accept` or `close` in order to determine if we should\n close the connection.\n \"\"\"\n path_portion, _, query_string = path.partition(\"?\")\n\n websockets.legacy.handshake.check_request(headers)\n\n subprotocols = []\n for header in headers.get_all(\"Sec-WebSocket-Protocol\"):\n subprotocols.extend([token.strip() for token in header.split(\",\")])\n\n asgi_headers = [\n (name.encode(\"ascii\"), value.encode(\"ascii\", errors=\"surrogateescape\"))\n for name, value in headers.raw_items()\n ]\n\n self.scope = {\n \"type\": \"websocket\",\n \"asgi\": {\"version\": self.config.asgi_version, \"spec_version\": \"2.3\"},\n \"http_version\": \"1.1\",\n \"scheme\": self.scheme,\n \"server\": self.server,\n \"client\": self.client,\n \"root_path\": self.root_path,\n \"path\": unquote(path_portion),\n \"raw_path\": path_portion.encode(\"ascii\"),\n \"query_string\": query_string.encode(\"ascii\"),\n \"headers\": asgi_headers,\n \"subprotocols\": subprotocols,\n \"state\": self.app_state.copy(),\n }\n task = self.loop.create_task(self.run_asgi())\n task.add_done_callback(self.on_task_complete)\n self.tasks.add(task)\n await self.handshake_started_event.wait()\n return self.initial_response\n\n def process_subprotocol(\n self, headers: Headers, available_subprotocols: Optional[Sequence[Subprotocol]]\n ) -> Optional[Subprotocol]:\n \"\"\"\n We override the standard 'process_subprotocol' behavior here so that\n we return whatever subprotocol is sent in the 'accept' message.\n \"\"\"\n return self.accepted_subprotocol\n\n def send_500_response(self) -> None:\n msg = b\"Internal Server Error\"\n content = [\n b\"HTTP/1.1 500 Internal Server Error\\r\\n\"\n b\"content-type: text/plain; charset=utf-8\\r\\n\",\n b\"content-length: \" + str(len(msg)).encode(\"ascii\") + b\"\\r\\n\",\n b\"connection: close\\r\\n\",\n b\"\\r\\n\",\n msg,\n ]\n self.transport.write(b\"\".join(content))\n # Allow handler task to terminate cleanly, as websockets doesn't cancel it by\n # itself (see https://github.com/encode/uvicorn/issues/920)\n self.handshake_started_event.set()\n\n async def ws_handler( # type: ignore[override]\n self, protocol: WebSocketServerProtocol, path: str\n ) -> Any:\n \"\"\"\n This is the main handler function for the 'websockets' implementation\n to call into. 
We just wait for close then return, and instead allow\n 'send' and 'receive' events to drive the flow.\n \"\"\"\n self.handshake_completed_event.set()\n await self.wait_closed()\n\n async def run_asgi(self) -> None:\n \"\"\"\n Wrapper around the ASGI callable, handling exceptions and unexpected\n termination states.\n \"\"\"\n try:\n result = await self.app(self.scope, self.asgi_receive, self.asgi_send)\n except BaseException as exc:\n self.closed_event.set()\n msg = \"Exception in ASGI application\\n\"\n self.logger.error(msg, exc_info=exc)\n if not self.handshake_started_event.is_set():\n self.send_500_response()\n else:\n await self.handshake_completed_event.wait()\n self.transport.close()\n else:\n self.closed_event.set()\n if not self.handshake_started_event.is_set():\n msg = \"ASGI callable returned without sending handshake.\"\n self.logger.error(msg)\n self.send_500_response()\n self.transport.close()\n elif result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.logger.error(msg, result)\n await self.handshake_completed_event.wait()\n self.transport.close()\n\n async def asgi_send(self, message: \"ASGISendEvent\") -> None:\n message_type = message[\"type\"]\n\n if not self.handshake_started_event.is_set():\n if message_type == \"websocket.accept\":\n message = cast(\"WebSocketAcceptEvent\", message)\n self.logger.info(\n '%s - \"WebSocket %s\" [accepted]',\n self.scope[\"client\"],\n get_path_with_query_string(self.scope),\n )\n self.initial_response = None\n self.accepted_subprotocol = cast(\n Optional[Subprotocol], message.get(\"subprotocol\")\n )\n if \"headers\" in message:\n self.extra_headers.extend(\n # ASGI spec requires bytes\n # But for compatibility we need to convert it to strings\n (name.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for name, value in message[\"headers\"]\n )\n self.handshake_started_event.set()\n\n elif message_type == \"websocket.close\":\n message = cast(\"WebSocketCloseEvent\", message)\n self.logger.info(\n '%s - \"WebSocket %s\" 403',\n self.scope[\"client\"],\n get_path_with_query_string(self.scope),\n )\n self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b\"\")\n self.handshake_started_event.set()\n self.closed_event.set()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.accept' or 'websocket.close', \"\n \"but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n elif not self.closed_event.is_set():\n await self.handshake_completed_event.wait()\n\n if message_type == \"websocket.send\":\n message = cast(\"WebSocketSendEvent\", message)\n bytes_data = message.get(\"bytes\")\n text_data = message.get(\"text\")\n data = text_data if bytes_data is None else bytes_data\n await self.send(data) # type: ignore[arg-type]\n\n elif message_type == \"websocket.close\":\n message = cast(\"WebSocketCloseEvent\", message)\n code = message.get(\"code\", 1000)\n reason = message.get(\"reason\", \"\") or \"\"\n await self.close(code, reason)\n self.closed_event.set()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.send' or 'websocket.close',\"\n \" but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n else:\n msg = \"Unexpected ASGI message '%s', after sending 'websocket.close'.\"\n raise RuntimeError(msg % message_type)\n\n async def asgi_receive(\n self,\n ) -> Union[\n \"WebSocketDisconnectEvent\", \"WebSocketConnectEvent\", \"WebSocketReceiveEvent\"\n ]:\n if not self.connect_sent:\n self.connect_sent = True\n return {\"type\": \"websocket.connect\"}\n\n await 
self.handshake_completed_event.wait()\n\n if self.lost_connection_before_handshake:\n # If the handshake failed or the app closed before handshake completion,\n # use 1006 Abnormal Closure.\n return {\"type\": \"websocket.disconnect\", \"code\": 1006}\n\n if self.closed_event.is_set():\n return {\"type\": \"websocket.disconnect\", \"code\": 1005}\n\n try:\n data = await self.recv()\n except ConnectionClosed as exc:\n self.closed_event.set()\n if self.ws_server.closing:\n return {\"type\": \"websocket.disconnect\", \"code\": 1012}\n return {\"type\": \"websocket.disconnect\", \"code\": exc.code}\n\n msg: WebSocketReceiveEvent = { # type: ignore[typeddict-item]\n \"type\": \"websocket.receive\"\n }\n\n if isinstance(data, str):\n msg[\"text\"] = data\n else:\n msg[\"bytes\"] = data\n\n return msg\n", "path": "uvicorn/protocols/websockets/websockets_impl.py" } ]
diff --git a/requirements.txt b/requirements.txt index a99dc024a..305bd0186 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ h11 @ git+https://github.com/python-hyper/h11.git@master # Explicit optionals a2wsgi==1.7.0 wsproto==1.2.0 -websockets==10.4 +websockets==11.0.3 # Packaging build==0.10.0 diff --git a/tests/protocols/test_websocket.py b/tests/protocols/test_websocket.py index 1dd82bbcb..17f2a92d1 100644 --- a/tests/protocols/test_websocket.py +++ b/tests/protocols/test_websocket.py @@ -577,7 +577,7 @@ async def test_asgi_return_value( unused_tcp_port: int, ): """ - The ASGI callable should return 'None'. If it doesn't make sure that + The ASGI callable should return 'None'. If it doesn't, make sure that the connection is closed with an error condition. """ diff --git a/uvicorn/protocols/websockets/websockets_impl.py b/uvicorn/protocols/websockets/websockets_impl.py index 04d41bad9..089eeb536 100644 --- a/uvicorn/protocols/websockets/websockets_impl.py +++ b/uvicorn/protocols/websockets/websockets_impl.py @@ -236,7 +236,7 @@ async def ws_handler( # type: ignore[override] 'send' and 'receive' events to drive the flow. """ self.handshake_completed_event.set() - await self.closed_event.wait() + await self.wait_closed() async def run_asgi(self) -> None: """
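The one-line fix swaps a hand-managed `asyncio.Event` for the protocol's own `wait_closed()`, so the handler exits whenever the connection is actually torn down rather than only when the ASGI side remembers to set the event. A self-contained sketch of that difference (toy protocol object, no real `websockets` classes) is:

```python
# Toy model of the ws_handler change: wait on the connection's own lifetime
# instead of a separate closed_event that must be set manually.
import asyncio


class FakeProtocol:
    def __init__(self):
        self._closed = asyncio.Event()

    async def wait_closed(self):
        await self._closed.wait()

    def connection_lost(self):
        self._closed.set()


async def ws_handler(proto):
    # old: await self.closed_event.wait()  (an event the ASGI side had to set)
    await proto.wait_closed()  # new: returns when the connection itself closes
    return "handler exited"


async def main():
    proto = FakeProtocol()
    task = asyncio.create_task(ws_handler(proto))
    await asyncio.sleep(0)   # let the handler start waiting
    proto.connection_lost()  # simulate the transport going away
    print(await task)


asyncio.run(main())
```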
DataDog__dd-trace-py-3119
aioredis integration: Tracing breaks pipeline as context managers Hello, looks like the `aioredis` integration breaks the interface of `Pipeline` objects as context managers: ```py RuntimeWarning: coroutine 'traced_pipeline' was never awaited ``` and ```py async with redis.pipeline(transaction=True) as pipe: AttributeError: __aexit__ ``` This is bad since the documented usage is exactly as context managers (see https://aioredis.readthedocs.io/en/latest/migration/#pipelines-and-transactions-multiexec). The fix for now is to just use pipelines outside of contexts, without relying on them as context managers, but that is less than ideal. `ddtrace` is the latest version (`0.57.0`).
[ { "content": "import sys\n\nimport aioredis\n\nfrom ddtrace import config\nfrom ddtrace.internal.utils.wrappers import unwrap as _u\nfrom ddtrace.pin import Pin\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .. import trace_utils\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import net\nfrom ...ext import redis as redisx\nfrom ..redis.util import _trace_redis_cmd\nfrom ..redis.util import _trace_redis_execute_pipeline\nfrom ..redis.util import format_command_args\n\n\ntry:\n from aioredis.commands.transaction import _RedisBuffer\nexcept ImportError:\n _RedisBuffer = None\n\nconfig._add(\"aioredis\", dict(_default_service=\"redis\"))\n\naioredis_version_str = getattr(aioredis, \"__version__\", \"0.0.0\")\naioredis_version = tuple([int(i) for i in aioredis_version_str.split(\".\")])\n\n\ndef patch():\n if getattr(aioredis, \"_datadog_patch\", False):\n return\n setattr(aioredis, \"_datadog_patch\", True)\n pin = Pin()\n if aioredis_version >= (2, 0):\n _w(\"aioredis.client\", \"Redis.execute_command\", traced_execute_command)\n _w(\"aioredis.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"aioredis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n pin.onto(aioredis.client.Redis)\n else:\n _w(\"aioredis\", \"Redis.execute\", traced_13_execute_command)\n _w(\"aioredis\", \"Redis.pipeline\", traced_13_pipeline)\n _w(\"aioredis.commands.transaction\", \"Pipeline.execute\", traced_13_execute_pipeline)\n pin.onto(aioredis.Redis)\n\n\ndef unpatch():\n if not getattr(aioredis, \"_datadog_patch\", False):\n return\n\n setattr(aioredis, \"_datadog_patch\", False)\n if aioredis_version >= (2, 0):\n _u(aioredis.client.Redis, \"execute_command\")\n _u(aioredis.client.Redis, \"pipeline\")\n _u(aioredis.client.Pipeline, \"execute\")\n else:\n _u(aioredis.Redis, \"execute\")\n _u(aioredis.Redis, \"pipeline\")\n _u(aioredis.commands.transaction.Pipeline, \"execute\")\n\n\nasync def traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.aioredis, instance, args):\n return await func(*args, **kwargs)\n\n\nasync def traced_pipeline(func, instance, args, kwargs):\n pipeline = await func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\nasync def traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = [format_command_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):\n return await func(*args, **kwargs)\n\n\ndef traced_13_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_13_execute_command(func, instance, args, kwargs):\n # If we have a _RedisBuffer then we are in a pipeline\n if isinstance(instance.connection, _RedisBuffer):\n return func(*args, **kwargs)\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n # Don't activate the span since this operation is performed as a future which concludes sometime later on in\n # execution so subsequent operations in the stack are not necessarily semantically related\n # (we 
don't want this span to be the parent of all other spans created before the future is resolved)\n span = pin.tracer.start_span(\n redisx.CMD, service=trace_utils.ext_service(pin, config.aioredis), span_type=SpanTypes.REDIS, activate=False\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n query = format_command_args(args)\n span.resource = query\n span.set_tag(redisx.RAWCMD, query)\n if pin.tags:\n span.set_tags(pin.tags)\n\n span.set_tags(\n {\n net.TARGET_HOST: instance.address[0],\n net.TARGET_PORT: instance.address[1],\n redisx.DB: instance.db or 0,\n }\n )\n span.set_metric(redisx.ARGS_LEN, len(args))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n # - The future was cancelled\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n except Exception:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n\n task = func(*args, **kwargs)\n task.add_done_callback(_finish_span)\n return task\n\n\nasync def traced_13_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = []\n for _, cmd, cmd_args, _ in instance._pipeline:\n parts = [cmd]\n parts.extend(cmd_args)\n cmds.append(format_command_args(parts))\n resource = \"\\n\".join(cmds)\n with pin.tracer.trace(\n redisx.CMD,\n resource=resource,\n service=trace_utils.ext_service(pin, config.aioredis),\n span_type=SpanTypes.REDIS,\n ) as span:\n\n span.set_tags(\n {\n net.TARGET_HOST: instance._pool_or_conn.address[0],\n net.TARGET_PORT: instance._pool_or_conn.address[1],\n redisx.DB: instance._pool_or_conn.db or 0,\n }\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(redisx.RAWCMD, resource)\n span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n return await func(*args, **kwargs)\n", "path": "ddtrace/contrib/aioredis/patch.py" } ]
[ { "content": "import sys\n\nimport aioredis\n\nfrom ddtrace import config\nfrom ddtrace.internal.utils.wrappers import unwrap as _u\nfrom ddtrace.pin import Pin\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .. import trace_utils\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import net\nfrom ...ext import redis as redisx\nfrom ..redis.util import _trace_redis_cmd\nfrom ..redis.util import _trace_redis_execute_pipeline\nfrom ..redis.util import format_command_args\n\n\ntry:\n from aioredis.commands.transaction import _RedisBuffer\nexcept ImportError:\n _RedisBuffer = None\n\nconfig._add(\"aioredis\", dict(_default_service=\"redis\"))\n\naioredis_version_str = getattr(aioredis, \"__version__\", \"0.0.0\")\naioredis_version = tuple([int(i) for i in aioredis_version_str.split(\".\")])\n\n\ndef patch():\n if getattr(aioredis, \"_datadog_patch\", False):\n return\n setattr(aioredis, \"_datadog_patch\", True)\n pin = Pin()\n if aioredis_version >= (2, 0):\n _w(\"aioredis.client\", \"Redis.execute_command\", traced_execute_command)\n _w(\"aioredis.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"aioredis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n pin.onto(aioredis.client.Redis)\n else:\n _w(\"aioredis\", \"Redis.execute\", traced_13_execute_command)\n _w(\"aioredis\", \"Redis.pipeline\", traced_13_pipeline)\n _w(\"aioredis.commands.transaction\", \"Pipeline.execute\", traced_13_execute_pipeline)\n pin.onto(aioredis.Redis)\n\n\ndef unpatch():\n if not getattr(aioredis, \"_datadog_patch\", False):\n return\n\n setattr(aioredis, \"_datadog_patch\", False)\n if aioredis_version >= (2, 0):\n _u(aioredis.client.Redis, \"execute_command\")\n _u(aioredis.client.Redis, \"pipeline\")\n _u(aioredis.client.Pipeline, \"execute\")\n else:\n _u(aioredis.Redis, \"execute\")\n _u(aioredis.Redis, \"pipeline\")\n _u(aioredis.commands.transaction.Pipeline, \"execute\")\n\n\nasync def traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.aioredis, instance, args):\n return await func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\nasync def traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = [format_command_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):\n return await func(*args, **kwargs)\n\n\ndef traced_13_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_13_execute_command(func, instance, args, kwargs):\n # If we have a _RedisBuffer then we are in a pipeline\n if isinstance(instance.connection, _RedisBuffer):\n return func(*args, **kwargs)\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n # Don't activate the span since this operation is performed as a future which concludes sometime later on in\n # execution so subsequent operations in the stack are not necessarily semantically related\n # (we don't want 
this span to be the parent of all other spans created before the future is resolved)\n span = pin.tracer.start_span(\n redisx.CMD, service=trace_utils.ext_service(pin, config.aioredis), span_type=SpanTypes.REDIS, activate=False\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n query = format_command_args(args)\n span.resource = query\n span.set_tag(redisx.RAWCMD, query)\n if pin.tags:\n span.set_tags(pin.tags)\n\n span.set_tags(\n {\n net.TARGET_HOST: instance.address[0],\n net.TARGET_PORT: instance.address[1],\n redisx.DB: instance.db or 0,\n }\n )\n span.set_metric(redisx.ARGS_LEN, len(args))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n # - The future was cancelled\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n except Exception:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n\n task = func(*args, **kwargs)\n task.add_done_callback(_finish_span)\n return task\n\n\nasync def traced_13_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = []\n for _, cmd, cmd_args, _ in instance._pipeline:\n parts = [cmd]\n parts.extend(cmd_args)\n cmds.append(format_command_args(parts))\n resource = \"\\n\".join(cmds)\n with pin.tracer.trace(\n redisx.CMD,\n resource=resource,\n service=trace_utils.ext_service(pin, config.aioredis),\n span_type=SpanTypes.REDIS,\n ) as span:\n\n span.set_tags(\n {\n net.TARGET_HOST: instance._pool_or_conn.address[0],\n net.TARGET_PORT: instance._pool_or_conn.address[1],\n redisx.DB: instance._pool_or_conn.db or 0,\n }\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(redisx.RAWCMD, resource)\n span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n return await func(*args, **kwargs)\n", "path": "ddtrace/contrib/aioredis/patch.py" } ]
diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py index c2752835c9f..286e65c0053 100644 --- a/ddtrace/contrib/aioredis/patch.py +++ b/ddtrace/contrib/aioredis/patch.py @@ -70,8 +70,8 @@ async def traced_execute_command(func, instance, args, kwargs): return await func(*args, **kwargs) -async def traced_pipeline(func, instance, args, kwargs): - pipeline = await func(*args, **kwargs) +def traced_pipeline(func, instance, args, kwargs): + pipeline = func(*args, **kwargs) pin = Pin.get_from(instance) if pin: pin.onto(pipeline) diff --git a/releasenotes/notes/fix-aioredis-async-with-pipeline-805966300810edf8.yaml b/releasenotes/notes/fix-aioredis-async-with-pipeline-805966300810edf8.yaml new file mode 100644 index 00000000000..e09f30b640a --- /dev/null +++ b/releasenotes/notes/fix-aioredis-async-with-pipeline-805966300810edf8.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixes incompatibility of wrapped aioredis pipelines in ``async with`` statements. diff --git a/tests/contrib/aioredis/test_aioredis.py b/tests/contrib/aioredis/test_aioredis.py index 33e736c2345..b62d74f35dd 100644 --- a/tests/contrib/aioredis/test_aioredis.py +++ b/tests/contrib/aioredis/test_aioredis.py @@ -153,6 +153,35 @@ async def test_pipeline_traced(redis_client): assert response_list[3].decode() == "bar" [email protected](aioredis_version < (2, 0), reason="only supported in aioredis >= 2.0") [email protected] [email protected] +async def test_pipeline_traced_context_manager_transaction(redis_client): + """ + Regression test for: https://github.com/DataDog/dd-trace-py/issues/3106 + + https://aioredis.readthedocs.io/en/latest/migration/#pipelines-and-transactions-multiexec + + Example:: + + async def main(): + redis = await aioredis.from_url("redis://localhost") + async with redis.pipeline(transaction=True) as pipe: + ok1, ok2 = await (pipe.set("key1", "value1").set("key2", "value2").execute()) + assert ok1 + assert ok2 + """ + + async with redis_client.pipeline(transaction=True) as p: + set_1, set_2, get_1, get_2 = await (p.set("blah", "boo").set("foo", "bar").get("blah").get("foo").execute()) + + # response from redis.set is OK if successfully pushed + assert set_1 is True + assert set_2 is True + assert get_1.decode() == "boo" + assert get_2.decode() == "bar" + + @pytest.mark.asyncio @pytest.mark.snapshot(variants={"": aioredis_version >= (2, 0), "13": aioredis_version < (2, 0)}) async def test_two_traced_pipelines(redis_client): diff --git a/tests/snapshots/tests.contrib.aioredis.test_aioredis.test_pipeline_traced_context_manager_transaction.json b/tests/snapshots/tests.contrib.aioredis.test_aioredis.test_pipeline_traced_context_manager_transaction.json new file mode 100644 index 00000000000..b969f3520fb --- /dev/null +++ b/tests/snapshots/tests.contrib.aioredis.test_aioredis.test_pipeline_traced_context_manager_transaction.json @@ -0,0 +1,28 @@ +[[ + { + "name": "redis.command", + "service": "redis", + "resource": "SET blah boo\nSET foo bar\nGET blah\nGET foo", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "redis", + "meta": { + "out.host": "127.0.0.1", + "redis.raw_command": "SET blah boo\nSET foo bar\nGET blah\nGET foo", + "runtime-id": "b734eb991b1f45f2b063db6d3c5623b9" + }, + "metrics": { + "_dd.agent_psr": 1.0, + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "out.port": 6379, + "out.redis_db": 0, + "redis.pipeline_length": 4, + "system.pid": 28312 + }, + "duration": 2132000, + "start": 1641496497488785000 + }]]
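The breakage comes from wrapping a synchronous factory (`Redis.pipeline`) in an async wrapper: callers then receive a coroutine, which is not an async context manager. A toy reproduction of both the failure and the fix (hypothetical stand-ins, no aioredis or ddtrace involved) is:

```python
# Why the async wrapper broke `async with`: the caller gets a coroutine
# object, which has no __aenter__/__aexit__. The fixed wrapper is a plain
# function, so the Pipeline (and its async context manager protocol) is
# passed through unchanged.
import asyncio


class Pipeline:
    async def __aenter__(self):
        return self

    async def __aexit__(self, *exc):
        return False


def pipeline():  # stand-in for aioredis' Redis.pipeline()
    return Pipeline()


async def broken_traced_pipeline():  # old wrapper: needlessly declared async
    return pipeline()


def fixed_traced_pipeline():  # new wrapper: plain function
    return pipeline()


async def main():
    async with fixed_traced_pipeline() as pipe:
        print("fixed wrapper ok:", type(pipe).__name__)
    try:
        async with broken_traced_pipeline() as pipe:  # a coroutine, not a Pipeline
            pass
    except (AttributeError, TypeError) as err:  # error type varies by Python version
        print("broken wrapper fails:", err)


asyncio.run(main())
```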
Lightning-AI__torchmetrics-1782
NLTK punkt bug ## 🐛 Bug [This line](https://github.com/Lightning-AI/torchmetrics/blob/17c0e9fdf636cff7299fc3d86fa5e3709b603737/src/torchmetrics/functional/text/rouge.py#L52) `nltk.data.find("tokenizers/punkt.zip")` always raises LookupError: ``` LookupError: ********************************************************************** Resource punkt not found. Please use the NLTK Downloader to obtain the resource: >>> import nltk >>> nltk.download('punkt') For more information see: https://www.nltk.org/data.html Attempted to load tokenizers/punkt.zip Searched in: - '/home/**/nltk_data' - '/data/**/anaconda3/nltk_data' - '/data/**/anaconda3/share/nltk_data' - '/data/**/anaconda3/lib/nltk_data' - '/usr/share/nltk_data' - '/usr/local/share/nltk_data' - '/usr/lib/nltk_data' - '/usr/local/lib/nltk_data' ********************************************************************** ``` Maybe it should be `nltk.data.find("tokenizers/punkt")`? After the downloading that follows, this calling to `find` without .zip extension works successfully. ### To Reproduce ``` import nltk try: nltk.data.find("tokenizers/punkt") except LookupError: try: nltk.download("punkt", quiet=True, force=False, halt_on_error=False, raise_on_error=True) except ValueError: raise OSError( "`nltk` resource `punkt` is not available on a disk and cannot be downloaded as a machine is not " "connected to the internet." ) nltk.data.find("tokenizers/punkt.zip") ``` ### Expected behavior No error raised. ### Environment - TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source): 0.11.4, pip installed - Python & PyTorch Version (e.g., 1.0): Python 3.7.10, PyTorch 1.13.0 - Any other relevant information such as OS (e.g., Linux): Ubuntu 18.04, nltk 3.8.1 ### Additional context The above information should be enough.
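A minimal sketch of the lookup-then-download pattern the issue asks for, probing `tokenizers/punkt` (the unzipped resource path) rather than `tokenizers/punkt.zip`, assuming `nltk` is installed and a download is allowed on first use:

```python
# Probe the unzipped resource path so the check passes once punkt exists on
# disk; download only when the lookup genuinely fails. Mirrors the repro in
# the issue, with the .zip suffix dropped from the find() call.
import nltk


def ensure_punkt() -> None:
    try:
        nltk.data.find("tokenizers/punkt")
    except LookupError:
        try:
            nltk.download("punkt", quiet=True, raise_on_error=True)
        except ValueError as err:
            raise OSError("`punkt` is missing and could not be downloaded") from err


ensure_punkt()
print(nltk.sent_tokenize("First sentence. Second sentence."))
```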
[ { "content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport re\nimport urllib.request\nfrom collections import Counter\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union\nfrom urllib.request import HTTPError\n\nimport torch\nfrom torch import Tensor, tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.utilities.imports import _NLTK_AVAILABLE\n\n__doctest_requires__ = {(\"rouge_score\", \"_rouge_score_update\"): [\"nltk\"]}\n\nALLOWED_ROUGE_KEYS: Dict[str, Union[int, str]] = {\n \"rouge1\": 1,\n \"rouge2\": 2,\n \"rouge3\": 3,\n \"rouge4\": 4,\n \"rouge5\": 5,\n \"rouge6\": 6,\n \"rouge7\": 7,\n \"rouge8\": 8,\n \"rouge9\": 9,\n \"rougeL\": \"L\",\n \"rougeLsum\": \"Lsum\",\n}\nALLOWED_ACCUMULATE_VALUES = (\"avg\", \"best\")\n\n\ndef _ensure_nltk_punkt_is_downloaded() -> None:\n \"\"\"Check whether `nltk` `punkt` is downloaded.\n\n If not, try to download if a machine is connected to the internet.\n \"\"\"\n import nltk\n\n try:\n nltk.data.find(\"tokenizers/punkt.zip\")\n except LookupError:\n try:\n nltk.download(\"punkt\", quiet=True, force=False, halt_on_error=False, raise_on_error=True)\n except ValueError:\n raise OSError(\n \"`nltk` resource `punkt` is not available on a disk and cannot be downloaded as a machine is not \"\n \"connected to the internet.\"\n )\n\n\ndef _split_sentence(x: str) -> Sequence[str]:\n \"\"\"Split sentence to get rougeLsum scores matching published rougeL scores for BART and PEGASUS.\"\"\"\n if not _NLTK_AVAILABLE:\n raise ModuleNotFoundError(\"ROUGE-Lsum calculation requires that `nltk` is installed. 
Use `pip install nltk`.\")\n import nltk\n\n _ensure_nltk_punkt_is_downloaded()\n\n re.sub(\"<n>\", \"\", x) # remove pegasus newline char\n return nltk.sent_tokenize(x)\n\n\ndef _compute_metrics(hits_or_lcs: int, pred_len: int, target_len: int) -> Dict[str, Tensor]:\n \"\"\"Compute overall metrics.\n\n This function computes precision, recall and F1 score based on hits/lcs, the length of lists of tokenizer\n predicted and target sentences.\n\n Args:\n hits_or_lcs: A number of matches or a length of the longest common subsequence.\n pred_len: A length of a tokenized predicted sentence.\n target_len: A length of a tokenized target sentence.\n \"\"\"\n precision = hits_or_lcs / pred_len\n recall = hits_or_lcs / target_len\n if precision == recall == 0.0:\n return {\"precision\": tensor(0.0), \"recall\": tensor(0.0), \"fmeasure\": tensor(0.0)}\n\n fmeasure = 2 * precision * recall / (precision + recall)\n return {\"precision\": tensor(precision), \"recall\": tensor(recall), \"fmeasure\": tensor(fmeasure)}\n\n\ndef _lcs(\n pred_tokens: Sequence[str], target_tokens: Sequence[str], return_full_table: bool = False\n) -> Union[int, Sequence[Sequence[int]]]:\n \"\"\"DP algorithm to compute the length of the longest common subsequence.\n\n Args:\n pred_tokens: A tokenized predicted sentence.\n target_tokens: A tokenized target sentence.\n return_full_table: If the full table of logest common subsequence should be returned or just the largest\n \"\"\"\n lcs = [[0] * (len(pred_tokens) + 1) for _ in range(len(target_tokens) + 1)]\n for i in range(1, len(target_tokens) + 1):\n for j in range(1, len(pred_tokens) + 1):\n if target_tokens[i - 1] == pred_tokens[j - 1]:\n lcs[i][j] = lcs[i - 1][j - 1] + 1\n else:\n lcs[i][j] = max(lcs[i - 1][j], lcs[i][j - 1])\n if return_full_table:\n return lcs\n return lcs[-1][-1]\n\n\ndef _backtracked_lcs(\n lcs_table: Sequence[Sequence[int]], pred_tokens: Sequence[str], target_tokens: Sequence[str]\n) -> Sequence[int]:\n \"\"\"Backtrack LCS table.\n\n Args:\n lcs_table: A table containing information for the calculation of the longest common subsequence.\n pred_tokens: A tokenized predicted sentence.\n target_tokens: A tokenized target sentence.\n \"\"\"\n i = len(pred_tokens)\n j = len(target_tokens)\n backtracked_lcs: List[int] = []\n while i > 0 and j > 0:\n if pred_tokens[i - 1] == target_tokens[j - 1]:\n backtracked_lcs.insert(0, j - 1)\n i -= 1\n j -= 1\n elif lcs_table[j][i - 1] > lcs_table[j - 1][i]:\n i -= 1\n else:\n j -= 1\n return backtracked_lcs\n\n\ndef _union_lcs(pred_tokens_list: Sequence[Sequence[str]], target_tokens: Sequence[str]) -> Sequence[str]:\n r\"\"\"Find union LCS between a target sentence and iterable of predicted tokens.\n\n Args:\n pred_tokens_list: A tokenized predicted sentence split by ``'\\n'``.\n target_tokens: A tokenized single part of target sentence split by ``'\\n'``.\n \"\"\"\n\n def lcs_ind(pred_tokens: Sequence[str], target_tokens: Sequence[str]) -> Sequence[int]:\n \"\"\"Return one of the longest of longest common subsequence via backtracked lcs table.\"\"\"\n lcs_table: Sequence[Sequence[int]] = _lcs(pred_tokens, target_tokens, return_full_table=True) # type: ignore\n return _backtracked_lcs(lcs_table, pred_tokens, target_tokens)\n\n def find_union(lcs_tables: Sequence[Sequence[int]]) -> Sequence[int]:\n \"\"\"Find union LCS given a list of LCS.\"\"\"\n return sorted(set().union(*lcs_tables))\n\n lcs_tables = [lcs_ind(pred_tokens, target_tokens) for pred_tokens in pred_tokens_list]\n return [target_tokens[i] for i in 
find_union(lcs_tables)]\n\n\ndef _normalize_and_tokenize_text(\n text: str,\n stemmer: Optional[Any] = None,\n normalizer: Optional[Callable[[str], str]] = None,\n tokenizer: Optional[Callable[[str], Sequence[str]]] = None,\n) -> Sequence[str]:\n \"\"\"Rouge score should be calculated only over lowercased words and digits.\n\n Optionally, Porter stemmer can be used to strip word suffixes to improve matching. The text normalization follows\n the implemantion from `Rouge score_Text Normalizition`_.\n\n Args:\n text: An input sentence.\n stemmer: Porter stemmer instance to strip word suffixes to improve matching.\n normalizer: A user's own normalizer function.\n If this is ``None``, replacing any non-alpha-numeric characters with spaces is default.\n This function must take a ``str`` and return a ``str``.\n tokenizer:\n A user's own tokenizer function. If this is ``None``, splitting by spaces is default\n This function must take a ``str`` and return ``Sequence[str]``\n \"\"\"\n # If normalizer is none, replace any non-alpha-numeric characters with spaces.\n text = normalizer(text) if callable(normalizer) else re.sub(r\"[^a-z0-9]+\", \" \", text.lower())\n\n # If tokenizer is none, spliting by spaces\n tokens = tokenizer(text) if callable(tokenizer) else re.split(r\"\\s+\", text)\n\n if stemmer:\n # Only stem words more than 3 characters long.\n tokens = [stemmer.stem(x) if len(x) > 3 else x for x in tokens]\n\n # One final check to drop any empty or invalid tokens.\n return [x for x in tokens if (isinstance(x, str) and len(x) > 0)]\n\n\ndef _rouge_n_score(pred: Sequence[str], target: Sequence[str], n_gram: int) -> Dict[str, Tensor]:\n \"\"\"Compute precision, recall and F1 score for the Rouge-N metric.\n\n Args:\n pred: A predicted sentence.\n target: A target sentence.\n n_gram: N-gram overlap.\n \"\"\"\n\n def _create_ngrams(tokens: Sequence[str], n: int) -> Counter:\n ngrams: Counter = Counter()\n for ngram in (tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1)):\n ngrams[ngram] += 1\n return ngrams\n\n pred_ngrams, target_ngrams = _create_ngrams(pred, n_gram), _create_ngrams(target, n_gram)\n pred_len, target_len = sum(pred_ngrams.values()), sum(target_ngrams.values())\n if 0 in (pred_len, target_len):\n return {\"precision\": tensor(0.0), \"recall\": tensor(0.0), \"fmeasure\": tensor(0.0)}\n\n # It is sufficient to take a set(pred_tokenized) for hits count as we consider intersenction of pred & target\n hits = sum(min(pred_ngrams[w], target_ngrams[w]) for w in set(pred_ngrams))\n return _compute_metrics(hits, max(pred_len, 1), max(target_len, 1))\n\n\ndef _rouge_l_score(pred: Sequence[str], target: Sequence[str]) -> Dict[str, Tensor]:\n \"\"\"Compute precision, recall and F1 score for the Rouge-L metric.\n\n Args:\n pred: A predicted sentence.\n target: A target sentence.\n \"\"\"\n pred_len, target_len = len(pred), len(target)\n if 0 in (pred_len, target_len):\n return {\"precision\": tensor(0.0), \"recall\": tensor(0.0), \"fmeasure\": tensor(0.0)}\n\n lcs: int = _lcs(pred, target) # type: ignore\n return _compute_metrics(lcs, pred_len, target_len)\n\n\ndef _rouge_lsum_score(pred: Sequence[Sequence[str]], target: Sequence[Sequence[str]]) -> Dict[str, Tensor]:\n r\"\"\"Compute precision, recall and F1 score for the Rouge-LSum metric.\n\n More information can be found in Section 3.2 of the referenced paper [1]. 
This implementation follow the official\n implementation from:\n https://github.com/google-research/google-research/blob/master/rouge/rouge_scorer.py.\n\n Args:\n pred: An iterable of predicted sentence split by '\\n'.\n target: An iterable target sentence split by '\\n'.\n\n References:\n [1] ROUGE: A Package for Automatic Evaluation of Summaries by Chin-Yew Lin. https://aclanthology.org/W04-1013/\n \"\"\"\n pred_len = sum(map(len, pred))\n target_len = sum(map(len, target))\n if 0 in (pred_len, target_len):\n return {\"precision\": tensor(0.0), \"recall\": tensor(0.0), \"fmeasure\": tensor(0.0)}\n\n # Get token counts\n def _get_token_counts(sentences: Sequence[Sequence[str]]) -> Counter:\n ngrams: Counter = Counter()\n for sentence in sentences:\n ngrams.update(sentence)\n return ngrams\n\n pred_tokens_count = _get_token_counts(pred)\n target_tokens_count = _get_token_counts(target)\n\n # Calculate hits\n hits = 0\n for tgt in target:\n lcs = _union_lcs(pred, tgt)\n for token in lcs:\n if pred_tokens_count[token] > 0 and target_tokens_count[token] > 0:\n hits += 1\n pred_tokens_count[token] -= 1\n target_tokens_count[token] -= 1\n\n return _compute_metrics(hits, pred_len, target_len)\n\n\ndef _rouge_score_update(\n preds: Sequence[str],\n target: Sequence[Sequence[str]],\n rouge_keys_values: List[Union[int, str]],\n accumulate: str,\n stemmer: Optional[Any] = None,\n normalizer: Optional[Callable[[str], str]] = None,\n tokenizer: Optional[Callable[[str], Sequence[str]]] = None,\n) -> Dict[Union[int, str], List[Dict[str, Tensor]]]:\n \"\"\"Update the rouge score with the current set of predicted and target sentences.\n\n Args:\n preds: An iterable of predicted sentences.\n target: An iterable of iterable of target sentences.\n rouge_keys_values: List of N-grams/'L'/'Lsum' arguments.\n accumulate: Useful incase of multi-reference rouge score.\n ``avg`` takes the avg of all references with respect to predictions\n ``best`` takes the best fmeasure score obtained between prediction and multiple corresponding references.\n Allowed values are ``avg`` and ``best``.\n stemmer: Porter stemmer instance to strip word suffixes to improve matching.\n normalizer:\n A user's own normalizer function.\n If this is ``None``, replacing any non-alpha-numeric characters with spaces is default.\n This function must take a `str` and return a `str`.\n tokenizer:\n A user's own tokenizer function. 
If this is ``None``, spliting by spaces is default\n This function must take a `str` and return `Sequence[str]`\n\n Example:\n >>> preds = \"My name is John\".split()\n >>> target = \"Is your name John\".split()\n >>> from pprint import pprint\n >>> score = _rouge_score_update(preds, target, rouge_keys_values=[1, 2, 3, 'L'], accumulate='best')\n >>> pprint(score)\n {1: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}],\n 2: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}],\n 3: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}],\n 'L': [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}]}\n \"\"\"\n results: Dict[Union[int, str], List[Dict[str, Tensor]]] = {rouge_key: [] for rouge_key in rouge_keys_values}\n\n for pred_raw, target_raw in zip(preds, target):\n result_inner: Dict[Union[int, str], Dict[str, Tensor]] = {rouge_key: {} for rouge_key in rouge_keys_values}\n result_avg: Dict[Union[int, str], List[Dict[str, Tensor]]] = {rouge_key: [] for rouge_key in rouge_keys_values}\n list_results = []\n pred = _normalize_and_tokenize_text(pred_raw, stemmer, normalizer, tokenizer)\n if \"Lsum\" in rouge_keys_values:\n pred_lsum = [\n _normalize_and_tokenize_text(pred_sentence, stemmer, normalizer, tokenizer)\n for pred_sentence in _split_sentence(pred_raw)\n ]\n\n for target_raw_inner in target_raw:\n tgt = _normalize_and_tokenize_text(target_raw_inner, stemmer, normalizer, tokenizer)\n\n if \"Lsum\" in rouge_keys_values:\n target_lsum = [\n _normalize_and_tokenize_text(tgt_sentence, stemmer, normalizer, tokenizer)\n for tgt_sentence in _split_sentence(target_raw_inner)\n ]\n\n for rouge_key in rouge_keys_values:\n if isinstance(rouge_key, int):\n score = _rouge_n_score(pred, tgt, rouge_key)\n elif rouge_key == \"L\":\n score = _rouge_l_score(pred, tgt)\n elif rouge_key == \"Lsum\":\n score = _rouge_lsum_score(pred_lsum, target_lsum)\n result_inner[rouge_key] = score\n result_avg[rouge_key].append(score)\n list_results.append(result_inner.copy())\n\n if accumulate == \"best\":\n key_curr = rouge_keys_values[0]\n all_fmeasure = torch.tensor([v[key_curr][\"fmeasure\"] for v in list_results])\n highest_idx = int(torch.argmax(all_fmeasure).item())\n\n for rouge_key in rouge_keys_values:\n results[rouge_key].append(list_results[highest_idx][rouge_key])\n\n elif accumulate == \"avg\":\n new_result_avg: Dict[Union[int, str], Dict[str, Tensor]] = {\n rouge_key: {} for rouge_key in rouge_keys_values\n }\n for rouge_key, metrics in result_avg.items():\n _dict_metric_score_batch: Dict[str, List[Tensor]] = {}\n for metric in 
metrics:\n for _type, value in metric.items():\n if _type not in _dict_metric_score_batch:\n _dict_metric_score_batch[_type] = []\n _dict_metric_score_batch[_type].append(value)\n\n new_result_avg[rouge_key] = {\n _type: torch.tensor(_dict_metric_score_batch[_type]).mean() for _type in _dict_metric_score_batch\n }\n\n for rouge_key in rouge_keys_values:\n results[rouge_key].append(new_result_avg[rouge_key])\n\n return results\n\n\ndef _rouge_score_compute(sentence_results: Dict[str, List[Tensor]]) -> Dict[str, Tensor]:\n \"\"\"Compute the combined ROUGE metric for all the input set of predicted and target sentences.\n\n Args:\n sentence_results: Rouge-N/Rouge-L/Rouge-LSum metrics calculated for single sentence.\n \"\"\"\n results: Dict[str, Tensor] = {}\n # Obtain mean scores for individual rouge metrics\n if sentence_results == {}:\n return results\n\n for rouge_key, scores in sentence_results.items():\n results[rouge_key] = torch.tensor(scores).mean()\n\n return results\n\n\ndef rouge_score(\n preds: Union[str, Sequence[str]],\n target: Union[str, Sequence[str], Sequence[Sequence[str]]],\n accumulate: Literal[\"avg\", \"best\"] = \"best\",\n use_stemmer: bool = False,\n normalizer: Optional[Callable[[str], str]] = None,\n tokenizer: Optional[Callable[[str], Sequence[str]]] = None,\n rouge_keys: Union[str, Tuple[str, ...]] = (\"rouge1\", \"rouge2\", \"rougeL\", \"rougeLsum\"),\n) -> Dict[str, Tensor]:\n \"\"\"Calculate `Calculate Rouge Score`_ , used for automatic summarization.\n\n Args:\n preds: An iterable of predicted sentences or a single predicted sentence.\n target:\n An iterable of iterables of target sentences or an iterable of target sentences or a single target sentence.\n accumulate:\n Useful incase of multi-reference rouge score.\n\n - ``avg`` takes the avg of all references with respect to predictions\n - ``best`` takes the best fmeasure score obtained between prediction and multiple corresponding references.\n\n use_stemmer: Use Porter stemmer to strip word suffixes to improve matching.\n normalizer: A user's own normalizer function.\n If this is ``None``, replacing any non-alpha-numeric characters with spaces is default.\n This function must take a ``str`` and return a ``str``.\n tokenizer: A user's own tokenizer function. If this is ``None``, spliting by spaces is default\n This function must take a ``str`` and return ``Sequence[str]``\n rouge_keys: A list of rouge types to calculate.\n Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``.\n\n Return:\n Python dictionary of rouge scores for each input rouge key.\n\n Example:\n >>> from torchmetrics.functional.text.rouge import rouge_score\n >>> preds = \"My name is John\"\n >>> target = \"Is your name John\"\n >>> from pprint import pprint\n >>> pprint(rouge_score(preds, target))\n {'rouge1_fmeasure': tensor(0.7500),\n 'rouge1_precision': tensor(0.7500),\n 'rouge1_recall': tensor(0.7500),\n 'rouge2_fmeasure': tensor(0.),\n 'rouge2_precision': tensor(0.),\n 'rouge2_recall': tensor(0.),\n 'rougeL_fmeasure': tensor(0.5000),\n 'rougeL_precision': tensor(0.5000),\n 'rougeL_recall': tensor(0.5000),\n 'rougeLsum_fmeasure': tensor(0.5000),\n 'rougeLsum_precision': tensor(0.5000),\n 'rougeLsum_recall': tensor(0.5000)}\n\n\n Raises:\n ModuleNotFoundError:\n If the python package ``nltk`` is not installed.\n ValueError:\n If any of the ``rouge_keys`` does not belong to the allowed set of keys.\n\n References:\n [1] ROUGE: A Package for Automatic Evaluation of Summaries by Chin-Yew Lin. 
https://aclanthology.org/W04-1013/\n \"\"\"\n if use_stemmer:\n if not _NLTK_AVAILABLE:\n raise ModuleNotFoundError(\"Stemmer requires that `nltk` is installed. Use `pip install nltk`.\")\n import nltk\n\n stemmer = nltk.stem.porter.PorterStemmer() if use_stemmer else None\n\n if not isinstance(rouge_keys, tuple):\n rouge_keys = (rouge_keys,)\n for key in rouge_keys:\n if key not in ALLOWED_ROUGE_KEYS.keys():\n raise ValueError(f\"Got unknown rouge key {key}. Expected to be one of {list(ALLOWED_ROUGE_KEYS.keys())}\")\n rouge_keys_values = [ALLOWED_ROUGE_KEYS[key] for key in rouge_keys]\n\n if isinstance(target, list) and all(isinstance(tgt, str) for tgt in target):\n target = [target] if isinstance(preds, str) else [[tgt] for tgt in target]\n\n if isinstance(preds, str):\n preds = [preds]\n\n if isinstance(target, str):\n target = [[target]]\n\n sentence_results: Dict[Union[int, str], List[Dict[str, Tensor]]] = _rouge_score_update(\n preds,\n target,\n rouge_keys_values,\n stemmer=stemmer,\n normalizer=normalizer,\n tokenizer=tokenizer,\n accumulate=accumulate,\n )\n\n output: Dict[str, List[Tensor]] = {}\n for rouge_key in rouge_keys_values:\n for tp in [\"fmeasure\", \"precision\", \"recall\"]:\n output[f\"rouge{rouge_key}_{tp}\"] = []\n\n for rouge_key, metrics in sentence_results.items():\n for metric in metrics:\n for tp, value in metric.items():\n output[f\"rouge{rouge_key}_{tp}\"].append(value)\n\n return _rouge_score_compute(output)\n", "path": "src/torchmetrics/functional/text/rouge.py" } ]
[ { "content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport re\nimport urllib.request\nfrom collections import Counter\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union\nfrom urllib.request import HTTPError\n\nimport torch\nfrom torch import Tensor, tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.utilities.imports import _NLTK_AVAILABLE\n\n__doctest_requires__ = {(\"rouge_score\", \"_rouge_score_update\"): [\"nltk\"]}\n\nALLOWED_ROUGE_KEYS: Dict[str, Union[int, str]] = {\n \"rouge1\": 1,\n \"rouge2\": 2,\n \"rouge3\": 3,\n \"rouge4\": 4,\n \"rouge5\": 5,\n \"rouge6\": 6,\n \"rouge7\": 7,\n \"rouge8\": 8,\n \"rouge9\": 9,\n \"rougeL\": \"L\",\n \"rougeLsum\": \"Lsum\",\n}\nALLOWED_ACCUMULATE_VALUES = (\"avg\", \"best\")\n\n\ndef _ensure_nltk_punkt_is_downloaded() -> None:\n \"\"\"Check whether `nltk` `punkt` is downloaded.\n\n If not, try to download if a machine is connected to the internet.\n \"\"\"\n import nltk\n\n try:\n nltk.data.find(\"tokenizers/punkt\")\n except LookupError:\n try:\n nltk.download(\"punkt\", quiet=True, force=False, halt_on_error=False, raise_on_error=True)\n except ValueError:\n raise OSError(\n \"`nltk` resource `punkt` is not available on a disk and cannot be downloaded as a machine is not \"\n \"connected to the internet.\"\n )\n\n\ndef _split_sentence(x: str) -> Sequence[str]:\n \"\"\"Split sentence to get rougeLsum scores matching published rougeL scores for BART and PEGASUS.\"\"\"\n if not _NLTK_AVAILABLE:\n raise ModuleNotFoundError(\"ROUGE-Lsum calculation requires that `nltk` is installed. 
Use `pip install nltk`.\")\n import nltk\n\n _ensure_nltk_punkt_is_downloaded()\n\n re.sub(\"<n>\", \"\", x) # remove pegasus newline char\n return nltk.sent_tokenize(x)\n\n\ndef _compute_metrics(hits_or_lcs: int, pred_len: int, target_len: int) -> Dict[str, Tensor]:\n \"\"\"Compute overall metrics.\n\n This function computes precision, recall and F1 score based on hits/lcs, the length of lists of tokenizer\n predicted and target sentences.\n\n Args:\n hits_or_lcs: A number of matches or a length of the longest common subsequence.\n pred_len: A length of a tokenized predicted sentence.\n target_len: A length of a tokenized target sentence.\n \"\"\"\n precision = hits_or_lcs / pred_len\n recall = hits_or_lcs / target_len\n if precision == recall == 0.0:\n return {\"precision\": tensor(0.0), \"recall\": tensor(0.0), \"fmeasure\": tensor(0.0)}\n\n fmeasure = 2 * precision * recall / (precision + recall)\n return {\"precision\": tensor(precision), \"recall\": tensor(recall), \"fmeasure\": tensor(fmeasure)}\n\n\ndef _lcs(\n pred_tokens: Sequence[str], target_tokens: Sequence[str], return_full_table: bool = False\n) -> Union[int, Sequence[Sequence[int]]]:\n \"\"\"DP algorithm to compute the length of the longest common subsequence.\n\n Args:\n pred_tokens: A tokenized predicted sentence.\n target_tokens: A tokenized target sentence.\n return_full_table: If the full table of logest common subsequence should be returned or just the largest\n \"\"\"\n lcs = [[0] * (len(pred_tokens) + 1) for _ in range(len(target_tokens) + 1)]\n for i in range(1, len(target_tokens) + 1):\n for j in range(1, len(pred_tokens) + 1):\n if target_tokens[i - 1] == pred_tokens[j - 1]:\n lcs[i][j] = lcs[i - 1][j - 1] + 1\n else:\n lcs[i][j] = max(lcs[i - 1][j], lcs[i][j - 1])\n if return_full_table:\n return lcs\n return lcs[-1][-1]\n\n\ndef _backtracked_lcs(\n lcs_table: Sequence[Sequence[int]], pred_tokens: Sequence[str], target_tokens: Sequence[str]\n) -> Sequence[int]:\n \"\"\"Backtrack LCS table.\n\n Args:\n lcs_table: A table containing information for the calculation of the longest common subsequence.\n pred_tokens: A tokenized predicted sentence.\n target_tokens: A tokenized target sentence.\n \"\"\"\n i = len(pred_tokens)\n j = len(target_tokens)\n backtracked_lcs: List[int] = []\n while i > 0 and j > 0:\n if pred_tokens[i - 1] == target_tokens[j - 1]:\n backtracked_lcs.insert(0, j - 1)\n i -= 1\n j -= 1\n elif lcs_table[j][i - 1] > lcs_table[j - 1][i]:\n i -= 1\n else:\n j -= 1\n return backtracked_lcs\n\n\ndef _union_lcs(pred_tokens_list: Sequence[Sequence[str]], target_tokens: Sequence[str]) -> Sequence[str]:\n r\"\"\"Find union LCS between a target sentence and iterable of predicted tokens.\n\n Args:\n pred_tokens_list: A tokenized predicted sentence split by ``'\\n'``.\n target_tokens: A tokenized single part of target sentence split by ``'\\n'``.\n \"\"\"\n\n def lcs_ind(pred_tokens: Sequence[str], target_tokens: Sequence[str]) -> Sequence[int]:\n \"\"\"Return one of the longest of longest common subsequence via backtracked lcs table.\"\"\"\n lcs_table: Sequence[Sequence[int]] = _lcs(pred_tokens, target_tokens, return_full_table=True) # type: ignore\n return _backtracked_lcs(lcs_table, pred_tokens, target_tokens)\n\n def find_union(lcs_tables: Sequence[Sequence[int]]) -> Sequence[int]:\n \"\"\"Find union LCS given a list of LCS.\"\"\"\n return sorted(set().union(*lcs_tables))\n\n lcs_tables = [lcs_ind(pred_tokens, target_tokens) for pred_tokens in pred_tokens_list]\n return [target_tokens[i] for i in 
find_union(lcs_tables)]\n\n\ndef _normalize_and_tokenize_text(\n text: str,\n stemmer: Optional[Any] = None,\n normalizer: Optional[Callable[[str], str]] = None,\n tokenizer: Optional[Callable[[str], Sequence[str]]] = None,\n) -> Sequence[str]:\n \"\"\"Rouge score should be calculated only over lowercased words and digits.\n\n Optionally, Porter stemmer can be used to strip word suffixes to improve matching. The text normalization follows\n the implemantion from `Rouge score_Text Normalizition`_.\n\n Args:\n text: An input sentence.\n stemmer: Porter stemmer instance to strip word suffixes to improve matching.\n normalizer: A user's own normalizer function.\n If this is ``None``, replacing any non-alpha-numeric characters with spaces is default.\n This function must take a ``str`` and return a ``str``.\n tokenizer:\n A user's own tokenizer function. If this is ``None``, splitting by spaces is default\n This function must take a ``str`` and return ``Sequence[str]``\n \"\"\"\n # If normalizer is none, replace any non-alpha-numeric characters with spaces.\n text = normalizer(text) if callable(normalizer) else re.sub(r\"[^a-z0-9]+\", \" \", text.lower())\n\n # If tokenizer is none, spliting by spaces\n tokens = tokenizer(text) if callable(tokenizer) else re.split(r\"\\s+\", text)\n\n if stemmer:\n # Only stem words more than 3 characters long.\n tokens = [stemmer.stem(x) if len(x) > 3 else x for x in tokens]\n\n # One final check to drop any empty or invalid tokens.\n return [x for x in tokens if (isinstance(x, str) and len(x) > 0)]\n\n\ndef _rouge_n_score(pred: Sequence[str], target: Sequence[str], n_gram: int) -> Dict[str, Tensor]:\n \"\"\"Compute precision, recall and F1 score for the Rouge-N metric.\n\n Args:\n pred: A predicted sentence.\n target: A target sentence.\n n_gram: N-gram overlap.\n \"\"\"\n\n def _create_ngrams(tokens: Sequence[str], n: int) -> Counter:\n ngrams: Counter = Counter()\n for ngram in (tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1)):\n ngrams[ngram] += 1\n return ngrams\n\n pred_ngrams, target_ngrams = _create_ngrams(pred, n_gram), _create_ngrams(target, n_gram)\n pred_len, target_len = sum(pred_ngrams.values()), sum(target_ngrams.values())\n if 0 in (pred_len, target_len):\n return {\"precision\": tensor(0.0), \"recall\": tensor(0.0), \"fmeasure\": tensor(0.0)}\n\n # It is sufficient to take a set(pred_tokenized) for hits count as we consider intersenction of pred & target\n hits = sum(min(pred_ngrams[w], target_ngrams[w]) for w in set(pred_ngrams))\n return _compute_metrics(hits, max(pred_len, 1), max(target_len, 1))\n\n\ndef _rouge_l_score(pred: Sequence[str], target: Sequence[str]) -> Dict[str, Tensor]:\n \"\"\"Compute precision, recall and F1 score for the Rouge-L metric.\n\n Args:\n pred: A predicted sentence.\n target: A target sentence.\n \"\"\"\n pred_len, target_len = len(pred), len(target)\n if 0 in (pred_len, target_len):\n return {\"precision\": tensor(0.0), \"recall\": tensor(0.0), \"fmeasure\": tensor(0.0)}\n\n lcs: int = _lcs(pred, target) # type: ignore\n return _compute_metrics(lcs, pred_len, target_len)\n\n\ndef _rouge_lsum_score(pred: Sequence[Sequence[str]], target: Sequence[Sequence[str]]) -> Dict[str, Tensor]:\n r\"\"\"Compute precision, recall and F1 score for the Rouge-LSum metric.\n\n More information can be found in Section 3.2 of the referenced paper [1]. 
This implementation follow the official\n implementation from:\n https://github.com/google-research/google-research/blob/master/rouge/rouge_scorer.py.\n\n Args:\n pred: An iterable of predicted sentence split by '\\n'.\n target: An iterable target sentence split by '\\n'.\n\n References:\n [1] ROUGE: A Package for Automatic Evaluation of Summaries by Chin-Yew Lin. https://aclanthology.org/W04-1013/\n \"\"\"\n pred_len = sum(map(len, pred))\n target_len = sum(map(len, target))\n if 0 in (pred_len, target_len):\n return {\"precision\": tensor(0.0), \"recall\": tensor(0.0), \"fmeasure\": tensor(0.0)}\n\n # Get token counts\n def _get_token_counts(sentences: Sequence[Sequence[str]]) -> Counter:\n ngrams: Counter = Counter()\n for sentence in sentences:\n ngrams.update(sentence)\n return ngrams\n\n pred_tokens_count = _get_token_counts(pred)\n target_tokens_count = _get_token_counts(target)\n\n # Calculate hits\n hits = 0\n for tgt in target:\n lcs = _union_lcs(pred, tgt)\n for token in lcs:\n if pred_tokens_count[token] > 0 and target_tokens_count[token] > 0:\n hits += 1\n pred_tokens_count[token] -= 1\n target_tokens_count[token] -= 1\n\n return _compute_metrics(hits, pred_len, target_len)\n\n\ndef _rouge_score_update(\n preds: Sequence[str],\n target: Sequence[Sequence[str]],\n rouge_keys_values: List[Union[int, str]],\n accumulate: str,\n stemmer: Optional[Any] = None,\n normalizer: Optional[Callable[[str], str]] = None,\n tokenizer: Optional[Callable[[str], Sequence[str]]] = None,\n) -> Dict[Union[int, str], List[Dict[str, Tensor]]]:\n \"\"\"Update the rouge score with the current set of predicted and target sentences.\n\n Args:\n preds: An iterable of predicted sentences.\n target: An iterable of iterable of target sentences.\n rouge_keys_values: List of N-grams/'L'/'Lsum' arguments.\n accumulate: Useful incase of multi-reference rouge score.\n ``avg`` takes the avg of all references with respect to predictions\n ``best`` takes the best fmeasure score obtained between prediction and multiple corresponding references.\n Allowed values are ``avg`` and ``best``.\n stemmer: Porter stemmer instance to strip word suffixes to improve matching.\n normalizer:\n A user's own normalizer function.\n If this is ``None``, replacing any non-alpha-numeric characters with spaces is default.\n This function must take a `str` and return a `str`.\n tokenizer:\n A user's own tokenizer function. 
If this is ``None``, spliting by spaces is default\n This function must take a `str` and return `Sequence[str]`\n\n Example:\n >>> preds = \"My name is John\".split()\n >>> target = \"Is your name John\".split()\n >>> from pprint import pprint\n >>> score = _rouge_score_update(preds, target, rouge_keys_values=[1, 2, 3, 'L'], accumulate='best')\n >>> pprint(score)\n {1: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}],\n 2: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}],\n 3: [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}],\n 'L': [{'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)},\n {'fmeasure': tensor(0.), 'precision': tensor(0.), 'recall': tensor(0.)}]}\n \"\"\"\n results: Dict[Union[int, str], List[Dict[str, Tensor]]] = {rouge_key: [] for rouge_key in rouge_keys_values}\n\n for pred_raw, target_raw in zip(preds, target):\n result_inner: Dict[Union[int, str], Dict[str, Tensor]] = {rouge_key: {} for rouge_key in rouge_keys_values}\n result_avg: Dict[Union[int, str], List[Dict[str, Tensor]]] = {rouge_key: [] for rouge_key in rouge_keys_values}\n list_results = []\n pred = _normalize_and_tokenize_text(pred_raw, stemmer, normalizer, tokenizer)\n if \"Lsum\" in rouge_keys_values:\n pred_lsum = [\n _normalize_and_tokenize_text(pred_sentence, stemmer, normalizer, tokenizer)\n for pred_sentence in _split_sentence(pred_raw)\n ]\n\n for target_raw_inner in target_raw:\n tgt = _normalize_and_tokenize_text(target_raw_inner, stemmer, normalizer, tokenizer)\n\n if \"Lsum\" in rouge_keys_values:\n target_lsum = [\n _normalize_and_tokenize_text(tgt_sentence, stemmer, normalizer, tokenizer)\n for tgt_sentence in _split_sentence(target_raw_inner)\n ]\n\n for rouge_key in rouge_keys_values:\n if isinstance(rouge_key, int):\n score = _rouge_n_score(pred, tgt, rouge_key)\n elif rouge_key == \"L\":\n score = _rouge_l_score(pred, tgt)\n elif rouge_key == \"Lsum\":\n score = _rouge_lsum_score(pred_lsum, target_lsum)\n result_inner[rouge_key] = score\n result_avg[rouge_key].append(score)\n list_results.append(result_inner.copy())\n\n if accumulate == \"best\":\n key_curr = rouge_keys_values[0]\n all_fmeasure = torch.tensor([v[key_curr][\"fmeasure\"] for v in list_results])\n highest_idx = int(torch.argmax(all_fmeasure).item())\n\n for rouge_key in rouge_keys_values:\n results[rouge_key].append(list_results[highest_idx][rouge_key])\n\n elif accumulate == \"avg\":\n new_result_avg: Dict[Union[int, str], Dict[str, Tensor]] = {\n rouge_key: {} for rouge_key in rouge_keys_values\n }\n for rouge_key, metrics in result_avg.items():\n _dict_metric_score_batch: Dict[str, List[Tensor]] = {}\n for metric in 
metrics:\n for _type, value in metric.items():\n if _type not in _dict_metric_score_batch:\n _dict_metric_score_batch[_type] = []\n _dict_metric_score_batch[_type].append(value)\n\n new_result_avg[rouge_key] = {\n _type: torch.tensor(_dict_metric_score_batch[_type]).mean() for _type in _dict_metric_score_batch\n }\n\n for rouge_key in rouge_keys_values:\n results[rouge_key].append(new_result_avg[rouge_key])\n\n return results\n\n\ndef _rouge_score_compute(sentence_results: Dict[str, List[Tensor]]) -> Dict[str, Tensor]:\n \"\"\"Compute the combined ROUGE metric for all the input set of predicted and target sentences.\n\n Args:\n sentence_results: Rouge-N/Rouge-L/Rouge-LSum metrics calculated for single sentence.\n \"\"\"\n results: Dict[str, Tensor] = {}\n # Obtain mean scores for individual rouge metrics\n if sentence_results == {}:\n return results\n\n for rouge_key, scores in sentence_results.items():\n results[rouge_key] = torch.tensor(scores).mean()\n\n return results\n\n\ndef rouge_score(\n preds: Union[str, Sequence[str]],\n target: Union[str, Sequence[str], Sequence[Sequence[str]]],\n accumulate: Literal[\"avg\", \"best\"] = \"best\",\n use_stemmer: bool = False,\n normalizer: Optional[Callable[[str], str]] = None,\n tokenizer: Optional[Callable[[str], Sequence[str]]] = None,\n rouge_keys: Union[str, Tuple[str, ...]] = (\"rouge1\", \"rouge2\", \"rougeL\", \"rougeLsum\"),\n) -> Dict[str, Tensor]:\n \"\"\"Calculate `Calculate Rouge Score`_ , used for automatic summarization.\n\n Args:\n preds: An iterable of predicted sentences or a single predicted sentence.\n target:\n An iterable of iterables of target sentences or an iterable of target sentences or a single target sentence.\n accumulate:\n Useful incase of multi-reference rouge score.\n\n - ``avg`` takes the avg of all references with respect to predictions\n - ``best`` takes the best fmeasure score obtained between prediction and multiple corresponding references.\n\n use_stemmer: Use Porter stemmer to strip word suffixes to improve matching.\n normalizer: A user's own normalizer function.\n If this is ``None``, replacing any non-alpha-numeric characters with spaces is default.\n This function must take a ``str`` and return a ``str``.\n tokenizer: A user's own tokenizer function. If this is ``None``, spliting by spaces is default\n This function must take a ``str`` and return ``Sequence[str]``\n rouge_keys: A list of rouge types to calculate.\n Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``.\n\n Return:\n Python dictionary of rouge scores for each input rouge key.\n\n Example:\n >>> from torchmetrics.functional.text.rouge import rouge_score\n >>> preds = \"My name is John\"\n >>> target = \"Is your name John\"\n >>> from pprint import pprint\n >>> pprint(rouge_score(preds, target))\n {'rouge1_fmeasure': tensor(0.7500),\n 'rouge1_precision': tensor(0.7500),\n 'rouge1_recall': tensor(0.7500),\n 'rouge2_fmeasure': tensor(0.),\n 'rouge2_precision': tensor(0.),\n 'rouge2_recall': tensor(0.),\n 'rougeL_fmeasure': tensor(0.5000),\n 'rougeL_precision': tensor(0.5000),\n 'rougeL_recall': tensor(0.5000),\n 'rougeLsum_fmeasure': tensor(0.5000),\n 'rougeLsum_precision': tensor(0.5000),\n 'rougeLsum_recall': tensor(0.5000)}\n\n\n Raises:\n ModuleNotFoundError:\n If the python package ``nltk`` is not installed.\n ValueError:\n If any of the ``rouge_keys`` does not belong to the allowed set of keys.\n\n References:\n [1] ROUGE: A Package for Automatic Evaluation of Summaries by Chin-Yew Lin. 
https://aclanthology.org/W04-1013/\n \"\"\"\n if use_stemmer:\n if not _NLTK_AVAILABLE:\n raise ModuleNotFoundError(\"Stemmer requires that `nltk` is installed. Use `pip install nltk`.\")\n import nltk\n\n stemmer = nltk.stem.porter.PorterStemmer() if use_stemmer else None\n\n if not isinstance(rouge_keys, tuple):\n rouge_keys = (rouge_keys,)\n for key in rouge_keys:\n if key not in ALLOWED_ROUGE_KEYS.keys():\n raise ValueError(f\"Got unknown rouge key {key}. Expected to be one of {list(ALLOWED_ROUGE_KEYS.keys())}\")\n rouge_keys_values = [ALLOWED_ROUGE_KEYS[key] for key in rouge_keys]\n\n if isinstance(target, list) and all(isinstance(tgt, str) for tgt in target):\n target = [target] if isinstance(preds, str) else [[tgt] for tgt in target]\n\n if isinstance(preds, str):\n preds = [preds]\n\n if isinstance(target, str):\n target = [[target]]\n\n sentence_results: Dict[Union[int, str], List[Dict[str, Tensor]]] = _rouge_score_update(\n preds,\n target,\n rouge_keys_values,\n stemmer=stemmer,\n normalizer=normalizer,\n tokenizer=tokenizer,\n accumulate=accumulate,\n )\n\n output: Dict[str, List[Tensor]] = {}\n for rouge_key in rouge_keys_values:\n for tp in [\"fmeasure\", \"precision\", \"recall\"]:\n output[f\"rouge{rouge_key}_{tp}\"] = []\n\n for rouge_key, metrics in sentence_results.items():\n for metric in metrics:\n for tp, value in metric.items():\n output[f\"rouge{rouge_key}_{tp}\"].append(value)\n\n return _rouge_score_compute(output)\n", "path": "src/torchmetrics/functional/text/rouge.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index aac7236fb06..a8901c43b93 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -189,6 +189,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fixed use of `prefix` and `postfix` in nested `MetricCollection` ([#1773](https://github.com/Lightning-AI/torchmetrics/pull/1773)) +- Fixed lookup for punkt sources being downloaded in `RougeScore` [#1789](https://github.com/Lightning-AI/torchmetrics/pull/1789) + ## [0.11.4] - 2023-03-10 ### Fixed diff --git a/src/torchmetrics/functional/text/rouge.py b/src/torchmetrics/functional/text/rouge.py index cfeee0d5874..015d7014ff6 100644 --- a/src/torchmetrics/functional/text/rouge.py +++ b/src/torchmetrics/functional/text/rouge.py @@ -49,7 +49,7 @@ def _ensure_nltk_punkt_is_downloaded() -> None: import nltk try: - nltk.data.find("tokenizers/punkt.zip") + nltk.data.find("tokenizers/punkt") except LookupError: try: nltk.download("punkt", quiet=True, force=False, halt_on_error=False, raise_on_error=True)
microsoft__botbuilder-python-2117
valid_token_issuers incorrectly populated during SingleTenant validation ### [Github issues](https://github.com/Microsoft/botbuilder-python) should be used for bugs and feature requests. Use [Stack Overflow](https://stackoverflow.com/questions/tagged/botframework) for general "how-to" questions. ## Version 4.15.0 ## Describe the bug Tenant-specific token issuers (valid_token_issuers) are added as a nested element, instead of as individual issuers. ## To Reproduce Steps to reproduce the behavior: Authenticating with app_type 'SingleTenant' will result in unauthorized requests. ## Expected behavior Tenant-specific token issuers are treated as valid.
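The root cause is plain Python list semantics: `append` pushes the whole list of tenant issuers onto the issuer list as a single nested element, while `extend` adds each issuer individually, which is what the validator needs. A minimal sketch (the issuer URLs are illustrative only, not taken from the SDK):

```python
# Built-in issuers already accepted by the validator (illustrative values).
issuers = ["https://login.microsoftonline.com/common/v2.0"]

# Tenant-specific issuers coming from the authentication configuration.
valid_token_issuers = ["https://login.microsoftonline.com/<tenant-id>/v2.0"]

issuers.append(valid_token_issuers)
# -> ["https://login.microsoftonline.com/common/v2.0",
#     ["https://login.microsoftonline.com/<tenant-id>/v2.0"]]
# The tenant issuer is buried inside a nested list, so it never matches a
# token's "iss" claim and SingleTenant requests come back unauthorized.

issuers = ["https://login.microsoftonline.com/common/v2.0"]
issuers.extend(valid_token_issuers)
# -> ["https://login.microsoftonline.com/common/v2.0",
#     "https://login.microsoftonline.com/<tenant-id>/v2.0"]
# Each tenant issuer is a top-level entry, so validation succeeds.
```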
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom datetime import timedelta\nfrom typing import Dict, Union\n\nimport jwt\n\nfrom .authentication_configuration import AuthenticationConfiguration\nfrom .authentication_constants import AuthenticationConstants\nfrom .claims_identity import ClaimsIdentity\nfrom .credential_provider import CredentialProvider\nfrom .government_constants import GovernmentConstants\nfrom .verify_options import VerifyOptions\nfrom .jwt_token_extractor import JwtTokenExtractor\nfrom .channel_provider import ChannelProvider\n\n\nclass SkillValidation:\n # TODO: Remove circular dependcies after C# refactor\n # pylint: disable=import-outside-toplevel\n\n \"\"\"\n Validates JWT tokens sent to and from a Skill.\n \"\"\"\n\n @staticmethod\n def is_skill_token(auth_header: str) -> bool:\n \"\"\"\n Determines if a given Auth header is from from a skill to bot or bot to skill request.\n :param auth_header: Bearer Token, in the \"Bearer [Long String]\" Format.\n :return bool:\n \"\"\"\n from .jwt_token_validation import JwtTokenValidation\n\n if not JwtTokenValidation.is_valid_token_format(auth_header):\n return False\n\n bearer_token = auth_header.split(\" \")[1]\n\n # Parse the Big Long String into an actual token.\n token = jwt.decode(bearer_token, options={\"verify_signature\": False})\n return SkillValidation.is_skill_claim(token)\n\n @staticmethod\n def is_skill_claim(claims: Dict[str, object]) -> bool:\n \"\"\"\n Checks if the given list of claims represents a skill.\n :param claims: A dict of claims.\n :return bool:\n \"\"\"\n if (\n claims.get(AuthenticationConstants.APP_ID_CLAIM, None)\n == AuthenticationConstants.ANONYMOUS_SKILL_APP_ID\n ):\n return True\n\n if AuthenticationConstants.VERSION_CLAIM not in claims:\n return False\n\n audience = claims.get(AuthenticationConstants.AUDIENCE_CLAIM)\n\n # The audience is https://api.botframework.com and not an appId.\n if (\n not audience\n or audience == AuthenticationConstants.TO_BOT_FROM_CHANNEL_TOKEN_ISSUER\n ):\n return False\n\n from .jwt_token_validation import JwtTokenValidation\n\n app_id = JwtTokenValidation.get_app_id_from_claims(claims)\n\n if not app_id:\n return False\n\n # Skill claims must contain and app ID and the AppID must be different than the audience.\n return app_id != audience\n\n @staticmethod\n async def authenticate_channel_token(\n auth_header: str,\n credentials: CredentialProvider,\n channel_service_or_provider: Union[str, ChannelProvider],\n channel_id: str,\n auth_configuration: AuthenticationConfiguration,\n ) -> ClaimsIdentity:\n if auth_configuration is None:\n raise Exception(\n \"auth_configuration cannot be None in SkillValidation.authenticate_channel_token\"\n )\n\n from .jwt_token_validation import JwtTokenValidation\n\n if isinstance(channel_service_or_provider, ChannelProvider):\n is_gov = channel_service_or_provider.is_government()\n else:\n is_gov = JwtTokenValidation.is_government(channel_service_or_provider)\n\n open_id_metadata_url = (\n GovernmentConstants.TO_BOT_FROM_EMULATOR_OPENID_METADATA_URL\n if is_gov\n else AuthenticationConstants.TO_BOT_FROM_EMULATOR_OPENID_METADATA_URL\n )\n\n token_validation_parameters = VerifyOptions(\n issuer=[\n \"https://sts.windows.net/d6d49420-f39b-4df7-a1dc-d59a935871db/\", # v3.1, 1.0 token\n \"https://login.microsoftonline.com/d6d49420-f39b-4df7-a1dc-d59a935871db/v2.0\", # v3.1, 2.0 token\n \"https://sts.windows.net/f8cdef31-a31e-4b4a-93e4-5f571e91255a/\", # v3.2, 1.0 
token\n \"https://login.microsoftonline.com/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0\", # v3.2, 2.0 token\n \"https://sts.windows.net/cab8a31a-1906-4287-a0d8-4eef66b95f6e/\", # US Gov, 1.0 token\n \"https://login.microsoftonline.us/cab8a31a-1906-4287-a0d8-4eef66b95f6e/v2.0\", # US Gov, 2.0 token\n \"https://login.microsoftonline.us/f8cdef31-a31e-4b4a-93e4-5f571e91255a/\", # US Gov, 1.0 token\n \"https://login.microsoftonline.us/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0\", # US Gov, 2.0 token\n ],\n audience=None,\n clock_tolerance=timedelta(minutes=5),\n ignore_expiration=False,\n )\n\n if auth_configuration.valid_token_issuers:\n token_validation_parameters.issuer.append(\n auth_configuration.valid_token_issuers\n )\n\n token_extractor = JwtTokenExtractor(\n token_validation_parameters,\n open_id_metadata_url,\n AuthenticationConstants.ALLOWED_SIGNING_ALGORITHMS,\n )\n\n identity = await token_extractor.get_identity_from_auth_header(\n auth_header, channel_id, auth_configuration.required_endorsements\n )\n await SkillValidation._validate_identity(identity, credentials)\n\n return identity\n\n @staticmethod\n def create_anonymous_skill_claim():\n \"\"\"\n Creates a ClaimsIdentity for an anonymous (unauthenticated) skill.\n :return ClaimsIdentity:\n \"\"\"\n return ClaimsIdentity(\n {\n AuthenticationConstants.APP_ID_CLAIM: AuthenticationConstants.ANONYMOUS_SKILL_APP_ID\n },\n True,\n AuthenticationConstants.ANONYMOUS_AUTH_TYPE,\n )\n\n @staticmethod\n async def _validate_identity(\n identity: ClaimsIdentity, credentials: CredentialProvider\n ):\n if not identity:\n # No valid identity. Not Authorized.\n raise PermissionError(\"Invalid Identity\")\n\n if not identity.is_authenticated:\n # The token is in some way invalid. Not Authorized.\n raise PermissionError(\"Token Not Authenticated\")\n\n version_claim = identity.claims.get(AuthenticationConstants.VERSION_CLAIM)\n if not version_claim:\n # No version claim\n raise PermissionError(\n f\"'{AuthenticationConstants.VERSION_CLAIM}' claim is required on skill Tokens.\"\n )\n\n # Look for the \"aud\" claim, but only if issued from the Bot Framework\n audience_claim = identity.claims.get(AuthenticationConstants.AUDIENCE_CLAIM)\n if not audience_claim:\n # Claim is not present or doesn't have a value. Not Authorized.\n raise PermissionError(\n f\"'{AuthenticationConstants.AUDIENCE_CLAIM}' claim is required on skill Tokens.\"\n )\n\n if not await credentials.is_valid_appid(audience_claim):\n # The AppId is not valid. Not Authorized.\n raise PermissionError(\"Invalid audience.\")\n\n from .jwt_token_validation import JwtTokenValidation\n\n app_id = JwtTokenValidation.get_app_id_from_claims(identity.claims)\n if not app_id:\n # Invalid AppId\n raise PermissionError(\"Invalid app_id.\")\n", "path": "libraries/botframework-connector/botframework/connector/auth/skill_validation.py" } ]
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom datetime import timedelta\nfrom typing import Dict, Union\n\nimport jwt\n\nfrom .authentication_configuration import AuthenticationConfiguration\nfrom .authentication_constants import AuthenticationConstants\nfrom .claims_identity import ClaimsIdentity\nfrom .credential_provider import CredentialProvider\nfrom .government_constants import GovernmentConstants\nfrom .verify_options import VerifyOptions\nfrom .jwt_token_extractor import JwtTokenExtractor\nfrom .channel_provider import ChannelProvider\n\n\nclass SkillValidation:\n # TODO: Remove circular dependcies after C# refactor\n # pylint: disable=import-outside-toplevel\n\n \"\"\"\n Validates JWT tokens sent to and from a Skill.\n \"\"\"\n\n @staticmethod\n def is_skill_token(auth_header: str) -> bool:\n \"\"\"\n Determines if a given Auth header is from from a skill to bot or bot to skill request.\n :param auth_header: Bearer Token, in the \"Bearer [Long String]\" Format.\n :return bool:\n \"\"\"\n from .jwt_token_validation import JwtTokenValidation\n\n if not JwtTokenValidation.is_valid_token_format(auth_header):\n return False\n\n bearer_token = auth_header.split(\" \")[1]\n\n # Parse the Big Long String into an actual token.\n token = jwt.decode(bearer_token, options={\"verify_signature\": False})\n return SkillValidation.is_skill_claim(token)\n\n @staticmethod\n def is_skill_claim(claims: Dict[str, object]) -> bool:\n \"\"\"\n Checks if the given list of claims represents a skill.\n :param claims: A dict of claims.\n :return bool:\n \"\"\"\n if (\n claims.get(AuthenticationConstants.APP_ID_CLAIM, None)\n == AuthenticationConstants.ANONYMOUS_SKILL_APP_ID\n ):\n return True\n\n if AuthenticationConstants.VERSION_CLAIM not in claims:\n return False\n\n audience = claims.get(AuthenticationConstants.AUDIENCE_CLAIM)\n\n # The audience is https://api.botframework.com and not an appId.\n if (\n not audience\n or audience == AuthenticationConstants.TO_BOT_FROM_CHANNEL_TOKEN_ISSUER\n ):\n return False\n\n from .jwt_token_validation import JwtTokenValidation\n\n app_id = JwtTokenValidation.get_app_id_from_claims(claims)\n\n if not app_id:\n return False\n\n # Skill claims must contain and app ID and the AppID must be different than the audience.\n return app_id != audience\n\n @staticmethod\n async def authenticate_channel_token(\n auth_header: str,\n credentials: CredentialProvider,\n channel_service_or_provider: Union[str, ChannelProvider],\n channel_id: str,\n auth_configuration: AuthenticationConfiguration,\n ) -> ClaimsIdentity:\n if auth_configuration is None:\n raise Exception(\n \"auth_configuration cannot be None in SkillValidation.authenticate_channel_token\"\n )\n\n from .jwt_token_validation import JwtTokenValidation\n\n if isinstance(channel_service_or_provider, ChannelProvider):\n is_gov = channel_service_or_provider.is_government()\n else:\n is_gov = JwtTokenValidation.is_government(channel_service_or_provider)\n\n open_id_metadata_url = (\n GovernmentConstants.TO_BOT_FROM_EMULATOR_OPENID_METADATA_URL\n if is_gov\n else AuthenticationConstants.TO_BOT_FROM_EMULATOR_OPENID_METADATA_URL\n )\n\n token_validation_parameters = VerifyOptions(\n issuer=[\n \"https://sts.windows.net/d6d49420-f39b-4df7-a1dc-d59a935871db/\", # v3.1, 1.0 token\n \"https://login.microsoftonline.com/d6d49420-f39b-4df7-a1dc-d59a935871db/v2.0\", # v3.1, 2.0 token\n \"https://sts.windows.net/f8cdef31-a31e-4b4a-93e4-5f571e91255a/\", # v3.2, 1.0 
token\n \"https://login.microsoftonline.com/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0\", # v3.2, 2.0 token\n \"https://sts.windows.net/cab8a31a-1906-4287-a0d8-4eef66b95f6e/\", # US Gov, 1.0 token\n \"https://login.microsoftonline.us/cab8a31a-1906-4287-a0d8-4eef66b95f6e/v2.0\", # US Gov, 2.0 token\n \"https://login.microsoftonline.us/f8cdef31-a31e-4b4a-93e4-5f571e91255a/\", # US Gov, 1.0 token\n \"https://login.microsoftonline.us/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0\", # US Gov, 2.0 token\n ],\n audience=None,\n clock_tolerance=timedelta(minutes=5),\n ignore_expiration=False,\n )\n\n if auth_configuration.valid_token_issuers:\n token_validation_parameters.issuer.extend(\n auth_configuration.valid_token_issuers\n )\n\n token_extractor = JwtTokenExtractor(\n token_validation_parameters,\n open_id_metadata_url,\n AuthenticationConstants.ALLOWED_SIGNING_ALGORITHMS,\n )\n\n identity = await token_extractor.get_identity_from_auth_header(\n auth_header, channel_id, auth_configuration.required_endorsements\n )\n await SkillValidation._validate_identity(identity, credentials)\n\n return identity\n\n @staticmethod\n def create_anonymous_skill_claim():\n \"\"\"\n Creates a ClaimsIdentity for an anonymous (unauthenticated) skill.\n :return ClaimsIdentity:\n \"\"\"\n return ClaimsIdentity(\n {\n AuthenticationConstants.APP_ID_CLAIM: AuthenticationConstants.ANONYMOUS_SKILL_APP_ID\n },\n True,\n AuthenticationConstants.ANONYMOUS_AUTH_TYPE,\n )\n\n @staticmethod\n async def _validate_identity(\n identity: ClaimsIdentity, credentials: CredentialProvider\n ):\n if not identity:\n # No valid identity. Not Authorized.\n raise PermissionError(\"Invalid Identity\")\n\n if not identity.is_authenticated:\n # The token is in some way invalid. Not Authorized.\n raise PermissionError(\"Token Not Authenticated\")\n\n version_claim = identity.claims.get(AuthenticationConstants.VERSION_CLAIM)\n if not version_claim:\n # No version claim\n raise PermissionError(\n f\"'{AuthenticationConstants.VERSION_CLAIM}' claim is required on skill Tokens.\"\n )\n\n # Look for the \"aud\" claim, but only if issued from the Bot Framework\n audience_claim = identity.claims.get(AuthenticationConstants.AUDIENCE_CLAIM)\n if not audience_claim:\n # Claim is not present or doesn't have a value. Not Authorized.\n raise PermissionError(\n f\"'{AuthenticationConstants.AUDIENCE_CLAIM}' claim is required on skill Tokens.\"\n )\n\n if not await credentials.is_valid_appid(audience_claim):\n # The AppId is not valid. Not Authorized.\n raise PermissionError(\"Invalid audience.\")\n\n from .jwt_token_validation import JwtTokenValidation\n\n app_id = JwtTokenValidation.get_app_id_from_claims(identity.claims)\n if not app_id:\n # Invalid AppId\n raise PermissionError(\"Invalid app_id.\")\n", "path": "libraries/botframework-connector/botframework/connector/auth/skill_validation.py" } ]
diff --git a/libraries/botframework-connector/botframework/connector/auth/skill_validation.py b/libraries/botframework-connector/botframework/connector/auth/skill_validation.py index b708e27cb..8c35f1b0a 100644 --- a/libraries/botframework-connector/botframework/connector/auth/skill_validation.py +++ b/libraries/botframework-connector/botframework/connector/auth/skill_validation.py @@ -120,7 +120,7 @@ async def authenticate_channel_token( ) if auth_configuration.valid_token_issuers: - token_validation_parameters.issuer.append( + token_validation_parameters.issuer.extend( auth_configuration.valid_token_issuers )
python-poetry__poetry-438
Trailing newline not included in profile `export PATH` (new style installer) - **OS version and name**: Debian Buster - **Poetry version**: 0.12.0a1 ## Issue I installed Poetry with the new style installer : ``` curl -sSL -O https://raw.githubusercontent.com/sdispater/poetry/develop/get-poetry.py python3 get-poetry.py --preview ``` The installation went well, but the line added to the shell startup files for updating the `PATH` environment variable doesn't end with a newline.
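The record is truncated here, so the exact line the installer appends is not shown; the sketch below is a hypothetical helper that only illustrates the general fix implied by the report, namely that whatever gets appended to the shell profile should end with an explicit newline:

```python
def add_to_profile(profile_path: str, export_line: str) -> None:
    """Append a PATH export line to a shell profile, keeping a trailing newline."""
    # `export_line` is illustrative; the real installer's content is not shown in this record.
    with open(profile_path, "a") as f:
        # Without the final "\n", the next line the user or another tool appends
        # would be glued onto the end of the export statement.
        f.write("\n" + export_line + "\n")


# Example usage with a placeholder export statement:
# add_to_profile("/home/user/.profile", 'export PATH="$HOME/.poetry/bin:$PATH"')
```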
[ { "content": "\"\"\"\nThis script will install poetry and its dependencies\nin isolation from the rest of the system.\n\nIt does, in order:\n\n - Downloads the latest stable (or pre-release) version of poetry.\n - Downloads all its dependencies in the poetry/_vendor directory.\n - Copies it and all extra files in $POETRY_HOME.\n - Updates the PATH in a system-specific way.\n\nThere will be a `poetry` script that will be installed in $POETRY_HOME/bin\nwhich will act as the poetry command but is slightly different in the sense\nthat it will use the current Python installation.\n\nWhat this means is that one Poetry installation can serve for multiple\nPython versions.\n\"\"\"\nimport argparse\nimport hashlib\nimport json\nimport os\nimport platform\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\n\nfrom contextlib import closing\nfrom contextlib import contextmanager\nfrom functools import cmp_to_key\nfrom gzip import GzipFile\nfrom io import UnsupportedOperation\n\ntry:\n from urllib.error import HTTPError\n from urllib.request import Request\n from urllib.request import urlopen\nexcept ImportError:\n from urllib2 import HTTPError\n from urllib2 import Request\n from urllib2 import urlopen\n\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\ntry:\n try:\n import winreg\n except ImportError:\n import _winreg as winreg\nexcept ImportError:\n winreg = None\n\n\nWINDOWS = sys.platform.startswith(\"win\") or (sys.platform == \"cli\" and os.name == \"nt\")\n\n\nFOREGROUND_COLORS = {\n \"black\": 30,\n \"red\": 31,\n \"green\": 32,\n \"yellow\": 33,\n \"blue\": 34,\n \"magenta\": 35,\n \"cyan\": 36,\n \"white\": 37,\n}\n\nBACKGROUND_COLORS = {\n \"black\": 40,\n \"red\": 41,\n \"green\": 42,\n \"yellow\": 43,\n \"blue\": 44,\n \"magenta\": 45,\n \"cyan\": 46,\n \"white\": 47,\n}\n\nOPTIONS = {\"bold\": 1, \"underscore\": 4, \"blink\": 5, \"reverse\": 7, \"conceal\": 8}\n\n\ndef style(fg, bg, options):\n codes = []\n\n if fg:\n codes.append(FOREGROUND_COLORS[fg])\n\n if bg:\n codes.append(BACKGROUND_COLORS[bg])\n\n if options:\n if not isinstance(options, (list, tuple)):\n options = [options]\n\n for option in options:\n codes.append(OPTIONS[option])\n\n return \"\\033[{}m\".format(\";\".join(map(str, codes)))\n\n\nSTYLES = {\n \"info\": style(\"green\", None, None),\n \"comment\": style(\"yellow\", None, None),\n \"error\": style(\"red\", None, None),\n \"warning\": style(\"yellow\", None, None),\n}\n\n\ndef is_decorated():\n if platform.system().lower() == \"windows\":\n return (\n os.getenv(\"ANSICON\") is not None\n or \"ON\" == os.getenv(\"ConEmuANSI\")\n or \"xterm\" == os.getenv(\"Term\")\n )\n\n if not hasattr(sys.stdout, \"fileno\"):\n return False\n\n try:\n return os.isatty(sys.stdout.fileno())\n except UnsupportedOperation:\n return False\n\n\ndef colorize(style, text):\n if not is_decorated():\n return text\n\n return \"{}{}\\033[0m\".format(STYLES[style], text)\n\n\n@contextmanager\ndef temporary_directory(*args, **kwargs):\n try:\n from tempfile import TemporaryDirectory\n\n with TemporaryDirectory(*args, **kwargs) as name:\n yield name\n except ImportError:\n name = tempfile.mkdtemp(*args, **kwargs)\n\n yield name\n\n shutil.rmtree(name)\n\n\ndef expanduser(path):\n \"\"\"\n Expand ~ and ~user constructions.\n\n Includes a workaround for http://bugs.python.org/issue14768\n \"\"\"\n expanded = os.path.expanduser(path)\n if path.startswith(\"~/\") and expanded.startswith(\"//\"):\n expanded = expanded[1:]\n\n return 
expanded\n\n\nHOME = expanduser(\"~\")\nPOETRY_HOME = os.path.join(HOME, \".poetry\")\nPOETRY_BIN = os.path.join(POETRY_HOME, \"bin\")\nPOETRY_ENV = os.path.join(POETRY_HOME, \"env\")\nPOETRY_LIB = os.path.join(POETRY_HOME, \"lib\")\nPOETRY_LIB_BACKUP = os.path.join(POETRY_HOME, \"lib-backup\")\n\n\nBIN = \"\"\"#!/usr/bin/env python\nimport glob\nimport sys\nimport os\n\nlib = os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"lib\"))\n\nsys.path.insert(0, lib)\n\nif __name__ == \"__main__\":\n from poetry.console import main\n\n main()\n\"\"\"\n\nBAT = \"@echo off\\r\\npython %USERPROFILE%/.poetry/bin/poetry %*\\r\\n\"\n\n\nPRE_MESSAGE = \"\"\"# Welcome to {poetry}!\n\nThis will download and install the latest version of {poetry},\na dependency and package manager for Python.\n\nIt will add the `poetry` command to {poetry}'s bin directory, located at:\n\n{poetry_home_bin}\n\n{platform_msg}\n\nYou can uninstall at any time with `poetry self:uninstall`,\nor by executing this script with the --uninstall option,\nand these changes will be reverted.\n\"\"\"\n\nPRE_UNINSTALL_MESSAGE = \"\"\"# We are sorry to see you go!\n\nThis will uninstall {poetry}.\n\nIt will remove the `poetry` command from {poetry}'s bin directory, located at:\n\n{poetry_home_bin}\n\nThis will also remove {poetry} from your system's PATH.\n\"\"\"\n\n\nPRE_MESSAGE_UNIX = \"\"\"This path will then be added to your `PATH` environment variable by\nmodifying the profile file{plural} located at:\n\n{rcfiles}\"\"\"\n\n\nPRE_MESSAGE_WINDOWS = \"\"\"This path will then be added to your `PATH` environment variable by\nmodifying the `HKEY_CURRENT_USER/Environment/PATH` registry key.\"\"\"\n\nPRE_MESSAGE_NO_MODIFY_PATH = \"\"\"This path needs to be in your `PATH` environment variable,\nbut will not be added automatically.\"\"\"\n\nPOST_MESSAGE_UNIX = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need {poetry}'s bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable. Next time you log in this will be done\nautomatically.\n\nTo configure your current shell run `source {poetry_home_env}`\n\"\"\"\n\nPOST_MESSAGE_WINDOWS = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need Poetry's bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable. Future applications will automatically have the\ncorrect environment, but you may need to restart your current shell.\n\"\"\"\n\nPOST_MESSAGE_UNIX_NO_MODIFY_PATH = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need {poetry}'s bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable.\n\nTo configure your current shell run `source {poetry_home_env}`\n\"\"\"\n\nPOST_MESSAGE_WINDOWS_NO_MODIFY_PATH = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need Poetry's bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable. 
This has not been done automatically.\n\"\"\"\n\n\nclass Installer:\n\n CURRENT_PYTHON = sys.executable\n CURRENT_PYTHON_VERSION = sys.version_info[:2]\n METADATA_URL = \"https://api.github.com/repos/sdispater/poetry/releases\"\n VERSION_REGEX = re.compile(\n \"v?(\\d+)(?:\\.(\\d+))?(?:\\.(\\d+))?(?:\\.(\\d+))?\"\n \"(\"\n \"[._-]?\"\n \"(?:(stable|beta|b|RC|alpha|a|patch|pl|p)((?:[.-]?\\d+)*)?)?\"\n \"([.-]?dev)?\"\n \")?\"\n \"(?:\\+[^\\s]+)?\"\n )\n\n BASE_URL = \"https://github.com/sdispater/poetry/releases/download/\"\n\n def __init__(\n self,\n version=None,\n preview=False,\n force=False,\n accept_all=False,\n base_url=BASE_URL,\n ):\n self._version = version\n self._preview = preview\n self._force = force\n self._modify_path = True\n self._accept_all = accept_all\n self._base_url = base_url\n\n def allows_prereleases(self):\n return self._preview\n\n def run(self):\n version, current_version = self.get_version()\n\n if version is None:\n return 0\n\n self.customize_install()\n self.display_pre_message()\n self.ensure_home()\n\n try:\n self.install(version, upgrade=current_version is not None)\n except subprocess.CalledProcessError as e:\n print(colorize(\"error\", \"An error has occured: {}\".format(str(e))))\n print(e.output.decode())\n\n return e.returncode\n\n self.display_post_message(version)\n\n return 0\n\n def uninstall(self):\n self.display_pre_uninstall_message()\n\n if not self.customize_uninstall():\n return\n\n self.remove_home()\n self.remove_from_path()\n\n def get_version(self):\n print(colorize(\"info\", \"Retrieving Poetry metadata\"))\n\n metadata = json.loads(self._get(self.METADATA_URL))\n\n def _compare_versions(x, y):\n mx = self.VERSION_REGEX.match(x)\n my = self.VERSION_REGEX.match(y)\n\n vx = tuple(int(p) for p in mx.groups()[:3]) + (mx.group(5),)\n vy = tuple(int(p) for p in my.groups()[:3]) + (my.group(5),)\n\n if vx < vy:\n return -1\n elif vx > vy:\n return 1\n\n return 0\n\n print(\"\")\n releases = sorted(\n [m[\"tag_name\"] for m in metadata], key=cmp_to_key(_compare_versions)\n )\n\n if self._version and self._version not in releases:\n print(colorize(\"error\", \"Version {} does not exist.\".format(self._version)))\n\n return None, None\n\n version = self._version\n if not version:\n for release in reversed(releases):\n m = self.VERSION_REGEX.match(release)\n if m.group(5) and not self.allows_prereleases():\n continue\n\n version = release\n\n break\n\n current_version = None\n if os.path.exists(POETRY_LIB):\n with open(os.path.join(POETRY_LIB, \"poetry\", \"__version__.py\")) as f:\n version_content = f.read()\n\n current_version_re = re.match(\n '(?ms).*__version__ = \"(.+)\".*', version_content\n )\n if not current_version_re:\n print(\n colorize(\n \"warning\",\n \"Unable to get the current Poetry version. Assuming None\",\n )\n )\n else:\n current_version = current_version_re.group(1)\n\n if current_version == version and not self._force:\n print(\"Latest version already installed.\")\n return None, current_version\n\n return version, current_version\n\n def customize_install(self):\n if not self._accept_all:\n print(\"Before we start, please answer the following questions.\")\n print(\"You may simple press the Enter key to keave unchanged.\")\n\n modify_path = input(\"Modify PATH variable? ([y]/n) \") or \"y\"\n if modify_path.lower() in {\"n\", \"no\"}:\n self._modify_path = False\n\n print(\"\")\n\n def customize_uninstall(self):\n if not self._accept_all:\n print()\n\n uninstall = (\n input(\"Are you sure you want to uninstall Poetry? 
(y/[n]) \") or \"n\"\n )\n if uninstall.lower() not in {\"y\", \"yes\"}:\n return False\n\n print(\"\")\n\n return True\n\n def customize_install(self):\n if not self._accept_all:\n print(\"Before we start, please answer the following questions.\")\n print(\"You may simple press the Enter key to keave unchanged.\")\n\n modify_path = input(\"Modify PATH variable? ([y]/n) \") or \"y\"\n if modify_path.lower() in {\"n\", \"no\"}:\n self._modify_path = False\n\n print(\"\")\n\n def ensure_home(self):\n \"\"\"\n Ensures that $POETRY_HOME exists or create it.\n \"\"\"\n if not os.path.exists(POETRY_HOME):\n os.mkdir(POETRY_HOME, 0o755)\n\n def remove_home(self):\n \"\"\"\n Removes $POETRY_HOME.\n \"\"\"\n if not os.path.exists(POETRY_HOME):\n return\n\n shutil.rmtree(POETRY_HOME)\n\n def install(self, version, upgrade=False):\n \"\"\"\n Installs Poetry in $POETRY_HOME.\n \"\"\"\n print(\"Installing version: \" + colorize(\"info\", version))\n\n self.make_lib(version)\n self.make_bin()\n self.make_env()\n self.update_path()\n\n return 0\n\n def make_lib(self, version):\n \"\"\"\n Packs everything into a single lib/ directory.\n \"\"\"\n if os.path.exists(POETRY_LIB_BACKUP):\n shutil.rmtree(POETRY_LIB_BACKUP)\n\n # Backup the current installation\n if os.path.exists(POETRY_LIB):\n shutil.copytree(POETRY_LIB, POETRY_LIB_BACKUP)\n shutil.rmtree(POETRY_LIB)\n\n try:\n self._make_lib(version)\n except Exception:\n if not os.path.exists(POETRY_LIB_BACKUP):\n raise\n\n shutil.copytree(POETRY_LIB_BACKUP, POETRY_LIB)\n shutil.rmtree(POETRY_LIB_BACKUP)\n\n raise\n finally:\n if os.path.exists(POETRY_LIB_BACKUP):\n shutil.rmtree(POETRY_LIB_BACKUP)\n\n def _make_lib(self, version):\n # We get the payload from the remote host\n url = self._base_url + \"{}/\".format(version)\n name = \"poetry-{}-{}.tar.gz\".format(version, sys.platform)\n checksum = \"poetry-{}-{}.sha256sum\".format(version, sys.platform)\n\n try:\n r = urlopen(url + \"{}\".format(checksum))\n except HTTPError as e:\n if e.code == 404:\n raise RuntimeError(\"Could not find {} file\".format(checksum))\n\n raise\n\n checksum = r.read().decode()\n\n try:\n r = urlopen(url + \"{}\".format(name))\n except HTTPError as e:\n if e.code == 404:\n raise RuntimeError(\"Could not find {} file\".format(name))\n\n raise\n\n meta = r.info()\n size = int(meta[\"Content-Length\"])\n current = 0\n block_size = 8192\n\n print(\n \" - Downloading {} ({:.2f}MB)\".format(\n colorize(\"comment\", name), size / 1024 / 1024\n )\n )\n\n sha = hashlib.sha256()\n with temporary_directory(prefix=\"poetry-installer-\") as dir_:\n tar = os.path.join(dir_, name)\n with open(tar, \"wb\") as f:\n while True:\n buffer = r.read(block_size)\n if not buffer:\n break\n\n current += len(buffer)\n f.write(buffer)\n sha.update(buffer)\n\n # Checking hashes\n if checksum != sha.hexdigest():\n raise RuntimeError(\n \"Hashes for {} do not match: {} != {}\".format(\n name, checksum, sha.hexdigest()\n )\n )\n\n gz = GzipFile(tar, mode=\"rb\")\n try:\n with tarfile.TarFile(tar, fileobj=gz, format=tarfile.PAX_FORMAT) as f:\n f.extractall(POETRY_LIB)\n finally:\n gz.close()\n\n def make_bin(self):\n if not os.path.exists(POETRY_BIN):\n os.mkdir(POETRY_BIN, 0o755)\n\n ext = \"\"\n if WINDOWS:\n with open(os.path.join(POETRY_BIN, \"poetry.bat\"), \"w\") as f:\n f.write(BAT)\n\n with open(os.path.join(POETRY_BIN, \"poetry\"), \"w\") as f:\n f.write(BIN)\n\n if not WINDOWS:\n # Making the file executable\n st = os.stat(os.path.join(POETRY_BIN, \"poetry\"))\n os.chmod(os.path.join(POETRY_BIN, 
\"poetry\"), st.st_mode | stat.S_IEXEC)\n\n def make_env(self):\n if WINDOWS:\n return\n\n with open(os.path.join(POETRY_HOME, \"env\"), \"w\") as f:\n f.write(self.get_export_string())\n\n def update_path(self):\n \"\"\"\n Tries to update the $PATH automatically.\n \"\"\"\n if WINDOWS:\n return self.add_to_windows_path()\n\n # Updating any profile we can on UNIX systems\n export_string = self.get_export_string()\n\n addition = \"\\n{}\".format(export_string)\n\n updated = []\n profiles = self.get_unix_profiles()\n for profile in profiles:\n if not os.path.exists(profile):\n continue\n\n with open(profile, \"r\") as f:\n content = f.read()\n\n if addition not in content:\n with open(profile, \"a\") as f:\n f.write(addition)\n\n updated.append(os.path.relpath(profile, HOME))\n\n def add_to_windows_path(self):\n try:\n old_path = self.get_windows_path_var()\n except WindowsError:\n old_path = None\n\n if old_path is None:\n print(\n colorize(\n \"warning\",\n \"Unable to get the PATH value. It will not be updated automatically\",\n )\n )\n self._modify_path = False\n\n return\n\n new_path = POETRY_BIN\n if POETRY_BIN in old_path:\n old_path = old_path.replace(POETRY_BIN + \";\", \"\")\n\n if old_path:\n new_path += \";\"\n new_path += old_path\n\n self.set_windows_path_var(new_path)\n\n def get_windows_path_var(self):\n with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root:\n with winreg.OpenKey(root, \"Environment\", 0, winreg.KEY_ALL_ACCESS) as key:\n path, _ = winreg.QueryValueEx(key, \"PATH\")\n\n return path\n\n def set_windows_path_var(self, value):\n import ctypes\n from ctypes.wintypes import HWND\n from ctypes.wintypes import LPARAM\n from ctypes.wintypes import LPVOID\n from ctypes.wintypes import UINT\n from ctypes.wintypes import WPARAM\n\n with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root:\n with winreg.OpenKey(root, \"Environment\", 0, winreg.KEY_ALL_ACCESS) as key:\n winreg.SetValueEx(key, \"PATH\", 0, winreg.REG_EXPAND_SZ, value)\n\n # Tell other processes to update their environment\n HWND_BROADCAST = 0xFFFF\n WM_SETTINGCHANGE = 0x1A\n\n SMTO_ABORTIFHUNG = 0x0002\n\n result = ctypes.c_long()\n SendMessageTimeoutW = ctypes.windll.user32.SendMessageTimeoutW\n SendMessageTimeoutW(\n HWND_BROADCAST,\n WM_SETTINGCHANGE,\n 0,\n u\"Environment\",\n SMTO_ABORTIFHUNG,\n 5000,\n ctypes.byref(result),\n )\n\n def remove_from_path(self):\n if WINDOWS:\n return self.remove_from_windows_path()\n\n return self.remove_from_unix_path()\n\n def remove_from_windows_path(self):\n path = self.get_windows_path_var()\n\n poetry_path = POETRY_BIN\n if poetry_path in path:\n path = path.replace(POETRY_BIN + \";\", \"\")\n\n if poetry_path in path:\n path = path.replace(POETRY_BIN, \"\")\n\n self.set_windows_path_var(path)\n\n def remove_from_unix_path(self):\n pass\n\n def get_export_string(self):\n path = POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n export_string = 'export PATH=\"{}:$PATH\"'.format(path)\n\n return export_string\n\n def get_unix_profiles(self):\n profiles = [os.path.join(HOME, \".profile\")]\n\n shell = os.getenv(\"SHELL\")\n if \"zsh\" in shell:\n zdotdir = os.getenv(\"ZDOTDIR\", HOME)\n profiles.append(os.path.join(zdotdir, \".zprofile\"))\n\n bash_profile = os.path.join(HOME, \".bash_profile\")\n if os.path.exists(bash_profile):\n profiles.append(bash_profile)\n\n return profiles\n\n def display_pre_message(self):\n if WINDOWS:\n home = POETRY_BIN.replace(os.getenv(\"USERPROFILE\", \"\"), \"%USERPROFILE%\")\n else:\n home = 
POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n\n kwargs = {\n \"poetry\": colorize(\"info\", \"Poetry\"),\n \"poetry_home_bin\": colorize(\"comment\", home),\n }\n\n if not self._modify_path:\n kwargs[\"platform_msg\"] = PRE_MESSAGE_NO_MODIFY_PATH\n else:\n if WINDOWS:\n kwargs[\"platform_msg\"] = PRE_MESSAGE_WINDOWS\n else:\n profiles = [\n colorize(\"comment\", p.replace(os.getenv(\"HOME\", \"\"), \"$HOME\"))\n for p in self.get_unix_profiles()\n ]\n kwargs[\"platform_msg\"] = PRE_MESSAGE_UNIX.format(\n rcfiles=\"\\n\".join(profiles), plural=\"s\" if len(profiles) > 1 else \"\"\n )\n\n print(PRE_MESSAGE.format(**kwargs))\n\n def display_pre_uninstall_message(self):\n kwargs = {\n \"poetry\": colorize(\"info\", \"Poetry\"),\n \"poetry_home_bin\": colorize(\n \"comment\",\n POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\").replace(\n os.getenv(\"USERPROFILE\", \"\"), \"%USERPROFILE%\"\n ),\n ),\n }\n\n print(PRE_UNINSTALL_MESSAGE.format(**kwargs))\n\n def display_post_message(self, version):\n print(\"\")\n\n kwargs = {\n \"poetry\": colorize(\"info\", \"Poetry\"),\n \"version\": colorize(\"comment\", version),\n }\n\n if WINDOWS:\n message = POST_MESSAGE_WINDOWS\n if not self._modify_path:\n message = POST_MESSAGE_WINDOWS_NO_MODIFY_PATH\n\n poetry_home_bin = POETRY_BIN.replace(\n os.getenv(\"USERPROFILE\", \"\"), \"%USERPROFILE%\"\n )\n else:\n message = POST_MESSAGE_UNIX\n if not self._modify_path:\n message = POST_MESSAGE_UNIX_NO_MODIFY_PATH\n\n poetry_home_bin = POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n kwargs[\"poetry_home_env\"] = colorize(\n \"comment\", POETRY_ENV.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n )\n\n kwargs[\"poetry_home_bin\"] = colorize(\"comment\", poetry_home_bin)\n\n print(message.format(**kwargs))\n\n def call(self, *args):\n return subprocess.check_output(args, stderr=subprocess.STDOUT)\n\n def _get(self, url):\n request = Request(url, headers={\"User-Agent\": \"Python Poetry\"})\n\n with closing(urlopen(request)) as r:\n return r.read()\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Installs the latest (or given) version of poetry\"\n )\n parser.add_argument(\n \"-p\", \"--preview\", dest=\"preview\", action=\"store_true\", default=False\n )\n parser.add_argument(\"--version\", dest=\"version\")\n parser.add_argument(\n \"-f\", \"--force\", dest=\"force\", action=\"store_true\", default=False\n )\n parser.add_argument(\n \"-y\", \"--yes\", dest=\"accept_all\", action=\"store_true\", default=False\n )\n parser.add_argument(\n \"--uninstall\", dest=\"uninstall\", action=\"store_true\", default=False\n )\n\n args = parser.parse_args()\n\n installer = Installer(\n version=args.version or os.getenv(\"POETRY_VERSION\"),\n preview=args.preview or os.getenv(\"POETRY_PREVIEW\"),\n force=args.force,\n accept_all=args.accept_all,\n )\n\n if args.uninstall:\n return installer.uninstall()\n\n return installer.run()\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "get-poetry.py" } ]
[ { "content": "\"\"\"\nThis script will install poetry and its dependencies\nin isolation from the rest of the system.\n\nIt does, in order:\n\n - Downloads the latest stable (or pre-release) version of poetry.\n - Downloads all its dependencies in the poetry/_vendor directory.\n - Copies it and all extra files in $POETRY_HOME.\n - Updates the PATH in a system-specific way.\n\nThere will be a `poetry` script that will be installed in $POETRY_HOME/bin\nwhich will act as the poetry command but is slightly different in the sense\nthat it will use the current Python installation.\n\nWhat this means is that one Poetry installation can serve for multiple\nPython versions.\n\"\"\"\nimport argparse\nimport hashlib\nimport json\nimport os\nimport platform\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\n\nfrom contextlib import closing\nfrom contextlib import contextmanager\nfrom functools import cmp_to_key\nfrom gzip import GzipFile\nfrom io import UnsupportedOperation\n\ntry:\n from urllib.error import HTTPError\n from urllib.request import Request\n from urllib.request import urlopen\nexcept ImportError:\n from urllib2 import HTTPError\n from urllib2 import Request\n from urllib2 import urlopen\n\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\ntry:\n try:\n import winreg\n except ImportError:\n import _winreg as winreg\nexcept ImportError:\n winreg = None\n\n\nWINDOWS = sys.platform.startswith(\"win\") or (sys.platform == \"cli\" and os.name == \"nt\")\n\n\nFOREGROUND_COLORS = {\n \"black\": 30,\n \"red\": 31,\n \"green\": 32,\n \"yellow\": 33,\n \"blue\": 34,\n \"magenta\": 35,\n \"cyan\": 36,\n \"white\": 37,\n}\n\nBACKGROUND_COLORS = {\n \"black\": 40,\n \"red\": 41,\n \"green\": 42,\n \"yellow\": 43,\n \"blue\": 44,\n \"magenta\": 45,\n \"cyan\": 46,\n \"white\": 47,\n}\n\nOPTIONS = {\"bold\": 1, \"underscore\": 4, \"blink\": 5, \"reverse\": 7, \"conceal\": 8}\n\n\ndef style(fg, bg, options):\n codes = []\n\n if fg:\n codes.append(FOREGROUND_COLORS[fg])\n\n if bg:\n codes.append(BACKGROUND_COLORS[bg])\n\n if options:\n if not isinstance(options, (list, tuple)):\n options = [options]\n\n for option in options:\n codes.append(OPTIONS[option])\n\n return \"\\033[{}m\".format(\";\".join(map(str, codes)))\n\n\nSTYLES = {\n \"info\": style(\"green\", None, None),\n \"comment\": style(\"yellow\", None, None),\n \"error\": style(\"red\", None, None),\n \"warning\": style(\"yellow\", None, None),\n}\n\n\ndef is_decorated():\n if platform.system().lower() == \"windows\":\n return (\n os.getenv(\"ANSICON\") is not None\n or \"ON\" == os.getenv(\"ConEmuANSI\")\n or \"xterm\" == os.getenv(\"Term\")\n )\n\n if not hasattr(sys.stdout, \"fileno\"):\n return False\n\n try:\n return os.isatty(sys.stdout.fileno())\n except UnsupportedOperation:\n return False\n\n\ndef colorize(style, text):\n if not is_decorated():\n return text\n\n return \"{}{}\\033[0m\".format(STYLES[style], text)\n\n\n@contextmanager\ndef temporary_directory(*args, **kwargs):\n try:\n from tempfile import TemporaryDirectory\n\n with TemporaryDirectory(*args, **kwargs) as name:\n yield name\n except ImportError:\n name = tempfile.mkdtemp(*args, **kwargs)\n\n yield name\n\n shutil.rmtree(name)\n\n\ndef expanduser(path):\n \"\"\"\n Expand ~ and ~user constructions.\n\n Includes a workaround for http://bugs.python.org/issue14768\n \"\"\"\n expanded = os.path.expanduser(path)\n if path.startswith(\"~/\") and expanded.startswith(\"//\"):\n expanded = expanded[1:]\n\n return 
expanded\n\n\nHOME = expanduser(\"~\")\nPOETRY_HOME = os.path.join(HOME, \".poetry\")\nPOETRY_BIN = os.path.join(POETRY_HOME, \"bin\")\nPOETRY_ENV = os.path.join(POETRY_HOME, \"env\")\nPOETRY_LIB = os.path.join(POETRY_HOME, \"lib\")\nPOETRY_LIB_BACKUP = os.path.join(POETRY_HOME, \"lib-backup\")\n\n\nBIN = \"\"\"#!/usr/bin/env python\nimport glob\nimport sys\nimport os\n\nlib = os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"lib\"))\n\nsys.path.insert(0, lib)\n\nif __name__ == \"__main__\":\n from poetry.console import main\n\n main()\n\"\"\"\n\nBAT = \"@echo off\\r\\npython %USERPROFILE%/.poetry/bin/poetry %*\\r\\n\"\n\n\nPRE_MESSAGE = \"\"\"# Welcome to {poetry}!\n\nThis will download and install the latest version of {poetry},\na dependency and package manager for Python.\n\nIt will add the `poetry` command to {poetry}'s bin directory, located at:\n\n{poetry_home_bin}\n\n{platform_msg}\n\nYou can uninstall at any time with `poetry self:uninstall`,\nor by executing this script with the --uninstall option,\nand these changes will be reverted.\n\"\"\"\n\nPRE_UNINSTALL_MESSAGE = \"\"\"# We are sorry to see you go!\n\nThis will uninstall {poetry}.\n\nIt will remove the `poetry` command from {poetry}'s bin directory, located at:\n\n{poetry_home_bin}\n\nThis will also remove {poetry} from your system's PATH.\n\"\"\"\n\n\nPRE_MESSAGE_UNIX = \"\"\"This path will then be added to your `PATH` environment variable by\nmodifying the profile file{plural} located at:\n\n{rcfiles}\"\"\"\n\n\nPRE_MESSAGE_WINDOWS = \"\"\"This path will then be added to your `PATH` environment variable by\nmodifying the `HKEY_CURRENT_USER/Environment/PATH` registry key.\"\"\"\n\nPRE_MESSAGE_NO_MODIFY_PATH = \"\"\"This path needs to be in your `PATH` environment variable,\nbut will not be added automatically.\"\"\"\n\nPOST_MESSAGE_UNIX = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need {poetry}'s bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable. Next time you log in this will be done\nautomatically.\n\nTo configure your current shell run `source {poetry_home_env}`\n\"\"\"\n\nPOST_MESSAGE_WINDOWS = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need Poetry's bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable. Future applications will automatically have the\ncorrect environment, but you may need to restart your current shell.\n\"\"\"\n\nPOST_MESSAGE_UNIX_NO_MODIFY_PATH = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need {poetry}'s bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable.\n\nTo configure your current shell run `source {poetry_home_env}`\n\"\"\"\n\nPOST_MESSAGE_WINDOWS_NO_MODIFY_PATH = \"\"\"{poetry} ({version}) is installed now. Great!\n\nTo get started you need Poetry's bin directory ({poetry_home_bin}) in your `PATH`\nenvironment variable. 
This has not been done automatically.\n\"\"\"\n\n\nclass Installer:\n\n CURRENT_PYTHON = sys.executable\n CURRENT_PYTHON_VERSION = sys.version_info[:2]\n METADATA_URL = \"https://api.github.com/repos/sdispater/poetry/releases\"\n VERSION_REGEX = re.compile(\n \"v?(\\d+)(?:\\.(\\d+))?(?:\\.(\\d+))?(?:\\.(\\d+))?\"\n \"(\"\n \"[._-]?\"\n \"(?:(stable|beta|b|RC|alpha|a|patch|pl|p)((?:[.-]?\\d+)*)?)?\"\n \"([.-]?dev)?\"\n \")?\"\n \"(?:\\+[^\\s]+)?\"\n )\n\n BASE_URL = \"https://github.com/sdispater/poetry/releases/download/\"\n\n def __init__(\n self,\n version=None,\n preview=False,\n force=False,\n accept_all=False,\n base_url=BASE_URL,\n ):\n self._version = version\n self._preview = preview\n self._force = force\n self._modify_path = True\n self._accept_all = accept_all\n self._base_url = base_url\n\n def allows_prereleases(self):\n return self._preview\n\n def run(self):\n version, current_version = self.get_version()\n\n if version is None:\n return 0\n\n self.customize_install()\n self.display_pre_message()\n self.ensure_home()\n\n try:\n self.install(version, upgrade=current_version is not None)\n except subprocess.CalledProcessError as e:\n print(colorize(\"error\", \"An error has occured: {}\".format(str(e))))\n print(e.output.decode())\n\n return e.returncode\n\n self.display_post_message(version)\n\n return 0\n\n def uninstall(self):\n self.display_pre_uninstall_message()\n\n if not self.customize_uninstall():\n return\n\n self.remove_home()\n self.remove_from_path()\n\n def get_version(self):\n print(colorize(\"info\", \"Retrieving Poetry metadata\"))\n\n metadata = json.loads(self._get(self.METADATA_URL))\n\n def _compare_versions(x, y):\n mx = self.VERSION_REGEX.match(x)\n my = self.VERSION_REGEX.match(y)\n\n vx = tuple(int(p) for p in mx.groups()[:3]) + (mx.group(5),)\n vy = tuple(int(p) for p in my.groups()[:3]) + (my.group(5),)\n\n if vx < vy:\n return -1\n elif vx > vy:\n return 1\n\n return 0\n\n print(\"\")\n releases = sorted(\n [m[\"tag_name\"] for m in metadata], key=cmp_to_key(_compare_versions)\n )\n\n if self._version and self._version not in releases:\n print(colorize(\"error\", \"Version {} does not exist.\".format(self._version)))\n\n return None, None\n\n version = self._version\n if not version:\n for release in reversed(releases):\n m = self.VERSION_REGEX.match(release)\n if m.group(5) and not self.allows_prereleases():\n continue\n\n version = release\n\n break\n\n current_version = None\n if os.path.exists(POETRY_LIB):\n with open(os.path.join(POETRY_LIB, \"poetry\", \"__version__.py\")) as f:\n version_content = f.read()\n\n current_version_re = re.match(\n '(?ms).*__version__ = \"(.+)\".*', version_content\n )\n if not current_version_re:\n print(\n colorize(\n \"warning\",\n \"Unable to get the current Poetry version. Assuming None\",\n )\n )\n else:\n current_version = current_version_re.group(1)\n\n if current_version == version and not self._force:\n print(\"Latest version already installed.\")\n return None, current_version\n\n return version, current_version\n\n def customize_install(self):\n if not self._accept_all:\n print(\"Before we start, please answer the following questions.\")\n print(\"You may simple press the Enter key to keave unchanged.\")\n\n modify_path = input(\"Modify PATH variable? ([y]/n) \") or \"y\"\n if modify_path.lower() in {\"n\", \"no\"}:\n self._modify_path = False\n\n print(\"\")\n\n def customize_uninstall(self):\n if not self._accept_all:\n print()\n\n uninstall = (\n input(\"Are you sure you want to uninstall Poetry? 
(y/[n]) \") or \"n\"\n )\n if uninstall.lower() not in {\"y\", \"yes\"}:\n return False\n\n print(\"\")\n\n return True\n\n def customize_install(self):\n if not self._accept_all:\n print(\"Before we start, please answer the following questions.\")\n print(\"You may simple press the Enter key to keave unchanged.\")\n\n modify_path = input(\"Modify PATH variable? ([y]/n) \") or \"y\"\n if modify_path.lower() in {\"n\", \"no\"}:\n self._modify_path = False\n\n print(\"\")\n\n def ensure_home(self):\n \"\"\"\n Ensures that $POETRY_HOME exists or create it.\n \"\"\"\n if not os.path.exists(POETRY_HOME):\n os.mkdir(POETRY_HOME, 0o755)\n\n def remove_home(self):\n \"\"\"\n Removes $POETRY_HOME.\n \"\"\"\n if not os.path.exists(POETRY_HOME):\n return\n\n shutil.rmtree(POETRY_HOME)\n\n def install(self, version, upgrade=False):\n \"\"\"\n Installs Poetry in $POETRY_HOME.\n \"\"\"\n print(\"Installing version: \" + colorize(\"info\", version))\n\n self.make_lib(version)\n self.make_bin()\n self.make_env()\n self.update_path()\n\n return 0\n\n def make_lib(self, version):\n \"\"\"\n Packs everything into a single lib/ directory.\n \"\"\"\n if os.path.exists(POETRY_LIB_BACKUP):\n shutil.rmtree(POETRY_LIB_BACKUP)\n\n # Backup the current installation\n if os.path.exists(POETRY_LIB):\n shutil.copytree(POETRY_LIB, POETRY_LIB_BACKUP)\n shutil.rmtree(POETRY_LIB)\n\n try:\n self._make_lib(version)\n except Exception:\n if not os.path.exists(POETRY_LIB_BACKUP):\n raise\n\n shutil.copytree(POETRY_LIB_BACKUP, POETRY_LIB)\n shutil.rmtree(POETRY_LIB_BACKUP)\n\n raise\n finally:\n if os.path.exists(POETRY_LIB_BACKUP):\n shutil.rmtree(POETRY_LIB_BACKUP)\n\n def _make_lib(self, version):\n # We get the payload from the remote host\n url = self._base_url + \"{}/\".format(version)\n name = \"poetry-{}-{}.tar.gz\".format(version, sys.platform)\n checksum = \"poetry-{}-{}.sha256sum\".format(version, sys.platform)\n\n try:\n r = urlopen(url + \"{}\".format(checksum))\n except HTTPError as e:\n if e.code == 404:\n raise RuntimeError(\"Could not find {} file\".format(checksum))\n\n raise\n\n checksum = r.read().decode()\n\n try:\n r = urlopen(url + \"{}\".format(name))\n except HTTPError as e:\n if e.code == 404:\n raise RuntimeError(\"Could not find {} file\".format(name))\n\n raise\n\n meta = r.info()\n size = int(meta[\"Content-Length\"])\n current = 0\n block_size = 8192\n\n print(\n \" - Downloading {} ({:.2f}MB)\".format(\n colorize(\"comment\", name), size / 1024 / 1024\n )\n )\n\n sha = hashlib.sha256()\n with temporary_directory(prefix=\"poetry-installer-\") as dir_:\n tar = os.path.join(dir_, name)\n with open(tar, \"wb\") as f:\n while True:\n buffer = r.read(block_size)\n if not buffer:\n break\n\n current += len(buffer)\n f.write(buffer)\n sha.update(buffer)\n\n # Checking hashes\n if checksum != sha.hexdigest():\n raise RuntimeError(\n \"Hashes for {} do not match: {} != {}\".format(\n name, checksum, sha.hexdigest()\n )\n )\n\n gz = GzipFile(tar, mode=\"rb\")\n try:\n with tarfile.TarFile(tar, fileobj=gz, format=tarfile.PAX_FORMAT) as f:\n f.extractall(POETRY_LIB)\n finally:\n gz.close()\n\n def make_bin(self):\n if not os.path.exists(POETRY_BIN):\n os.mkdir(POETRY_BIN, 0o755)\n\n ext = \"\"\n if WINDOWS:\n with open(os.path.join(POETRY_BIN, \"poetry.bat\"), \"w\") as f:\n f.write(BAT)\n\n with open(os.path.join(POETRY_BIN, \"poetry\"), \"w\") as f:\n f.write(BIN)\n\n if not WINDOWS:\n # Making the file executable\n st = os.stat(os.path.join(POETRY_BIN, \"poetry\"))\n os.chmod(os.path.join(POETRY_BIN, 
\"poetry\"), st.st_mode | stat.S_IEXEC)\n\n def make_env(self):\n if WINDOWS:\n return\n\n with open(os.path.join(POETRY_HOME, \"env\"), \"w\") as f:\n f.write(self.get_export_string())\n\n def update_path(self):\n \"\"\"\n Tries to update the $PATH automatically.\n \"\"\"\n if WINDOWS:\n return self.add_to_windows_path()\n\n # Updating any profile we can on UNIX systems\n export_string = self.get_export_string()\n\n addition = \"\\n{}\\n\".format(export_string)\n\n updated = []\n profiles = self.get_unix_profiles()\n for profile in profiles:\n if not os.path.exists(profile):\n continue\n\n with open(profile, \"r\") as f:\n content = f.read()\n\n if addition not in content:\n with open(profile, \"a\") as f:\n f.write(addition)\n\n updated.append(os.path.relpath(profile, HOME))\n\n def add_to_windows_path(self):\n try:\n old_path = self.get_windows_path_var()\n except WindowsError:\n old_path = None\n\n if old_path is None:\n print(\n colorize(\n \"warning\",\n \"Unable to get the PATH value. It will not be updated automatically\",\n )\n )\n self._modify_path = False\n\n return\n\n new_path = POETRY_BIN\n if POETRY_BIN in old_path:\n old_path = old_path.replace(POETRY_BIN + \";\", \"\")\n\n if old_path:\n new_path += \";\"\n new_path += old_path\n\n self.set_windows_path_var(new_path)\n\n def get_windows_path_var(self):\n with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root:\n with winreg.OpenKey(root, \"Environment\", 0, winreg.KEY_ALL_ACCESS) as key:\n path, _ = winreg.QueryValueEx(key, \"PATH\")\n\n return path\n\n def set_windows_path_var(self, value):\n import ctypes\n from ctypes.wintypes import HWND\n from ctypes.wintypes import LPARAM\n from ctypes.wintypes import LPVOID\n from ctypes.wintypes import UINT\n from ctypes.wintypes import WPARAM\n\n with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root:\n with winreg.OpenKey(root, \"Environment\", 0, winreg.KEY_ALL_ACCESS) as key:\n winreg.SetValueEx(key, \"PATH\", 0, winreg.REG_EXPAND_SZ, value)\n\n # Tell other processes to update their environment\n HWND_BROADCAST = 0xFFFF\n WM_SETTINGCHANGE = 0x1A\n\n SMTO_ABORTIFHUNG = 0x0002\n\n result = ctypes.c_long()\n SendMessageTimeoutW = ctypes.windll.user32.SendMessageTimeoutW\n SendMessageTimeoutW(\n HWND_BROADCAST,\n WM_SETTINGCHANGE,\n 0,\n u\"Environment\",\n SMTO_ABORTIFHUNG,\n 5000,\n ctypes.byref(result),\n )\n\n def remove_from_path(self):\n if WINDOWS:\n return self.remove_from_windows_path()\n\n return self.remove_from_unix_path()\n\n def remove_from_windows_path(self):\n path = self.get_windows_path_var()\n\n poetry_path = POETRY_BIN\n if poetry_path in path:\n path = path.replace(POETRY_BIN + \";\", \"\")\n\n if poetry_path in path:\n path = path.replace(POETRY_BIN, \"\")\n\n self.set_windows_path_var(path)\n\n def remove_from_unix_path(self):\n pass\n\n def get_export_string(self):\n path = POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n export_string = 'export PATH=\"{}:$PATH\"'.format(path)\n\n return export_string\n\n def get_unix_profiles(self):\n profiles = [os.path.join(HOME, \".profile\")]\n\n shell = os.getenv(\"SHELL\")\n if \"zsh\" in shell:\n zdotdir = os.getenv(\"ZDOTDIR\", HOME)\n profiles.append(os.path.join(zdotdir, \".zprofile\"))\n\n bash_profile = os.path.join(HOME, \".bash_profile\")\n if os.path.exists(bash_profile):\n profiles.append(bash_profile)\n\n return profiles\n\n def display_pre_message(self):\n if WINDOWS:\n home = POETRY_BIN.replace(os.getenv(\"USERPROFILE\", \"\"), \"%USERPROFILE%\")\n else:\n home = 
POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n\n kwargs = {\n \"poetry\": colorize(\"info\", \"Poetry\"),\n \"poetry_home_bin\": colorize(\"comment\", home),\n }\n\n if not self._modify_path:\n kwargs[\"platform_msg\"] = PRE_MESSAGE_NO_MODIFY_PATH\n else:\n if WINDOWS:\n kwargs[\"platform_msg\"] = PRE_MESSAGE_WINDOWS\n else:\n profiles = [\n colorize(\"comment\", p.replace(os.getenv(\"HOME\", \"\"), \"$HOME\"))\n for p in self.get_unix_profiles()\n ]\n kwargs[\"platform_msg\"] = PRE_MESSAGE_UNIX.format(\n rcfiles=\"\\n\".join(profiles), plural=\"s\" if len(profiles) > 1 else \"\"\n )\n\n print(PRE_MESSAGE.format(**kwargs))\n\n def display_pre_uninstall_message(self):\n kwargs = {\n \"poetry\": colorize(\"info\", \"Poetry\"),\n \"poetry_home_bin\": colorize(\n \"comment\",\n POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\").replace(\n os.getenv(\"USERPROFILE\", \"\"), \"%USERPROFILE%\"\n ),\n ),\n }\n\n print(PRE_UNINSTALL_MESSAGE.format(**kwargs))\n\n def display_post_message(self, version):\n print(\"\")\n\n kwargs = {\n \"poetry\": colorize(\"info\", \"Poetry\"),\n \"version\": colorize(\"comment\", version),\n }\n\n if WINDOWS:\n message = POST_MESSAGE_WINDOWS\n if not self._modify_path:\n message = POST_MESSAGE_WINDOWS_NO_MODIFY_PATH\n\n poetry_home_bin = POETRY_BIN.replace(\n os.getenv(\"USERPROFILE\", \"\"), \"%USERPROFILE%\"\n )\n else:\n message = POST_MESSAGE_UNIX\n if not self._modify_path:\n message = POST_MESSAGE_UNIX_NO_MODIFY_PATH\n\n poetry_home_bin = POETRY_BIN.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n kwargs[\"poetry_home_env\"] = colorize(\n \"comment\", POETRY_ENV.replace(os.getenv(\"HOME\", \"\"), \"$HOME\")\n )\n\n kwargs[\"poetry_home_bin\"] = colorize(\"comment\", poetry_home_bin)\n\n print(message.format(**kwargs))\n\n def call(self, *args):\n return subprocess.check_output(args, stderr=subprocess.STDOUT)\n\n def _get(self, url):\n request = Request(url, headers={\"User-Agent\": \"Python Poetry\"})\n\n with closing(urlopen(request)) as r:\n return r.read()\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Installs the latest (or given) version of poetry\"\n )\n parser.add_argument(\n \"-p\", \"--preview\", dest=\"preview\", action=\"store_true\", default=False\n )\n parser.add_argument(\"--version\", dest=\"version\")\n parser.add_argument(\n \"-f\", \"--force\", dest=\"force\", action=\"store_true\", default=False\n )\n parser.add_argument(\n \"-y\", \"--yes\", dest=\"accept_all\", action=\"store_true\", default=False\n )\n parser.add_argument(\n \"--uninstall\", dest=\"uninstall\", action=\"store_true\", default=False\n )\n\n args = parser.parse_args()\n\n installer = Installer(\n version=args.version or os.getenv(\"POETRY_VERSION\"),\n preview=args.preview or os.getenv(\"POETRY_PREVIEW\"),\n force=args.force,\n accept_all=args.accept_all,\n )\n\n if args.uninstall:\n return installer.uninstall()\n\n return installer.run()\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "get-poetry.py" } ]
diff --git a/get-poetry.py b/get-poetry.py index 55e32b42c04..3b1587afadc 100644 --- a/get-poetry.py +++ b/get-poetry.py @@ -581,7 +581,7 @@ def update_path(self): # Updating any profile we can on UNIX systems export_string = self.get_export_string() - addition = "\n{}".format(export_string) + addition = "\n{}\n".format(export_string) updated = [] profiles = self.get_unix_profiles()
internetarchive__openlibrary-7672
pre-commit: Add Python linting tool ruff

Add https://beta.ruff.rs to our pre-commit as a replacement for `flake8`, `isort`, `pyupgrade`, etc., but written in Rust instead of Python. It can lint the CPython codebase from scratch in 0.29 seconds, so adopting Ruff should greatly accelerate our pre-commits and our pre-commit.ci jobs. We will run the new and old tools in parallel for a sprint or two to verify the coverage before we completely drop the old tools.
[ { "content": "\"\"\" This entire module is deprecated,\n openlibrary.catalog.marc.get_subjects is the preferred module\n\"\"\"\n\n# Tell the flake8 linter to ignore this deprecated file.\n# flake8: noqa\n\nfrom collections import defaultdict\nfrom deprecated import deprecated\nfrom lxml import etree\nimport re\n\n\nfrom openlibrary.catalog.utils.query import get_mc\nfrom openlibrary.catalog.get_ia import (\n get_from_archive,\n marc_formats,\n urlopen_keep_trying,\n)\nfrom openlibrary.catalog.marc import get_subjects\nfrom openlibrary.catalog.marc.marc_binary import MarcBinary\nfrom openlibrary.catalog.marc.marc_xml import (\n read_marc_file,\n MarcXml,\n BlankTag,\n BadSubtag,\n)\nfrom openlibrary.catalog.utils import (\n remove_trailing_dot,\n remove_trailing_number_dot,\n flip_name,\n)\n\n\nsubject_fields = {'600', '610', '611', '630', '648', '650', '651', '662'}\n\nre_flip_name = re.compile('^(.+), ([A-Z].+)$')\n\n# 'Rhodes, Dan (Fictitious character)'\nre_fictitious_character = re.compile(r'^(.+), (.+)( \\(.* character\\))$')\nre_etc = re.compile('^(.+?)[, .]+etc[, .]?$', re.I)\nre_comma = re.compile('^([A-Z])([A-Za-z ]+?) *, ([A-Z][A-Z a-z]+)$')\n\nre_place_comma = re.compile('^(.+), (.+)$')\nre_paren = re.compile('[()]')\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.flip_place() instead.')\ndef flip_place(s):\n return get_subjects.flip_place(s)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.flip_subject() instead.')\ndef flip_subject(s):\n return get_subjects.flip_subject(s)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.four_types() instead.')\ndef four_types(i):\n return get_subjects.four_types(i)\n\n\narchive_url = \"http://archive.org/download/\"\n\n\n@deprecated\ndef load_binary(ia):\n url = archive_url + ia + '/' + ia + '_meta.mrc'\n f = urlopen_keep_trying(url)\n data = f.content\n assert '<title>Internet Archive: Page Not Found</title>' not in data[:200]\n if len(data) != int(data[:5]):\n data = data.decode('utf-8').encode('raw_unicode_escape')\n if len(data) != int(data[:5]):\n return\n return MarcBinary(data)\n\n\n@deprecated\ndef load_xml(ia):\n url = archive_url + ia + '/' + ia + '_marc.xml'\n f = urlopen_keep_trying(url)\n root = etree.fromstring(f.text).getroot()\n if root.tag == '{http://www.loc.gov/MARC21/slim}collection':\n root = root[0]\n return MarcXml(root)\n\n\n@deprecated\ndef subjects_for_work(rec):\n field_map = {\n 'subject': 'subjects',\n 'place': 'subject_places',\n 'time': 'subject_times',\n 'person': 'subject_people',\n }\n\n subjects = four_types(read_subjects(rec))\n\n return {field_map[k]: list(v) for k, v in subjects.items()}\n\n\nre_edition_key = re.compile(r'^/(?:b|books)/(OL\\d+M)$')\n\n\n@deprecated\ndef get_subjects_from_ia(ia):\n formats = marc_formats(ia)\n if not any(formats.values()):\n return {}\n rec = None\n if formats['bin']:\n rec = load_binary(ia)\n if not rec:\n assert formats['xml']\n rec = load_xml(ia)\n return read_subjects(rec)\n\n\nre_ia_marc = re.compile(r'^(?:.*/)?([^/]+)_(marc\\.xml|meta\\.mrc)(:0:\\d+)?$')\n\n\n@deprecated\ndef get_work_subjects(w, do_get_mc=True):\n found = set()\n for e in w['editions']:\n sr = e.get('source_records', [])\n if sr:\n for i in sr:\n if i.endswith('initial import'):\n continue\n if i.startswith(('ia:', 'marc:')):\n found.add(i)\n continue\n else:\n mc = None\n if do_get_mc:\n m = re_edition_key.match(e['key'])\n mc = get_mc('/b/' + m.group(1))\n if mc:\n if mc.endswith('initial import'):\n continue\n if not mc.startswith('amazon:') and not 
re_ia_marc.match(mc):\n found.add('marc:' + mc)\n subjects = []\n for sr in found:\n if sr.startswith('marc:ia:'):\n subjects.append(get_subjects_from_ia(sr[8:]))\n elif sr.startswith('marc:'):\n loc = sr[5:]\n data = get_from_archive(loc)\n rec = MarcBinary(data)\n subjects.append(read_subjects(rec))\n else:\n assert sr.startswith('ia:')\n subjects.append(get_subjects_from_ia(sr[3:]))\n return combine_subjects(subjects)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.tidy_subject() instead.')\ndef tidy_subject(s):\n return get_subjects.tidy_subject(s)\n\n\nre_aspects = re.compile(' [Aa]spects$')\n\n\n@deprecated\ndef find_aspects(f):\n cur = [(i, j) for i, j in f.get_subfields('ax')]\n if len(cur) < 2 or cur[0][0] != 'a' or cur[1][0] != 'x':\n return\n a, x = cur[0][1], cur[1][1]\n x = x.strip('. ')\n a = a.strip('. ')\n if not re_aspects.search(x):\n return\n if a == 'Body, Human':\n a = 'the Human body'\n return x + ' of ' + flip_subject(a)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.read_subject() instead.')\ndef read_subjects(rec):\n return get_subjects.read_subject(s)\n\n\n@deprecated\ndef combine_subjects(subjects):\n all_subjects = defaultdict(lambda: defaultdict(int))\n for a in subjects:\n for b, c in a.items():\n for d, e in c.items():\n all_subjects[b][d] += e\n return {k: dict(v) for k, v in all_subjects.items()}\n", "path": "openlibrary/catalog/marc/marc_subject.py" } ]
[ { "content": "\"\"\" This entire module is deprecated,\n openlibrary.catalog.marc.get_subjects is the preferred module\n\"\"\"\n\n# Tell the ruff linter to ignore this deprecated file.\n# ruff: noqa\n\nfrom collections import defaultdict\nfrom deprecated import deprecated\nfrom lxml import etree\nimport re\n\n\nfrom openlibrary.catalog.utils.query import get_mc\nfrom openlibrary.catalog.get_ia import (\n get_from_archive,\n marc_formats,\n urlopen_keep_trying,\n)\nfrom openlibrary.catalog.marc import get_subjects\nfrom openlibrary.catalog.marc.marc_binary import MarcBinary\nfrom openlibrary.catalog.marc.marc_xml import (\n read_marc_file,\n MarcXml,\n BlankTag,\n BadSubtag,\n)\nfrom openlibrary.catalog.utils import (\n remove_trailing_dot,\n remove_trailing_number_dot,\n flip_name,\n)\n\n\nsubject_fields = {'600', '610', '611', '630', '648', '650', '651', '662'}\n\nre_flip_name = re.compile('^(.+), ([A-Z].+)$')\n\n# 'Rhodes, Dan (Fictitious character)'\nre_fictitious_character = re.compile(r'^(.+), (.+)( \\(.* character\\))$')\nre_etc = re.compile('^(.+?)[, .]+etc[, .]?$', re.I)\nre_comma = re.compile('^([A-Z])([A-Za-z ]+?) *, ([A-Z][A-Z a-z]+)$')\n\nre_place_comma = re.compile('^(.+), (.+)$')\nre_paren = re.compile('[()]')\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.flip_place() instead.')\ndef flip_place(s):\n return get_subjects.flip_place(s)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.flip_subject() instead.')\ndef flip_subject(s):\n return get_subjects.flip_subject(s)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.four_types() instead.')\ndef four_types(i):\n return get_subjects.four_types(i)\n\n\narchive_url = \"http://archive.org/download/\"\n\n\n@deprecated\ndef load_binary(ia):\n url = archive_url + ia + '/' + ia + '_meta.mrc'\n f = urlopen_keep_trying(url)\n data = f.content\n assert '<title>Internet Archive: Page Not Found</title>' not in data[:200]\n if len(data) != int(data[:5]):\n data = data.decode('utf-8').encode('raw_unicode_escape')\n if len(data) != int(data[:5]):\n return\n return MarcBinary(data)\n\n\n@deprecated\ndef load_xml(ia):\n url = archive_url + ia + '/' + ia + '_marc.xml'\n f = urlopen_keep_trying(url)\n root = etree.fromstring(f.text).getroot()\n if root.tag == '{http://www.loc.gov/MARC21/slim}collection':\n root = root[0]\n return MarcXml(root)\n\n\n@deprecated\ndef subjects_for_work(rec):\n field_map = {\n 'subject': 'subjects',\n 'place': 'subject_places',\n 'time': 'subject_times',\n 'person': 'subject_people',\n }\n\n subjects = four_types(read_subjects(rec))\n\n return {field_map[k]: list(v) for k, v in subjects.items()}\n\n\nre_edition_key = re.compile(r'^/(?:b|books)/(OL\\d+M)$')\n\n\n@deprecated\ndef get_subjects_from_ia(ia):\n formats = marc_formats(ia)\n if not any(formats.values()):\n return {}\n rec = None\n if formats['bin']:\n rec = load_binary(ia)\n if not rec:\n assert formats['xml']\n rec = load_xml(ia)\n return read_subjects(rec)\n\n\nre_ia_marc = re.compile(r'^(?:.*/)?([^/]+)_(marc\\.xml|meta\\.mrc)(:0:\\d+)?$')\n\n\n@deprecated\ndef get_work_subjects(w, do_get_mc=True):\n found = set()\n for e in w['editions']:\n sr = e.get('source_records', [])\n if sr:\n for i in sr:\n if i.endswith('initial import'):\n continue\n if i.startswith(('ia:', 'marc:')):\n found.add(i)\n continue\n else:\n mc = None\n if do_get_mc:\n m = re_edition_key.match(e['key'])\n mc = get_mc('/b/' + m.group(1))\n if mc:\n if mc.endswith('initial import'):\n continue\n if not mc.startswith('amazon:') and not 
re_ia_marc.match(mc):\n found.add('marc:' + mc)\n subjects = []\n for sr in found:\n if sr.startswith('marc:ia:'):\n subjects.append(get_subjects_from_ia(sr[8:]))\n elif sr.startswith('marc:'):\n loc = sr[5:]\n data = get_from_archive(loc)\n rec = MarcBinary(data)\n subjects.append(read_subjects(rec))\n else:\n assert sr.startswith('ia:')\n subjects.append(get_subjects_from_ia(sr[3:]))\n return combine_subjects(subjects)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.tidy_subject() instead.')\ndef tidy_subject(s):\n return get_subjects.tidy_subject(s)\n\n\nre_aspects = re.compile(' [Aa]spects$')\n\n\n@deprecated\ndef find_aspects(f):\n cur = [(i, j) for i, j in f.get_subfields('ax')]\n if len(cur) < 2 or cur[0][0] != 'a' or cur[1][0] != 'x':\n return\n a, x = cur[0][1], cur[1][1]\n x = x.strip('. ')\n a = a.strip('. ')\n if not re_aspects.search(x):\n return\n if a == 'Body, Human':\n a = 'the Human body'\n return x + ' of ' + flip_subject(a)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.read_subject() instead.')\ndef read_subjects(rec):\n return get_subjects.read_subject(s)\n\n\n@deprecated\ndef combine_subjects(subjects):\n all_subjects = defaultdict(lambda: defaultdict(int))\n for a in subjects:\n for b, c in a.items():\n for d, e in c.items():\n all_subjects[b][d] += e\n return {k: dict(v) for k, v in all_subjects.items()}\n", "path": "openlibrary/catalog/marc/marc_subject.py" } ]
diff --git a/.flake8 b/.flake8 deleted file mode 100644 index ebf02ca46cf..00000000000 --- a/.flake8 +++ /dev/null @@ -1,13 +0,0 @@ -[flake8] -count = true -exclude = ./.*,vendor/*,node_modules/* -# 55 E203 whitespace before ':' -- Do not fix because black will undo it -# 18 E402 module level import not at top of file -# 47 E722 do not use bare 'except' -# 119 F401 '.model.*' imported but unused -# 60 F841 local variable 'account' is assigned to but never used -extend-ignore = E203,E402,E722,F401,F841,I -max-complexity = 41 -max-line-length = 200 -show-source = true -statistics = true diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 274a29f76a6..726c64ad5a0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -58,20 +58,7 @@ repos: additional_dependencies: - types-all - - repo: https://github.com/asottile/pyupgrade - rev: v3.3.1 - hooks: - - id: pyupgrade - args: # Solr on Cython is not yet ready for 3.10 type hints - - --py39-plus - - --keep-runtime-typing - - repo: https://github.com/abravalheri/validate-pyproject rev: v0.12.1 hooks: - id: validate-pyproject - - - repo: https://github.com/pycqa/flake8 - rev: 6.0.0 - hooks: - - id: flake8 # See the file .flake8 for args diff --git a/Makefile b/Makefile index 1df4bef873e..d4db45e6137 100644 --- a/Makefile +++ b/Makefile @@ -65,8 +65,8 @@ reindex-solr: PYTHONPATH=$(PWD) python ./scripts/solr_builder/solr_builder/index_subjects.py time lint: - # See the file .flake8 for flake8's settings - $(PYTHON) -m flake8 . + # See the pyproject.toml file for ruff's settings + $(PYTHON) -m ruff --no-cache . test-py: pytest . --ignore=tests/integration --ignore=infogami --ignore=vendor --ignore=node_modules diff --git a/openlibrary/catalog/marc/marc_subject.py b/openlibrary/catalog/marc/marc_subject.py index 5008dfeb214..6a77a83d127 100644 --- a/openlibrary/catalog/marc/marc_subject.py +++ b/openlibrary/catalog/marc/marc_subject.py @@ -2,8 +2,8 @@ openlibrary.catalog.marc.get_subjects is the preferred module """ -# Tell the flake8 linter to ignore this deprecated file. -# flake8: noqa +# Tell the ruff linter to ignore this deprecated file. +# ruff: noqa from collections import defaultdict from deprecated import deprecated diff --git a/renovate.json b/renovate.json index 942a5ac5939..628fe04a424 100644 --- a/renovate.json +++ b/renovate.json @@ -19,7 +19,7 @@ "automerge": true }, { - "matchPackageNames": ["flake8", "mypy", "pytest", "pytest-asyncio", "safety"], + "matchPackageNames": ["mypy", "pytest", "pytest-asyncio", "ruff", "safety"], "automerge": true }, { diff --git a/requirements_test.txt b/requirements_test.txt index ca4fd808624..5fb3c6e74d4 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -4,9 +4,9 @@ -r requirements.txt debugpy>=1.6.4 -flake8==6.0.0 -mypy==1.0.0 +mypy==1.1.1 pymemcache==4.0.0 -pytest==7.2.1 +pytest==7.2.2 pytest-asyncio==0.20.3 +ruff==0.0.256 safety==2.3.5 diff --git a/scripts/test_py3.sh b/scripts/test_py3.sh index 8d20a81909c..b0226e501c3 100755 --- a/scripts/test_py3.sh +++ b/scripts/test_py3.sh @@ -7,7 +7,7 @@ pytest . \ --ignore=vendor RETURN_CODE=$? -flake8 --exit-zero --count --select=E722,F403 --show-source --statistics # Show bare exceptions and wildcard (*) imports +ruff --exit-zero --select=E722,F403 --show-source # Show bare exceptions and wildcard (*) imports safety check || true # Show any insecure dependencies exit ${RETURN_CODE}
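Not part of the PR, but for the parallel-run period mentioned in the issue, a small helper like the sketch below could invoke ruff from Python with the same flags the updated `scripts/test_py3.sh` uses, making it easy to compare the old and new tools side by side. The helper name is hypothetical; the flags (`--exit-zero`, `--select=E722,F403`, `--show-source`) are the ones that appear in the diff above.

```python
# Hypothetical helper for the flake8 -> ruff transition period: run ruff with the
# same rule selection the repo's test script uses and return its exit code.
import subprocess


def run_ruff(path: str = ".", select: str = "E722,F403") -> int:
    """Run ruff on `path`, limited to the given comma-separated rule codes."""
    cmd = ["ruff", "--exit-zero", "--select={}".format(select), "--show-source", path]
    return subprocess.run(cmd).returncode


if __name__ == "__main__":
    raise SystemExit(run_ruff())
```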
numpy__numpy-12831
[BUG] Intel Fortran compiler installed but not found

Hi, I have intel compilers installed, but get this:

```
Found executable /opt/intel/compilers_and_libraries_2018.1.163/linux/bin/intel64/icc
customize IntelEM64TFCompiler
Found executable /opt/intel/compilers_and_libraries_2018.1.163/linux/bin/intel64/ifort
Traceback (most recent call last):
  File "setup.py", line 418, in <module>
    setup_package()
  File "setup.py", line 414, in setup_package
    setup(**metadata)
  File "/usr/lib/python3.6/site-packages/numpy/distutils/core.py", line 169, in setup
    return old_setup(**new_attr)
  File "/usr/lib/python3.6/site-packages/setuptools/__init__.py", line 129, in setup
    return distutils.core.setup(**attrs)
  File "/usr/lib/python3.6/distutils/core.py", line 148, in setup
    dist.run_commands()
  File "/usr/lib/python3.6/distutils/dist.py", line 955, in run_commands
    self.run_command(cmd)
  File "/usr/lib/python3.6/distutils/dist.py", line 974, in run_command
    cmd_obj.run()
  File "/usr/lib/python3.6/site-packages/numpy/distutils/command/build_clib.py", line 75, in run
    self.run_command('build_src')
  File "/usr/lib/python3.6/distutils/cmd.py", line 313, in run_command
    self.distribution.run_command(command)
  File "/usr/lib/python3.6/distutils/dist.py", line 974, in run_command
    cmd_obj.run()
  File "/usr/lib/python3.6/site-packages/numpy/distutils/command/build_src.py", line 148, in run
    self.build_sources()
  File "/usr/lib/python3.6/site-packages/numpy/distutils/command/build_src.py", line 165, in build_sources
    self.build_extension_sources(ext)
  File "/usr/lib/python3.6/site-packages/numpy/distutils/command/build_src.py", line 324, in build_extension_sources
    sources = self.generate_sources(sources, ext)
  File "/usr/lib/python3.6/site-packages/numpy/distutils/command/build_src.py", line 377, in generate_sources
    source = func(extension, build_dir)
  File "scipy/_lib/setup.py", line 32, in get_messagestream_config
    if config_cmd.check_func('open_memstream', decl=True, call=True):
  File "/usr/lib/python3.6/site-packages/numpy/distutils/command/config.py", line 312, in check_func
    self._check_compiler()
  File "/usr/lib/python3.6/site-packages/numpy/distutils/command/config.py", line 83, in _check_compiler
    self.fcompiler.customize(self.distribution)
  File "/usr/lib/python3.6/site-packages/numpy/distutils/fcompiler/__init__.py", line 499, in customize
    get_flags('opt', oflags)
  File "/usr/lib/python3.6/site-packages/numpy/distutils/fcompiler/__init__.py", line 490, in get_flags
    flags.extend(getattr(self.flag_vars, tag))
  File "/usr/lib/python3.6/site-packages/numpy/distutils/environment.py", line 39, in __getattr__
    return self._get_var(name, conf_desc)
  File "/usr/lib/python3.6/site-packages/numpy/distutils/environment.py", line 53, in _get_var
    var = self._hook_handler(name, hook)
  File "/usr/lib/python3.6/site-packages/numpy/distutils/fcompiler/__init__.py", line 697, in _environment_hook
    return hook()
  File "/usr/lib/python3.6/site-packages/numpy/distutils/fcompiler/intel.py", line 125, in get_flags_opt
    v = self.get_version()
  File "/usr/lib/python3.6/site-packages/numpy/distutils/fcompiler/__init__.py", line 430, in get_version
    raise CompilerNotFound()
numpy.distutils.fcompiler.CompilerNotFound
```
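A hedged diagnostic, not a fix: the log shows `ifort` was found on disk, yet `get_version()` still raises `CompilerNotFound`, which happens when the version string numpy.distutils extracts from the compiler does not match what it expects. A first step is therefore to look at what the compiler actually prints for its version. The snippet below is illustrative only; the path is taken from the log above, and the pipe/`universal_newlines` arguments are used so it also runs on the Python 3.6 shown in the traceback.

```python
# Diagnostic sketch (assumes the ifort path from the log above):
# print what the Intel Fortran compiler reports as its version string.
import subprocess

ifort = "/opt/intel/compilers_and_libraries_2018.1.163/linux/bin/intel64/ifort"
result = subprocess.run(
    [ifort, "--version"],
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
    universal_newlines=True,
)
print(result.stdout)
```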
[ { "content": "from __future__ import division, absolute_import, print_function\n\nimport os\nimport re\nimport sys\nimport types\nimport shlex\nimport time\nimport subprocess\nfrom copy import copy\nfrom distutils import ccompiler\nfrom distutils.ccompiler import *\nfrom distutils.errors import DistutilsExecError, DistutilsModuleError, \\\n DistutilsPlatformError, CompileError\nfrom distutils.sysconfig import customize_compiler\nfrom distutils.version import LooseVersion\n\nfrom numpy.distutils import log\nfrom numpy.distutils.compat import get_exception\nfrom numpy.distutils.exec_command import (\n filepath_from_subprocess_output, forward_bytes_to_stdout\n)\nfrom numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \\\n get_num_build_jobs, \\\n _commandline_dep_string\n\n# globals for parallel build management\ntry:\n import threading\nexcept ImportError:\n import dummy_threading as threading\n_job_semaphore = None\n_global_lock = threading.Lock()\n_processing_files = set()\n\n\ndef _needs_build(obj, cc_args, extra_postargs, pp_opts):\n \"\"\"\n Check if an objects needs to be rebuild based on its dependencies\n\n Parameters\n ----------\n obj : str\n object file\n\n Returns\n -------\n bool\n \"\"\"\n # defined in unixcompiler.py\n dep_file = obj + '.d'\n if not os.path.exists(dep_file):\n return True\n\n # dep_file is a makefile containing 'object: dependencies'\n # formatted like posix shell (spaces escaped, \\ line continuations)\n # the last line contains the compiler commandline arguments as some\n # projects may compile an extension multiple times with different\n # arguments\n with open(dep_file, \"r\") as f:\n lines = f.readlines()\n\n cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts)\n last_cmdline = lines[-1]\n if last_cmdline != cmdline:\n return True\n\n contents = ''.join(lines[:-1])\n deps = [x for x in shlex.split(contents, posix=True)\n if x != \"\\n\" and not x.endswith(\":\")]\n\n try:\n t_obj = os.stat(obj).st_mtime\n\n # check if any of the dependencies is newer than the object\n # the dependencies includes the source used to create the object\n for f in deps:\n if os.stat(f).st_mtime > t_obj:\n return True\n except OSError:\n # no object counts as newer (shouldn't happen if dep_file exists)\n return True\n\n return False\n\n\ndef replace_method(klass, method_name, func):\n if sys.version_info[0] < 3:\n m = types.MethodType(func, None, klass)\n else:\n # Py3k does not have unbound method anymore, MethodType does not work\n m = lambda self, *args, **kw: func(self, *args, **kw)\n setattr(klass, method_name, m)\n\n\n######################################################################\n## Method that subclasses may redefine. But don't call this method,\n## it i private to CCompiler class and may return unexpected\n## results if used elsewhere. So, you have been warned..\n\ndef CCompiler_find_executables(self):\n \"\"\"\n Does nothing here, but is called by the get_version method and can be\n overridden by subclasses. 
In particular it is redefined in the `FCompiler`\n class where more documentation can be found.\n\n \"\"\"\n pass\n\n\nreplace_method(CCompiler, 'find_executables', CCompiler_find_executables)\n\n\n# Using customized CCompiler.spawn.\ndef CCompiler_spawn(self, cmd, display=None):\n \"\"\"\n Execute a command in a sub-process.\n\n Parameters\n ----------\n cmd : str\n The command to execute.\n display : str or sequence of str, optional\n The text to add to the log file kept by `numpy.distutils`.\n If not given, `display` is equal to `cmd`.\n\n Returns\n -------\n None\n\n Raises\n ------\n DistutilsExecError\n If the command failed, i.e. the exit status was not 0.\n\n \"\"\"\n if display is None:\n display = cmd\n if is_sequence(display):\n display = ' '.join(list(display))\n log.info(display)\n try:\n subprocess.check_output(cmd)\n except subprocess.CalledProcessError as exc:\n o = exc.output\n s = exc.returncode\n except OSError:\n # OSError doesn't have the same hooks for the exception\n # output, but exec_command() historically would use an\n # empty string for EnvironmentError (base class for\n # OSError)\n o = b''\n # status previously used by exec_command() for parent\n # of OSError\n s = 127\n else:\n # use a convenience return here so that any kind of\n # caught exception will execute the default code after the\n # try / except block, which handles various exceptions\n return None\n\n if is_sequence(cmd):\n cmd = ' '.join(list(cmd))\n\n forward_bytes_to_stdout(o)\n\n if re.search(b'Too many open files', o):\n msg = '\\nTry rerunning setup command until build succeeds.'\n else:\n msg = ''\n raise DistutilsExecError('Command \"%s\" failed with exit status %d%s' %\n (cmd, s, msg))\n\nreplace_method(CCompiler, 'spawn', CCompiler_spawn)\n\ndef CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):\n \"\"\"\n Return the name of the object files for the given source files.\n\n Parameters\n ----------\n source_filenames : list of str\n The list of paths to source files. Paths can be either relative or\n absolute, this is handled transparently.\n strip_dir : bool, optional\n Whether to strip the directory from the returned paths. If True,\n the file name prepended by `output_dir` is returned. 
Default is False.\n output_dir : str, optional\n If given, this path is prepended to the returned paths to the\n object files.\n\n Returns\n -------\n obj_names : list of str\n The list of paths to the object files corresponding to the source\n files in `source_filenames`.\n\n \"\"\"\n if output_dir is None:\n output_dir = ''\n obj_names = []\n for src_name in source_filenames:\n base, ext = os.path.splitext(os.path.normpath(src_name))\n base = os.path.splitdrive(base)[1] # Chop off the drive\n base = base[os.path.isabs(base):] # If abs, chop off leading /\n if base.startswith('..'):\n # Resolve starting relative path components, middle ones\n # (if any) have been handled by os.path.normpath above.\n i = base.rfind('..')+2\n d = base[:i]\n d = os.path.basename(os.path.abspath(d))\n base = d + base[i:]\n if ext not in self.src_extensions:\n raise UnknownFileError(\"unknown file type '%s' (from '%s')\" % (ext, src_name))\n if strip_dir:\n base = os.path.basename(base)\n obj_name = os.path.join(output_dir, base + self.obj_extension)\n obj_names.append(obj_name)\n return obj_names\n\nreplace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)\n\ndef CCompiler_compile(self, sources, output_dir=None, macros=None,\n include_dirs=None, debug=0, extra_preargs=None,\n extra_postargs=None, depends=None):\n \"\"\"\n Compile one or more source files.\n\n Please refer to the Python distutils API reference for more details.\n\n Parameters\n ----------\n sources : list of str\n A list of filenames\n output_dir : str, optional\n Path to the output directory.\n macros : list of tuples\n A list of macro definitions.\n include_dirs : list of str, optional\n The directories to add to the default include file search path for\n this compilation only.\n debug : bool, optional\n Whether or not to output debug symbols in or alongside the object\n file(s).\n extra_preargs, extra_postargs : ?\n Extra pre- and post-arguments.\n depends : list of str, optional\n A list of file names that all targets depend on.\n\n Returns\n -------\n objects : list of str\n A list of object file names, one per source file `sources`.\n\n Raises\n ------\n CompileError\n If compilation fails.\n\n \"\"\"\n # This method is effective only with Python >=2.3 distutils.\n # Any changes here should be applied also to fcompiler.compile\n # method to support pre Python 2.3 distutils.\n global _job_semaphore\n\n jobs = get_num_build_jobs()\n\n # setup semaphore to not exceed number of compile jobs when parallelized at\n # extension level (python >= 3.5)\n with _global_lock:\n if _job_semaphore is None:\n _job_semaphore = threading.Semaphore(jobs)\n\n if not sources:\n return []\n # FIXME:RELATIVE_IMPORT\n if sys.version_info[0] < 3:\n from .fcompiler import FCompiler, is_f_file, has_f90_header\n else:\n from numpy.distutils.fcompiler import (FCompiler, is_f_file,\n has_f90_header)\n if isinstance(self, FCompiler):\n display = []\n for fc in ['f77', 'f90', 'fix']:\n fcomp = getattr(self, 'compiler_'+fc)\n if fcomp is None:\n continue\n display.append(\"Fortran %s compiler: %s\" % (fc, ' '.join(fcomp)))\n display = '\\n'.join(display)\n else:\n ccomp = self.compiler_so\n display = \"C compiler: %s\\n\" % (' '.join(ccomp),)\n log.info(display)\n macros, objects, extra_postargs, pp_opts, build = \\\n self._setup_compile(output_dir, macros, include_dirs, sources,\n depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n display = \"compile options: '%s'\" % (' '.join(cc_args))\n if extra_postargs:\n 
display += \"\\nextra options: '%s'\" % (' '.join(extra_postargs))\n log.info(display)\n\n def single_compile(args):\n obj, (src, ext) = args\n if not _needs_build(obj, cc_args, extra_postargs, pp_opts):\n return\n\n # check if we are currently already processing the same object\n # happens when using the same source in multiple extensions\n while True:\n # need explicit lock as there is no atomic check and add with GIL\n with _global_lock:\n # file not being worked on, start working\n if obj not in _processing_files:\n _processing_files.add(obj)\n break\n # wait for the processing to end\n time.sleep(0.1)\n\n try:\n # retrieve slot from our #job semaphore and build\n with _job_semaphore:\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n finally:\n # register being done processing\n with _global_lock:\n _processing_files.remove(obj)\n\n\n if isinstance(self, FCompiler):\n objects_to_build = list(build.keys())\n f77_objects, other_objects = [], []\n for obj in objects:\n if obj in objects_to_build:\n src, ext = build[obj]\n if self.compiler_type=='absoft':\n obj = cyg2win32(obj)\n src = cyg2win32(src)\n if is_f_file(src) and not has_f90_header(src):\n f77_objects.append((obj, (src, ext)))\n else:\n other_objects.append((obj, (src, ext)))\n\n # f77 objects can be built in parallel\n build_items = f77_objects\n # build f90 modules serial, module files are generated during\n # compilation and may be used by files later in the list so the\n # ordering is important\n for o in other_objects:\n single_compile(o)\n else:\n build_items = build.items()\n\n if len(build) > 1 and jobs > 1:\n # build parallel\n import multiprocessing.pool\n pool = multiprocessing.pool.ThreadPool(jobs)\n pool.map(single_compile, build_items)\n pool.close()\n else:\n # build serial\n for o in build_items:\n single_compile(o)\n\n # Return *all* object filenames, not just the ones we just built.\n return objects\n\nreplace_method(CCompiler, 'compile', CCompiler_compile)\n\ndef CCompiler_customize_cmd(self, cmd, ignore=()):\n \"\"\"\n Customize compiler using distutils command.\n\n Parameters\n ----------\n cmd : class instance\n An instance inheriting from `distutils.cmd.Command`.\n ignore : sequence of str, optional\n List of `CCompiler` commands (without ``'set_'``) that should not be\n altered. 
Strings that are checked for are:\n ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',\n 'rpath', 'link_objects')``.\n\n Returns\n -------\n None\n\n \"\"\"\n log.info('customize %s using %s' % (self.__class__.__name__,\n cmd.__class__.__name__))\n def allow(attr):\n return getattr(cmd, attr, None) is not None and attr not in ignore\n\n if allow('include_dirs'):\n self.set_include_dirs(cmd.include_dirs)\n if allow('define'):\n for (name, value) in cmd.define:\n self.define_macro(name, value)\n if allow('undef'):\n for macro in cmd.undef:\n self.undefine_macro(macro)\n if allow('libraries'):\n self.set_libraries(self.libraries + cmd.libraries)\n if allow('library_dirs'):\n self.set_library_dirs(self.library_dirs + cmd.library_dirs)\n if allow('rpath'):\n self.set_runtime_library_dirs(cmd.rpath)\n if allow('link_objects'):\n self.set_link_objects(cmd.link_objects)\n\nreplace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)\n\ndef _compiler_to_string(compiler):\n props = []\n mx = 0\n keys = list(compiler.executables.keys())\n for key in ['version', 'libraries', 'library_dirs',\n 'object_switch', 'compile_switch',\n 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']:\n if key not in keys:\n keys.append(key)\n for key in keys:\n if hasattr(compiler, key):\n v = getattr(compiler, key)\n mx = max(mx, len(key))\n props.append((key, repr(v)))\n fmt = '%-' + repr(mx+1) + 's = %s'\n lines = [fmt % prop for prop in props]\n return '\\n'.join(lines)\n\ndef CCompiler_show_customization(self):\n \"\"\"\n Print the compiler customizations to stdout.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n Notes\n -----\n Printing is only done if the distutils log threshold is < 2.\n\n \"\"\"\n if 0:\n for attrname in ['include_dirs', 'define', 'undef',\n 'libraries', 'library_dirs',\n 'rpath', 'link_objects']:\n attr = getattr(self, attrname, None)\n if not attr:\n continue\n log.info(\"compiler '%s' is set to %s\" % (attrname, attr))\n try:\n self.get_version()\n except Exception:\n pass\n if log._global_log.threshold<2:\n print('*'*80)\n print(self.__class__)\n print(_compiler_to_string(self))\n print('*'*80)\n\nreplace_method(CCompiler, 'show_customization', CCompiler_show_customization)\n\ndef CCompiler_customize(self, dist, need_cxx=0):\n \"\"\"\n Do any platform-specific customization of a compiler instance.\n\n This method calls `distutils.sysconfig.customize_compiler` for\n platform-specific customization, as well as optionally remove a flag\n to suppress spurious warnings in case C++ code is being compiled.\n\n Parameters\n ----------\n dist : object\n This parameter is not used for anything.\n need_cxx : bool, optional\n Whether or not C++ has to be compiled. If so (True), the\n ``\"-Wstrict-prototypes\"`` option is removed to prevent spurious\n warnings. Default is False.\n\n Returns\n -------\n None\n\n Notes\n -----\n All the default options used by distutils can be extracted with::\n\n from distutils import sysconfig\n sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',\n 'CCSHARED', 'LDSHARED', 'SO')\n\n \"\"\"\n # See FCompiler.customize for suggested usage.\n log.info('customize %s' % (self.__class__.__name__))\n customize_compiler(self)\n if need_cxx:\n # In general, distutils uses -Wstrict-prototypes, but this option is\n # not valid for C++ code, only for C. 
Remove it if it's there to\n # avoid a spurious warning on every compilation.\n try:\n self.compiler_so.remove('-Wstrict-prototypes')\n except (AttributeError, ValueError):\n pass\n\n if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:\n if not self.compiler_cxx:\n if self.compiler[0].startswith('gcc'):\n a, b = 'gcc', 'g++'\n else:\n a, b = 'cc', 'c++'\n self.compiler_cxx = [self.compiler[0].replace(a, b)]\\\n + self.compiler[1:]\n else:\n if hasattr(self, 'compiler'):\n log.warn(\"#### %s #######\" % (self.compiler,))\n if not hasattr(self, 'compiler_cxx'):\n log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)\n\n\n # check if compiler supports gcc style automatic dependencies\n # run on every extension so skip for known good compilers\n if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or\n 'g++' in self.compiler[0] or\n 'clang' in self.compiler[0]):\n self._auto_depends = True\n elif os.name == 'posix':\n import tempfile\n import shutil\n tmpdir = tempfile.mkdtemp()\n try:\n fn = os.path.join(tmpdir, \"file.c\")\n with open(fn, \"w\") as f:\n f.write(\"int a;\\n\")\n self.compile([fn], output_dir=tmpdir,\n extra_preargs=['-MMD', '-MF', fn + '.d'])\n self._auto_depends = True\n except CompileError:\n self._auto_depends = False\n finally:\n shutil.rmtree(tmpdir)\n\n return\n\nreplace_method(CCompiler, 'customize', CCompiler_customize)\n\ndef simple_version_match(pat=r'[-.\\d]+', ignore='', start=''):\n \"\"\"\n Simple matching of version numbers, for use in CCompiler and FCompiler.\n\n Parameters\n ----------\n pat : str, optional\n A regular expression matching version numbers.\n Default is ``r'[-.\\\\d]+'``.\n ignore : str, optional\n A regular expression matching patterns to skip.\n Default is ``''``, in which case nothing is skipped.\n start : str, optional\n A regular expression matching the start of where to start looking\n for version numbers.\n Default is ``''``, in which case searching is started at the\n beginning of the version string given to `matcher`.\n\n Returns\n -------\n matcher : callable\n A function that is appropriate to use as the ``.version_match``\n attribute of a `CCompiler` class. `matcher` takes a single parameter,\n a version string.\n\n \"\"\"\n def matcher(self, version_string):\n # version string may appear in the second line, so getting rid\n # of new lines:\n version_string = version_string.replace('\\n', ' ')\n pos = 0\n if start:\n m = re.match(start, version_string)\n if not m:\n return None\n pos = m.end()\n while True:\n m = re.search(pat, version_string[pos:])\n if not m:\n return None\n if ignore and re.match(ignore, m.group(0)):\n pos = m.end()\n continue\n break\n return m.group(0)\n return matcher\n\ndef CCompiler_get_version(self, force=False, ok_status=[0]):\n \"\"\"\n Return compiler version, or None if compiler is not available.\n\n Parameters\n ----------\n force : bool, optional\n If True, force a new determination of the version, even if the\n compiler already has a version attribute. Default is False.\n ok_status : list of int, optional\n The list of status values returned by the version look-up process\n for which a version string is returned. If the status value is not\n in `ok_status`, None is returned. 
Default is ``[0]``.\n\n Returns\n -------\n version : str or None\n Version string, in the format of `distutils.version.LooseVersion`.\n\n \"\"\"\n if not force and hasattr(self, 'version'):\n return self.version\n self.find_executables()\n try:\n version_cmd = self.version_cmd\n except AttributeError:\n return None\n if not version_cmd or not version_cmd[0]:\n return None\n try:\n matcher = self.version_match\n except AttributeError:\n try:\n pat = self.version_pattern\n except AttributeError:\n return None\n def matcher(version_string):\n m = re.match(pat, version_string)\n if not m:\n return None\n version = m.group('version')\n return version\n\n try:\n output = subprocess.check_output(version_cmd)\n except subprocess.CalledProcessError as exc:\n output = exc.output\n status = exc.returncode\n except OSError:\n # match the historical returns for a parent\n # exception class caught by exec_command()\n status = 127\n output = b''\n else:\n # output isn't actually a filepath but we do this\n # for now to match previous distutils behavior\n output = filepath_from_subprocess_output(output)\n status = 0\n\n version = None\n if status in ok_status:\n version = matcher(output)\n if version:\n version = LooseVersion(version)\n self.version = version\n return version\n\nreplace_method(CCompiler, 'get_version', CCompiler_get_version)\n\ndef CCompiler_cxx_compiler(self):\n \"\"\"\n Return the C++ compiler.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n cxx : class instance\n The C++ compiler, as a `CCompiler` instance.\n\n \"\"\"\n if self.compiler_type in ('msvc', 'intelw', 'intelemw'):\n return self\n\n cxx = copy(self)\n cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:]\n if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]:\n # AIX needs the ld_so_aix script included with Python\n cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \\\n + cxx.linker_so[2:]\n else:\n cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]\n return cxx\n\nreplace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)\n\ncompiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',\n \"Intel C Compiler for 32-bit applications\")\ncompiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',\n \"Intel C Itanium Compiler for Itanium-based applications\")\ncompiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',\n \"Intel C Compiler for 64-bit applications\")\ncompiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',\n \"Intel C Compiler for 32-bit applications on Windows\")\ncompiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',\n \"Intel C Compiler for 64-bit applications on Windows\")\ncompiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',\n \"PathScale Compiler for SiCortex-based applications\")\nccompiler._default_compilers += (('linux.*', 'intel'),\n ('linux.*', 'intele'),\n ('linux.*', 'intelem'),\n ('linux.*', 'pathcc'),\n ('nt', 'intelw'),\n ('nt', 'intelemw'))\n\nif sys.platform == 'win32':\n compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',\n \"Mingw32 port of GNU C Compiler for Win32\"\\\n \"(for MSC built Python)\")\n if mingw32():\n # On windows platforms, we want to default to mingw32 (gcc)\n # because msvc can't build blitz stuff.\n log.info('Setting mingw32 as default compiler for nt.')\n ccompiler._default_compilers = (('nt', 'mingw32'),) \\\n + ccompiler._default_compilers\n\n\n_distutils_new_compiler = new_compiler\ndef new_compiler (plat=None,\n compiler=None,\n 
verbose=0,\n dry_run=0,\n force=0):\n # Try first C compilers from numpy.distutils.\n if plat is None:\n plat = os.name\n try:\n if compiler is None:\n compiler = get_default_compiler(plat)\n (module_name, class_name, long_description) = compiler_class[compiler]\n except KeyError:\n msg = \"don't know how to compile C/C++ code on platform '%s'\" % plat\n if compiler is not None:\n msg = msg + \" with '%s' compiler\" % compiler\n raise DistutilsPlatformError(msg)\n module_name = \"numpy.distutils.\" + module_name\n try:\n __import__ (module_name)\n except ImportError:\n msg = str(get_exception())\n log.info('%s in numpy.distutils; trying from distutils',\n str(msg))\n module_name = module_name[6:]\n try:\n __import__(module_name)\n except ImportError:\n msg = str(get_exception())\n raise DistutilsModuleError(\"can't compile C/C++ code: unable to load module '%s'\" % \\\n module_name)\n try:\n module = sys.modules[module_name]\n klass = vars(module)[class_name]\n except KeyError:\n raise DistutilsModuleError((\"can't compile C/C++ code: unable to find class '%s' \" +\n \"in module '%s'\") % (class_name, module_name))\n compiler = klass(None, dry_run, force)\n log.debug('new_compiler returns %s' % (klass))\n return compiler\n\nccompiler.new_compiler = new_compiler\n\n_distutils_gen_lib_options = gen_lib_options\ndef gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):\n # the version of this function provided by CPython allows the following\n # to return lists, which are unpacked automatically:\n # - compiler.runtime_library_dir_option\n # our version extends the behavior to:\n # - compiler.library_dir_option\n # - compiler.library_option\n # - compiler.find_library_file\n r = _distutils_gen_lib_options(compiler, library_dirs,\n runtime_library_dirs, libraries)\n lib_opts = []\n for i in r:\n if is_sequence(i):\n lib_opts.extend(list(i))\n else:\n lib_opts.append(i)\n return lib_opts\nccompiler.gen_lib_options = gen_lib_options\n\n# Also fix up the various compiler modules, which do\n# from distutils.ccompiler import gen_lib_options\n# Don't bother with mwerks, as we don't support Classic Mac.\nfor _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:\n _m = sys.modules.get('distutils.' 
+ _cc + 'compiler')\n if _m is not None:\n setattr(_m, 'gen_lib_options', gen_lib_options)\n\n\n##Fix distutils.util.split_quoted:\n# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears\n# that removing this fix causes f2py problems on Windows XP (see ticket #723).\n# Specifically, on WinXP when gfortran is installed in a directory path, which\n# contains spaces, then f2py is unable to find it.\nimport string\n_wordchars_re = re.compile(r'[^\\\\\\'\\\"%s ]*' % string.whitespace)\n_squote_re = re.compile(r\"'(?:[^'\\\\]|\\\\.)*'\")\n_dquote_re = re.compile(r'\"(?:[^\"\\\\]|\\\\.)*\"')\n_has_white_re = re.compile(r'\\s')\ndef split_quoted(s):\n s = s.strip()\n words = []\n pos = 0\n\n while s:\n m = _wordchars_re.match(s, pos)\n end = m.end()\n if end == len(s):\n words.append(s[:end])\n break\n\n if s[end] in string.whitespace: # unescaped, unquoted whitespace: now\n words.append(s[:end]) # we definitely have a word delimiter\n s = s[end:].lstrip()\n pos = 0\n\n elif s[end] == '\\\\': # preserve whatever is being escaped;\n # will become part of the current word\n s = s[:end] + s[end+1:]\n pos = end+1\n\n else:\n if s[end] == \"'\": # slurp singly-quoted string\n m = _squote_re.match(s, end)\n elif s[end] == '\"': # slurp doubly-quoted string\n m = _dquote_re.match(s, end)\n else:\n raise RuntimeError(\"this can't happen (bad char '%c')\" % s[end])\n\n if m is None:\n raise ValueError(\"bad string (mismatched %s quotes?)\" % s[end])\n\n (beg, end) = m.span()\n if _has_white_re.search(s[beg+1:end-1]):\n s = s[:beg] + s[beg+1:end-1] + s[end:]\n pos = m.end() - 2\n else:\n # Keeping quotes when a quoted word does not contain\n # white-space. XXX: send a patch to distutils\n pos = m.end()\n\n if pos >= len(s):\n words.append(s)\n break\n\n return words\nccompiler.split_quoted = split_quoted\n##Fix distutils.util.split_quoted:\n", "path": "numpy/distutils/ccompiler.py" } ]
[ { "content": "from __future__ import division, absolute_import, print_function\n\nimport os\nimport re\nimport sys\nimport types\nimport shlex\nimport time\nimport subprocess\nfrom copy import copy\nfrom distutils import ccompiler\nfrom distutils.ccompiler import *\nfrom distutils.errors import DistutilsExecError, DistutilsModuleError, \\\n DistutilsPlatformError, CompileError\nfrom distutils.sysconfig import customize_compiler\nfrom distutils.version import LooseVersion\n\nfrom numpy.distutils import log\nfrom numpy.distutils.compat import get_exception\nfrom numpy.distutils.exec_command import (\n filepath_from_subprocess_output, forward_bytes_to_stdout\n)\nfrom numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \\\n get_num_build_jobs, \\\n _commandline_dep_string\n\n# globals for parallel build management\ntry:\n import threading\nexcept ImportError:\n import dummy_threading as threading\n_job_semaphore = None\n_global_lock = threading.Lock()\n_processing_files = set()\n\n\ndef _needs_build(obj, cc_args, extra_postargs, pp_opts):\n \"\"\"\n Check if an objects needs to be rebuild based on its dependencies\n\n Parameters\n ----------\n obj : str\n object file\n\n Returns\n -------\n bool\n \"\"\"\n # defined in unixcompiler.py\n dep_file = obj + '.d'\n if not os.path.exists(dep_file):\n return True\n\n # dep_file is a makefile containing 'object: dependencies'\n # formatted like posix shell (spaces escaped, \\ line continuations)\n # the last line contains the compiler commandline arguments as some\n # projects may compile an extension multiple times with different\n # arguments\n with open(dep_file, \"r\") as f:\n lines = f.readlines()\n\n cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts)\n last_cmdline = lines[-1]\n if last_cmdline != cmdline:\n return True\n\n contents = ''.join(lines[:-1])\n deps = [x for x in shlex.split(contents, posix=True)\n if x != \"\\n\" and not x.endswith(\":\")]\n\n try:\n t_obj = os.stat(obj).st_mtime\n\n # check if any of the dependencies is newer than the object\n # the dependencies includes the source used to create the object\n for f in deps:\n if os.stat(f).st_mtime > t_obj:\n return True\n except OSError:\n # no object counts as newer (shouldn't happen if dep_file exists)\n return True\n\n return False\n\n\ndef replace_method(klass, method_name, func):\n if sys.version_info[0] < 3:\n m = types.MethodType(func, None, klass)\n else:\n # Py3k does not have unbound method anymore, MethodType does not work\n m = lambda self, *args, **kw: func(self, *args, **kw)\n setattr(klass, method_name, m)\n\n\n######################################################################\n## Method that subclasses may redefine. But don't call this method,\n## it i private to CCompiler class and may return unexpected\n## results if used elsewhere. So, you have been warned..\n\ndef CCompiler_find_executables(self):\n \"\"\"\n Does nothing here, but is called by the get_version method and can be\n overridden by subclasses. 
In particular it is redefined in the `FCompiler`\n class where more documentation can be found.\n\n \"\"\"\n pass\n\n\nreplace_method(CCompiler, 'find_executables', CCompiler_find_executables)\n\n\n# Using customized CCompiler.spawn.\ndef CCompiler_spawn(self, cmd, display=None):\n \"\"\"\n Execute a command in a sub-process.\n\n Parameters\n ----------\n cmd : str\n The command to execute.\n display : str or sequence of str, optional\n The text to add to the log file kept by `numpy.distutils`.\n If not given, `display` is equal to `cmd`.\n\n Returns\n -------\n None\n\n Raises\n ------\n DistutilsExecError\n If the command failed, i.e. the exit status was not 0.\n\n \"\"\"\n if display is None:\n display = cmd\n if is_sequence(display):\n display = ' '.join(list(display))\n log.info(display)\n try:\n subprocess.check_output(cmd)\n except subprocess.CalledProcessError as exc:\n o = exc.output\n s = exc.returncode\n except OSError:\n # OSError doesn't have the same hooks for the exception\n # output, but exec_command() historically would use an\n # empty string for EnvironmentError (base class for\n # OSError)\n o = b''\n # status previously used by exec_command() for parent\n # of OSError\n s = 127\n else:\n # use a convenience return here so that any kind of\n # caught exception will execute the default code after the\n # try / except block, which handles various exceptions\n return None\n\n if is_sequence(cmd):\n cmd = ' '.join(list(cmd))\n\n forward_bytes_to_stdout(o)\n\n if re.search(b'Too many open files', o):\n msg = '\\nTry rerunning setup command until build succeeds.'\n else:\n msg = ''\n raise DistutilsExecError('Command \"%s\" failed with exit status %d%s' %\n (cmd, s, msg))\n\nreplace_method(CCompiler, 'spawn', CCompiler_spawn)\n\ndef CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):\n \"\"\"\n Return the name of the object files for the given source files.\n\n Parameters\n ----------\n source_filenames : list of str\n The list of paths to source files. Paths can be either relative or\n absolute, this is handled transparently.\n strip_dir : bool, optional\n Whether to strip the directory from the returned paths. If True,\n the file name prepended by `output_dir` is returned. 
Default is False.\n output_dir : str, optional\n If given, this path is prepended to the returned paths to the\n object files.\n\n Returns\n -------\n obj_names : list of str\n The list of paths to the object files corresponding to the source\n files in `source_filenames`.\n\n \"\"\"\n if output_dir is None:\n output_dir = ''\n obj_names = []\n for src_name in source_filenames:\n base, ext = os.path.splitext(os.path.normpath(src_name))\n base = os.path.splitdrive(base)[1] # Chop off the drive\n base = base[os.path.isabs(base):] # If abs, chop off leading /\n if base.startswith('..'):\n # Resolve starting relative path components, middle ones\n # (if any) have been handled by os.path.normpath above.\n i = base.rfind('..')+2\n d = base[:i]\n d = os.path.basename(os.path.abspath(d))\n base = d + base[i:]\n if ext not in self.src_extensions:\n raise UnknownFileError(\"unknown file type '%s' (from '%s')\" % (ext, src_name))\n if strip_dir:\n base = os.path.basename(base)\n obj_name = os.path.join(output_dir, base + self.obj_extension)\n obj_names.append(obj_name)\n return obj_names\n\nreplace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)\n\ndef CCompiler_compile(self, sources, output_dir=None, macros=None,\n include_dirs=None, debug=0, extra_preargs=None,\n extra_postargs=None, depends=None):\n \"\"\"\n Compile one or more source files.\n\n Please refer to the Python distutils API reference for more details.\n\n Parameters\n ----------\n sources : list of str\n A list of filenames\n output_dir : str, optional\n Path to the output directory.\n macros : list of tuples\n A list of macro definitions.\n include_dirs : list of str, optional\n The directories to add to the default include file search path for\n this compilation only.\n debug : bool, optional\n Whether or not to output debug symbols in or alongside the object\n file(s).\n extra_preargs, extra_postargs : ?\n Extra pre- and post-arguments.\n depends : list of str, optional\n A list of file names that all targets depend on.\n\n Returns\n -------\n objects : list of str\n A list of object file names, one per source file `sources`.\n\n Raises\n ------\n CompileError\n If compilation fails.\n\n \"\"\"\n # This method is effective only with Python >=2.3 distutils.\n # Any changes here should be applied also to fcompiler.compile\n # method to support pre Python 2.3 distutils.\n global _job_semaphore\n\n jobs = get_num_build_jobs()\n\n # setup semaphore to not exceed number of compile jobs when parallelized at\n # extension level (python >= 3.5)\n with _global_lock:\n if _job_semaphore is None:\n _job_semaphore = threading.Semaphore(jobs)\n\n if not sources:\n return []\n # FIXME:RELATIVE_IMPORT\n if sys.version_info[0] < 3:\n from .fcompiler import FCompiler, is_f_file, has_f90_header\n else:\n from numpy.distutils.fcompiler import (FCompiler, is_f_file,\n has_f90_header)\n if isinstance(self, FCompiler):\n display = []\n for fc in ['f77', 'f90', 'fix']:\n fcomp = getattr(self, 'compiler_'+fc)\n if fcomp is None:\n continue\n display.append(\"Fortran %s compiler: %s\" % (fc, ' '.join(fcomp)))\n display = '\\n'.join(display)\n else:\n ccomp = self.compiler_so\n display = \"C compiler: %s\\n\" % (' '.join(ccomp),)\n log.info(display)\n macros, objects, extra_postargs, pp_opts, build = \\\n self._setup_compile(output_dir, macros, include_dirs, sources,\n depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n display = \"compile options: '%s'\" % (' '.join(cc_args))\n if extra_postargs:\n 
display += \"\\nextra options: '%s'\" % (' '.join(extra_postargs))\n log.info(display)\n\n def single_compile(args):\n obj, (src, ext) = args\n if not _needs_build(obj, cc_args, extra_postargs, pp_opts):\n return\n\n # check if we are currently already processing the same object\n # happens when using the same source in multiple extensions\n while True:\n # need explicit lock as there is no atomic check and add with GIL\n with _global_lock:\n # file not being worked on, start working\n if obj not in _processing_files:\n _processing_files.add(obj)\n break\n # wait for the processing to end\n time.sleep(0.1)\n\n try:\n # retrieve slot from our #job semaphore and build\n with _job_semaphore:\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n finally:\n # register being done processing\n with _global_lock:\n _processing_files.remove(obj)\n\n\n if isinstance(self, FCompiler):\n objects_to_build = list(build.keys())\n f77_objects, other_objects = [], []\n for obj in objects:\n if obj in objects_to_build:\n src, ext = build[obj]\n if self.compiler_type=='absoft':\n obj = cyg2win32(obj)\n src = cyg2win32(src)\n if is_f_file(src) and not has_f90_header(src):\n f77_objects.append((obj, (src, ext)))\n else:\n other_objects.append((obj, (src, ext)))\n\n # f77 objects can be built in parallel\n build_items = f77_objects\n # build f90 modules serial, module files are generated during\n # compilation and may be used by files later in the list so the\n # ordering is important\n for o in other_objects:\n single_compile(o)\n else:\n build_items = build.items()\n\n if len(build) > 1 and jobs > 1:\n # build parallel\n import multiprocessing.pool\n pool = multiprocessing.pool.ThreadPool(jobs)\n pool.map(single_compile, build_items)\n pool.close()\n else:\n # build serial\n for o in build_items:\n single_compile(o)\n\n # Return *all* object filenames, not just the ones we just built.\n return objects\n\nreplace_method(CCompiler, 'compile', CCompiler_compile)\n\ndef CCompiler_customize_cmd(self, cmd, ignore=()):\n \"\"\"\n Customize compiler using distutils command.\n\n Parameters\n ----------\n cmd : class instance\n An instance inheriting from `distutils.cmd.Command`.\n ignore : sequence of str, optional\n List of `CCompiler` commands (without ``'set_'``) that should not be\n altered. 
Strings that are checked for are:\n ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',\n 'rpath', 'link_objects')``.\n\n Returns\n -------\n None\n\n \"\"\"\n log.info('customize %s using %s' % (self.__class__.__name__,\n cmd.__class__.__name__))\n def allow(attr):\n return getattr(cmd, attr, None) is not None and attr not in ignore\n\n if allow('include_dirs'):\n self.set_include_dirs(cmd.include_dirs)\n if allow('define'):\n for (name, value) in cmd.define:\n self.define_macro(name, value)\n if allow('undef'):\n for macro in cmd.undef:\n self.undefine_macro(macro)\n if allow('libraries'):\n self.set_libraries(self.libraries + cmd.libraries)\n if allow('library_dirs'):\n self.set_library_dirs(self.library_dirs + cmd.library_dirs)\n if allow('rpath'):\n self.set_runtime_library_dirs(cmd.rpath)\n if allow('link_objects'):\n self.set_link_objects(cmd.link_objects)\n\nreplace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)\n\ndef _compiler_to_string(compiler):\n props = []\n mx = 0\n keys = list(compiler.executables.keys())\n for key in ['version', 'libraries', 'library_dirs',\n 'object_switch', 'compile_switch',\n 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']:\n if key not in keys:\n keys.append(key)\n for key in keys:\n if hasattr(compiler, key):\n v = getattr(compiler, key)\n mx = max(mx, len(key))\n props.append((key, repr(v)))\n fmt = '%-' + repr(mx+1) + 's = %s'\n lines = [fmt % prop for prop in props]\n return '\\n'.join(lines)\n\ndef CCompiler_show_customization(self):\n \"\"\"\n Print the compiler customizations to stdout.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n Notes\n -----\n Printing is only done if the distutils log threshold is < 2.\n\n \"\"\"\n if 0:\n for attrname in ['include_dirs', 'define', 'undef',\n 'libraries', 'library_dirs',\n 'rpath', 'link_objects']:\n attr = getattr(self, attrname, None)\n if not attr:\n continue\n log.info(\"compiler '%s' is set to %s\" % (attrname, attr))\n try:\n self.get_version()\n except Exception:\n pass\n if log._global_log.threshold<2:\n print('*'*80)\n print(self.__class__)\n print(_compiler_to_string(self))\n print('*'*80)\n\nreplace_method(CCompiler, 'show_customization', CCompiler_show_customization)\n\ndef CCompiler_customize(self, dist, need_cxx=0):\n \"\"\"\n Do any platform-specific customization of a compiler instance.\n\n This method calls `distutils.sysconfig.customize_compiler` for\n platform-specific customization, as well as optionally remove a flag\n to suppress spurious warnings in case C++ code is being compiled.\n\n Parameters\n ----------\n dist : object\n This parameter is not used for anything.\n need_cxx : bool, optional\n Whether or not C++ has to be compiled. If so (True), the\n ``\"-Wstrict-prototypes\"`` option is removed to prevent spurious\n warnings. Default is False.\n\n Returns\n -------\n None\n\n Notes\n -----\n All the default options used by distutils can be extracted with::\n\n from distutils import sysconfig\n sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',\n 'CCSHARED', 'LDSHARED', 'SO')\n\n \"\"\"\n # See FCompiler.customize for suggested usage.\n log.info('customize %s' % (self.__class__.__name__))\n customize_compiler(self)\n if need_cxx:\n # In general, distutils uses -Wstrict-prototypes, but this option is\n # not valid for C++ code, only for C. 
Remove it if it's there to\n # avoid a spurious warning on every compilation.\n try:\n self.compiler_so.remove('-Wstrict-prototypes')\n except (AttributeError, ValueError):\n pass\n\n if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:\n if not self.compiler_cxx:\n if self.compiler[0].startswith('gcc'):\n a, b = 'gcc', 'g++'\n else:\n a, b = 'cc', 'c++'\n self.compiler_cxx = [self.compiler[0].replace(a, b)]\\\n + self.compiler[1:]\n else:\n if hasattr(self, 'compiler'):\n log.warn(\"#### %s #######\" % (self.compiler,))\n if not hasattr(self, 'compiler_cxx'):\n log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)\n\n\n # check if compiler supports gcc style automatic dependencies\n # run on every extension so skip for known good compilers\n if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or\n 'g++' in self.compiler[0] or\n 'clang' in self.compiler[0]):\n self._auto_depends = True\n elif os.name == 'posix':\n import tempfile\n import shutil\n tmpdir = tempfile.mkdtemp()\n try:\n fn = os.path.join(tmpdir, \"file.c\")\n with open(fn, \"w\") as f:\n f.write(\"int a;\\n\")\n self.compile([fn], output_dir=tmpdir,\n extra_preargs=['-MMD', '-MF', fn + '.d'])\n self._auto_depends = True\n except CompileError:\n self._auto_depends = False\n finally:\n shutil.rmtree(tmpdir)\n\n return\n\nreplace_method(CCompiler, 'customize', CCompiler_customize)\n\ndef simple_version_match(pat=r'[-.\\d]+', ignore='', start=''):\n \"\"\"\n Simple matching of version numbers, for use in CCompiler and FCompiler.\n\n Parameters\n ----------\n pat : str, optional\n A regular expression matching version numbers.\n Default is ``r'[-.\\\\d]+'``.\n ignore : str, optional\n A regular expression matching patterns to skip.\n Default is ``''``, in which case nothing is skipped.\n start : str, optional\n A regular expression matching the start of where to start looking\n for version numbers.\n Default is ``''``, in which case searching is started at the\n beginning of the version string given to `matcher`.\n\n Returns\n -------\n matcher : callable\n A function that is appropriate to use as the ``.version_match``\n attribute of a `CCompiler` class. `matcher` takes a single parameter,\n a version string.\n\n \"\"\"\n def matcher(self, version_string):\n # version string may appear in the second line, so getting rid\n # of new lines:\n version_string = version_string.replace('\\n', ' ')\n pos = 0\n if start:\n m = re.match(start, version_string)\n if not m:\n return None\n pos = m.end()\n while True:\n m = re.search(pat, version_string[pos:])\n if not m:\n return None\n if ignore and re.match(ignore, m.group(0)):\n pos = m.end()\n continue\n break\n return m.group(0)\n return matcher\n\ndef CCompiler_get_version(self, force=False, ok_status=[0]):\n \"\"\"\n Return compiler version, or None if compiler is not available.\n\n Parameters\n ----------\n force : bool, optional\n If True, force a new determination of the version, even if the\n compiler already has a version attribute. Default is False.\n ok_status : list of int, optional\n The list of status values returned by the version look-up process\n for which a version string is returned. If the status value is not\n in `ok_status`, None is returned. 
Default is ``[0]``.\n\n Returns\n -------\n version : str or None\n Version string, in the format of `distutils.version.LooseVersion`.\n\n \"\"\"\n if not force and hasattr(self, 'version'):\n return self.version\n self.find_executables()\n try:\n version_cmd = self.version_cmd\n except AttributeError:\n return None\n if not version_cmd or not version_cmd[0]:\n return None\n try:\n matcher = self.version_match\n except AttributeError:\n try:\n pat = self.version_pattern\n except AttributeError:\n return None\n def matcher(version_string):\n m = re.match(pat, version_string)\n if not m:\n return None\n version = m.group('version')\n return version\n\n try:\n output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as exc:\n output = exc.output\n status = exc.returncode\n except OSError:\n # match the historical returns for a parent\n # exception class caught by exec_command()\n status = 127\n output = b''\n else:\n # output isn't actually a filepath but we do this\n # for now to match previous distutils behavior\n output = filepath_from_subprocess_output(output)\n status = 0\n\n version = None\n if status in ok_status:\n version = matcher(output)\n if version:\n version = LooseVersion(version)\n self.version = version\n return version\n\nreplace_method(CCompiler, 'get_version', CCompiler_get_version)\n\ndef CCompiler_cxx_compiler(self):\n \"\"\"\n Return the C++ compiler.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n cxx : class instance\n The C++ compiler, as a `CCompiler` instance.\n\n \"\"\"\n if self.compiler_type in ('msvc', 'intelw', 'intelemw'):\n return self\n\n cxx = copy(self)\n cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:]\n if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]:\n # AIX needs the ld_so_aix script included with Python\n cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \\\n + cxx.linker_so[2:]\n else:\n cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]\n return cxx\n\nreplace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)\n\ncompiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',\n \"Intel C Compiler for 32-bit applications\")\ncompiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',\n \"Intel C Itanium Compiler for Itanium-based applications\")\ncompiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',\n \"Intel C Compiler for 64-bit applications\")\ncompiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',\n \"Intel C Compiler for 32-bit applications on Windows\")\ncompiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',\n \"Intel C Compiler for 64-bit applications on Windows\")\ncompiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',\n \"PathScale Compiler for SiCortex-based applications\")\nccompiler._default_compilers += (('linux.*', 'intel'),\n ('linux.*', 'intele'),\n ('linux.*', 'intelem'),\n ('linux.*', 'pathcc'),\n ('nt', 'intelw'),\n ('nt', 'intelemw'))\n\nif sys.platform == 'win32':\n compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',\n \"Mingw32 port of GNU C Compiler for Win32\"\\\n \"(for MSC built Python)\")\n if mingw32():\n # On windows platforms, we want to default to mingw32 (gcc)\n # because msvc can't build blitz stuff.\n log.info('Setting mingw32 as default compiler for nt.')\n ccompiler._default_compilers = (('nt', 'mingw32'),) \\\n + ccompiler._default_compilers\n\n\n_distutils_new_compiler = new_compiler\ndef new_compiler 
(plat=None,\n compiler=None,\n verbose=0,\n dry_run=0,\n force=0):\n # Try first C compilers from numpy.distutils.\n if plat is None:\n plat = os.name\n try:\n if compiler is None:\n compiler = get_default_compiler(plat)\n (module_name, class_name, long_description) = compiler_class[compiler]\n except KeyError:\n msg = \"don't know how to compile C/C++ code on platform '%s'\" % plat\n if compiler is not None:\n msg = msg + \" with '%s' compiler\" % compiler\n raise DistutilsPlatformError(msg)\n module_name = \"numpy.distutils.\" + module_name\n try:\n __import__ (module_name)\n except ImportError:\n msg = str(get_exception())\n log.info('%s in numpy.distutils; trying from distutils',\n str(msg))\n module_name = module_name[6:]\n try:\n __import__(module_name)\n except ImportError:\n msg = str(get_exception())\n raise DistutilsModuleError(\"can't compile C/C++ code: unable to load module '%s'\" % \\\n module_name)\n try:\n module = sys.modules[module_name]\n klass = vars(module)[class_name]\n except KeyError:\n raise DistutilsModuleError((\"can't compile C/C++ code: unable to find class '%s' \" +\n \"in module '%s'\") % (class_name, module_name))\n compiler = klass(None, dry_run, force)\n log.debug('new_compiler returns %s' % (klass))\n return compiler\n\nccompiler.new_compiler = new_compiler\n\n_distutils_gen_lib_options = gen_lib_options\ndef gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):\n # the version of this function provided by CPython allows the following\n # to return lists, which are unpacked automatically:\n # - compiler.runtime_library_dir_option\n # our version extends the behavior to:\n # - compiler.library_dir_option\n # - compiler.library_option\n # - compiler.find_library_file\n r = _distutils_gen_lib_options(compiler, library_dirs,\n runtime_library_dirs, libraries)\n lib_opts = []\n for i in r:\n if is_sequence(i):\n lib_opts.extend(list(i))\n else:\n lib_opts.append(i)\n return lib_opts\nccompiler.gen_lib_options = gen_lib_options\n\n# Also fix up the various compiler modules, which do\n# from distutils.ccompiler import gen_lib_options\n# Don't bother with mwerks, as we don't support Classic Mac.\nfor _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:\n _m = sys.modules.get('distutils.' 
+ _cc + 'compiler')\n if _m is not None:\n setattr(_m, 'gen_lib_options', gen_lib_options)\n\n\n##Fix distutils.util.split_quoted:\n# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears\n# that removing this fix causes f2py problems on Windows XP (see ticket #723).\n# Specifically, on WinXP when gfortran is installed in a directory path, which\n# contains spaces, then f2py is unable to find it.\nimport string\n_wordchars_re = re.compile(r'[^\\\\\\'\\\"%s ]*' % string.whitespace)\n_squote_re = re.compile(r\"'(?:[^'\\\\]|\\\\.)*'\")\n_dquote_re = re.compile(r'\"(?:[^\"\\\\]|\\\\.)*\"')\n_has_white_re = re.compile(r'\\s')\ndef split_quoted(s):\n s = s.strip()\n words = []\n pos = 0\n\n while s:\n m = _wordchars_re.match(s, pos)\n end = m.end()\n if end == len(s):\n words.append(s[:end])\n break\n\n if s[end] in string.whitespace: # unescaped, unquoted whitespace: now\n words.append(s[:end]) # we definitely have a word delimiter\n s = s[end:].lstrip()\n pos = 0\n\n elif s[end] == '\\\\': # preserve whatever is being escaped;\n # will become part of the current word\n s = s[:end] + s[end+1:]\n pos = end+1\n\n else:\n if s[end] == \"'\": # slurp singly-quoted string\n m = _squote_re.match(s, end)\n elif s[end] == '\"': # slurp doubly-quoted string\n m = _dquote_re.match(s, end)\n else:\n raise RuntimeError(\"this can't happen (bad char '%c')\" % s[end])\n\n if m is None:\n raise ValueError(\"bad string (mismatched %s quotes?)\" % s[end])\n\n (beg, end) = m.span()\n if _has_white_re.search(s[beg+1:end-1]):\n s = s[:beg] + s[beg+1:end-1] + s[end:]\n pos = m.end() - 2\n else:\n # Keeping quotes when a quoted word does not contain\n # white-space. XXX: send a patch to distutils\n pos = m.end()\n\n if pos >= len(s):\n words.append(s)\n break\n\n return words\nccompiler.split_quoted = split_quoted\n##Fix distutils.util.split_quoted:\n", "path": "numpy/distutils/ccompiler.py" } ]
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index 100d0d06995f..552b9566f773 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -639,7 +639,7 @@ def matcher(version_string):
             return version
 
     try:
-        output = subprocess.check_output(version_cmd)
+        output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as exc:
         output = exc.output
         status = exc.returncode
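The one-line patch above restores behaviour that appears to have been lost when numpy.distutils moved from `exec_command()` to `subprocess`: `ifort` writes its version banner to stderr, and with `check_output(version_cmd)` only stdout is captured, so the matcher sees an empty string and `get_version()` reports no compiler. Merging stderr into the captured output makes the banner visible again. A minimal sketch of the difference — the command is a placeholder (the real one comes from the Intel `FCompiler` definition) and actually running it requires `ifort` on the PATH:

```python
import subprocess

version_cmd = ['ifort', '-V']  # placeholder; numpy assembles this from the compiler class

# Old behaviour: only stdout is captured, which is typically empty for ifort.
banner_stdout_only = subprocess.check_output(version_cmd)

# Patched behaviour: stderr is folded into the captured output, so the
# version banner reaches the regex matcher again.
banner_merged = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT)

print(len(banner_stdout_only), len(banner_merged))
```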
kivy__kivy-4785
RecycleBoxLayout NameError
``` python
from random import sample
from string import ascii_lowercase

from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout

kv = """
<Row@BoxLayout>:
    value: ''
    size_hint: None, None
    size: self.minimum_size
    Label:
        text: root.value
        size_hint: None, None
        size: self.texture_size

<Test>:
    rv: rv
    orientation: 'vertical'
    Button:
        text: 'Populate list'
        on_press: root.populate()
    RecycleView:
        id: rv
        viewclass: 'Row'
        RecycleBoxLayout:
            default_size: None, None
            size_hint_y: None
            height: self.minimum_height
            orientation: 'vertical'
"""

Builder.load_string(kv)


class Test(BoxLayout):
    def populate(self):
        self.rv.data = [{'value': ''.join(sample(ascii_lowercase, 6))}
                        for x in range(50)]


class TestApp(App):
    def build(self):
        return Test()


if __name__ == '__main__':
    TestApp().run()
```
``` python
Traceback (most recent call last):
  File "E:\dev\prs\kivy\examples\widgets\recycleview\basic_data.py", line 49, in <module>
    TestApp().run()
  File "E:\dev\prs\kivy\kivy\app.py", line 828, in run
    runTouchApp()
  File "E:\dev\prs\kivy\kivy\base.py", line 487, in runTouchApp
    EventLoop.window.mainloop()
  File "E:\dev\prs\kivy\kivy\core\window\window_sdl2.py", line 633, in mainloop
    self._mainloop()
  File "E:\dev\prs\kivy\kivy\core\window\window_sdl2.py", line 388, in _mainloop
    EventLoop.idle()
  File "E:\dev\prs\kivy\kivy\base.py", line 336, in idle
    Clock.tick_draw()
  File "E:\dev\prs\kivy\kivy\clock.py", line 528, in tick_draw
    self._process_events_before_frame()
  File "E:\dev\prs\kivy\kivy\clock.py", line 678, in _process_events_before_frame
    event.tick(self._last_tick, remove)
  File "E:\dev\prs\kivy\kivy\clock.py", line 412, in tick
    ret = callback(self._dt)
  File "E:\dev\prs\kivy\kivy\uix\recycleview\__init__.py", line 109, in refresh_views
    lm.compute_layout(data, f)
  File "E:\dev\prs\kivy\kivy\uix\recycleboxlayout.py", line 88, in compute_layout
    changed and not self._update_sizes(changed)):
  File "E:\dev\prs\kivy\kivy\uix\recycleboxlayout.py", line 81, in _update_sizes
    return relayout
NameError: name 'relayout' is not defined
```
[ { "content": "\"\"\"\nRecycleBoxLayout\n================\n\n.. versionadded:: 1.9.2\n\n.. warning::\n This module is highly experimental, its API may change in the future and\n the documentation is not complete at this time.\n\nThe RecycleBoxLayout is designed to provide a\n:class:`~kivy.uix.boxlayout.BoxLayout` type layout when used with the\n:class:`~kivy.uix.recycleview.RecycleView` widget. Please refer to the\n:mod:`~kivy.uix.recycleview` module documentation for more information.\n\n\"\"\"\n\nfrom kivy.uix.recyclelayout import RecycleLayout\nfrom kivy.uix.boxlayout import BoxLayout\n\n__all__ = ('RecycleBoxLayout', )\n\n\nclass RecycleBoxLayout(RecycleLayout, BoxLayout):\n\n _rv_positions = None\n\n def __init__(self, **kwargs):\n super(RecycleBoxLayout, self).__init__(**kwargs)\n self.funbind('children', self._trigger_layout)\n\n def _update_sizes(self, changed):\n horizontal = self.orientation == 'horizontal'\n padding_left, padding_top, padding_right, padding_bottom = self.padding\n padding_x = padding_left + padding_right\n padding_y = padding_top + padding_bottom\n selfw = self.width\n selfh = self.height\n layout_w = max(0, selfw - padding_x)\n layout_h = max(0, selfh - padding_y)\n cx = self.x + padding_left\n cy = self.y + padding_bottom\n view_opts = self.view_opts\n remove_view = self.remove_view\n\n for (index, widget, (w, h), (wn, hn), (shw, shh), (shnw, shnh),\n (shw_min, shh_min), (shwn_min, shhn_min), (shw_max, shh_max),\n (shwn_max, shhn_max), ph, phn) in changed:\n if (horizontal and\n (shw != shnw or w != wn or shw_min != shwn_min or\n shw_max != shwn_max) or\n not horizontal and\n (shh != shnh or h != hn or shh_min != shhn_min or\n shh_max != shhn_max)):\n return True\n\n remove_view(widget, index)\n opt = view_opts[index]\n if horizontal:\n wo, ho = opt['size']\n if shnh is not None:\n _, h = opt['size'] = [wo, shnh * layout_h]\n else:\n h = ho\n\n xo, yo = opt['pos']\n for key, value in phn.items():\n posy = value * layout_h\n if key == 'y':\n yo = posy + cy\n elif key == 'top':\n yo = posy - h\n elif key == 'center_y':\n yo = posy - (h / 2.)\n opt['pos'] = [xo, yo]\n else:\n wo, ho = opt['size']\n if shnw is not None:\n w, _ = opt['size'] = [shnw * layout_w, ho]\n else:\n w = wo\n\n xo, yo = opt['pos']\n for key, value in phn.items():\n posx = value * layout_w\n if key == 'x':\n xo = posx + cx\n elif key == 'right':\n xo = posx - w\n elif key == 'center_x':\n xo = posx - (w / 2.)\n opt['pos'] = [xo, yo]\n\n return relayout\n\n def compute_layout(self, data, flags):\n super(RecycleBoxLayout, self).compute_layout(data, flags)\n\n changed = self._changed_views\n if (changed is None or\n changed and not self._update_sizes(changed)):\n return\n\n self.clear_layout()\n self._rv_positions = None\n if not data:\n l, t, r, b = self.padding\n self.minimum_size = l + r, t + b\n return\n\n view_opts = self.view_opts\n n = len(view_opts)\n for i, x, y, w, h in self._iterate_layout(\n [(opt['size'], opt['size_hint'], opt['pos_hint'],\n opt['size_hint_min'], opt['size_hint_max']) for\n opt in reversed(view_opts)]):\n opt = view_opts[n - i - 1]\n shw, shh = opt['size_hint']\n opt['pos'] = x, y\n wo, ho = opt['size']\n # layout won't/shouldn't change previous size if size_hint is None\n # which is what w/h being None means.\n opt['size'] = [(wo if shw is None else w),\n (ho if shh is None else h)]\n\n spacing = self.spacing\n pos = self._rv_positions = [None, ] * len(data)\n\n if self.orientation == 'horizontal':\n pos[0] = self.x\n last = pos[0] + self.padding[0] + 
view_opts[0]['size'][0] + \\\n spacing / 2.\n for i, val in enumerate(view_opts[1:], 1):\n pos[i] = last\n last += val['size'][0] + spacing\n else:\n last = pos[-1] = \\\n self.y + self.height - self.padding[1] - \\\n view_opts[0]['size'][1] - spacing / 2.\n n = len(view_opts)\n for i, val in enumerate(view_opts[1:], 1):\n last -= spacing + val['size'][1]\n pos[n - 1 - i] = last\n\n def get_view_index_at(self, pos):\n calc_pos = self._rv_positions\n if not calc_pos:\n return 0\n\n x, y = pos\n\n if self.orientation == 'horizontal':\n if x >= calc_pos[-1] or len(calc_pos) == 1:\n return len(calc_pos) - 1\n\n ix = 0\n for val in calc_pos[1:]:\n if x < val:\n return ix\n ix += 1\n else:\n if y >= calc_pos[-1] or len(calc_pos) == 1:\n return 0\n\n iy = 0\n for val in calc_pos[1:]:\n if y < val:\n return len(calc_pos) - iy - 1\n iy += 1\n\n assert False\n\n def compute_visible_views(self, data, viewport):\n if self._rv_positions is None or not data:\n return []\n\n x, y, w, h = viewport\n at_idx = self.get_view_index_at\n if self.orientation == 'horizontal':\n a, b = at_idx((x, y)), at_idx((x + w, y))\n else:\n a, b = at_idx((x, y + h)), at_idx((x, y))\n return list(range(a, b + 1))\n", "path": "kivy/uix/recycleboxlayout.py" } ]
[ { "content": "\"\"\"\nRecycleBoxLayout\n================\n\n.. versionadded:: 1.9.2\n\n.. warning::\n This module is highly experimental, its API may change in the future and\n the documentation is not complete at this time.\n\nThe RecycleBoxLayout is designed to provide a\n:class:`~kivy.uix.boxlayout.BoxLayout` type layout when used with the\n:class:`~kivy.uix.recycleview.RecycleView` widget. Please refer to the\n:mod:`~kivy.uix.recycleview` module documentation for more information.\n\n\"\"\"\n\nfrom kivy.uix.recyclelayout import RecycleLayout\nfrom kivy.uix.boxlayout import BoxLayout\n\n__all__ = ('RecycleBoxLayout', )\n\n\nclass RecycleBoxLayout(RecycleLayout, BoxLayout):\n\n _rv_positions = None\n\n def __init__(self, **kwargs):\n super(RecycleBoxLayout, self).__init__(**kwargs)\n self.funbind('children', self._trigger_layout)\n\n def _update_sizes(self, changed):\n horizontal = self.orientation == 'horizontal'\n padding_left, padding_top, padding_right, padding_bottom = self.padding\n padding_x = padding_left + padding_right\n padding_y = padding_top + padding_bottom\n selfw = self.width\n selfh = self.height\n layout_w = max(0, selfw - padding_x)\n layout_h = max(0, selfh - padding_y)\n cx = self.x + padding_left\n cy = self.y + padding_bottom\n view_opts = self.view_opts\n remove_view = self.remove_view\n\n for (index, widget, (w, h), (wn, hn), (shw, shh), (shnw, shnh),\n (shw_min, shh_min), (shwn_min, shhn_min), (shw_max, shh_max),\n (shwn_max, shhn_max), ph, phn) in changed:\n if (horizontal and\n (shw != shnw or w != wn or shw_min != shwn_min or\n shw_max != shwn_max) or\n not horizontal and\n (shh != shnh or h != hn or shh_min != shhn_min or\n shh_max != shhn_max)):\n return True\n\n remove_view(widget, index)\n opt = view_opts[index]\n if horizontal:\n wo, ho = opt['size']\n if shnh is not None:\n _, h = opt['size'] = [wo, shnh * layout_h]\n else:\n h = ho\n\n xo, yo = opt['pos']\n for key, value in phn.items():\n posy = value * layout_h\n if key == 'y':\n yo = posy + cy\n elif key == 'top':\n yo = posy - h\n elif key == 'center_y':\n yo = posy - (h / 2.)\n opt['pos'] = [xo, yo]\n else:\n wo, ho = opt['size']\n if shnw is not None:\n w, _ = opt['size'] = [shnw * layout_w, ho]\n else:\n w = wo\n\n xo, yo = opt['pos']\n for key, value in phn.items():\n posx = value * layout_w\n if key == 'x':\n xo = posx + cx\n elif key == 'right':\n xo = posx - w\n elif key == 'center_x':\n xo = posx - (w / 2.)\n opt['pos'] = [xo, yo]\n\n return False\n\n def compute_layout(self, data, flags):\n super(RecycleBoxLayout, self).compute_layout(data, flags)\n\n changed = self._changed_views\n if (changed is None or\n changed and not self._update_sizes(changed)):\n return\n\n self.clear_layout()\n self._rv_positions = None\n if not data:\n l, t, r, b = self.padding\n self.minimum_size = l + r, t + b\n return\n\n view_opts = self.view_opts\n n = len(view_opts)\n for i, x, y, w, h in self._iterate_layout(\n [(opt['size'], opt['size_hint'], opt['pos_hint'],\n opt['size_hint_min'], opt['size_hint_max']) for\n opt in reversed(view_opts)]):\n opt = view_opts[n - i - 1]\n shw, shh = opt['size_hint']\n opt['pos'] = x, y\n wo, ho = opt['size']\n # layout won't/shouldn't change previous size if size_hint is None\n # which is what w/h being None means.\n opt['size'] = [(wo if shw is None else w),\n (ho if shh is None else h)]\n\n spacing = self.spacing\n pos = self._rv_positions = [None, ] * len(data)\n\n if self.orientation == 'horizontal':\n pos[0] = self.x\n last = pos[0] + self.padding[0] + 
view_opts[0]['size'][0] + \\\n spacing / 2.\n for i, val in enumerate(view_opts[1:], 1):\n pos[i] = last\n last += val['size'][0] + spacing\n else:\n last = pos[-1] = \\\n self.y + self.height - self.padding[1] - \\\n view_opts[0]['size'][1] - spacing / 2.\n n = len(view_opts)\n for i, val in enumerate(view_opts[1:], 1):\n last -= spacing + val['size'][1]\n pos[n - 1 - i] = last\n\n def get_view_index_at(self, pos):\n calc_pos = self._rv_positions\n if not calc_pos:\n return 0\n\n x, y = pos\n\n if self.orientation == 'horizontal':\n if x >= calc_pos[-1] or len(calc_pos) == 1:\n return len(calc_pos) - 1\n\n ix = 0\n for val in calc_pos[1:]:\n if x < val:\n return ix\n ix += 1\n else:\n if y >= calc_pos[-1] or len(calc_pos) == 1:\n return 0\n\n iy = 0\n for val in calc_pos[1:]:\n if y < val:\n return len(calc_pos) - iy - 1\n iy += 1\n\n assert False\n\n def compute_visible_views(self, data, viewport):\n if self._rv_positions is None or not data:\n return []\n\n x, y, w, h = viewport\n at_idx = self.get_view_index_at\n if self.orientation == 'horizontal':\n a, b = at_idx((x, y)), at_idx((x + w, y))\n else:\n a, b = at_idx((x, y + h)), at_idx((x, y))\n return list(range(a, b + 1))\n", "path": "kivy/uix/recycleboxlayout.py" } ]
diff --git a/kivy/uix/recycleboxlayout.py b/kivy/uix/recycleboxlayout.py index 522a59c484..a4888ff93b 100644 --- a/kivy/uix/recycleboxlayout.py +++ b/kivy/uix/recycleboxlayout.py @@ -91,7 +91,7 @@ def _update_sizes(self, changed): xo = posx - (w / 2.) opt['pos'] = [xo, yo] - return relayout + return False def compute_layout(self, data, flags): super(RecycleBoxLayout, self).compute_layout(data, flags)
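For context, `RecycleBoxLayout` is normally driven indirectly through a `RecycleView`; the method patched above (`_update_sizes`) and the view-index helpers run as the view scrolls. Below is a minimal usage sketch along the lines of the standard Kivy RecycleView example — the widget names and sizes are illustrative and not taken from this record:

```python
from kivy.app import App
from kivy.lang import Builder

# A vertical RecycleBoxLayout inside a RecycleView; scrolling exercises
# compute_visible_views() / get_view_index_at() on the layout.
KV = """
RecycleView:
    data: [{'text': str(i)} for i in range(100)]
    viewclass: 'Label'
    RecycleBoxLayout:
        orientation: 'vertical'
        default_size: None, dp(40)
        default_size_hint: 1, None
        size_hint_y: None
        height: self.minimum_height
"""

class DemoApp(App):
    def build(self):
        return Builder.load_string(KV)

if __name__ == "__main__":
    DemoApp().run()
```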
Netflix__lemur-3166
DNS Providers list doesn't show type In the DNS Providers list, there is a column for the provider type, but it's always empty. Looking at the code and the API requests, the issue seems to be with the dns_providers API call, which returns the list of all providers: there should be a providerType value in the JSON, but it's not there. A quick glance at the `DnsProvidersNestedOutputSchema` shows that the value is called `providerType`, but in the database the field is called `provider_type` (just as `api_endpoint` keeps the name `api_endpoint` in the OutputSchema), so it's probably just mislabeled in the OutputSchema and needs to be adjusted there, and maybe in the angular template as well.
[ { "content": "from marshmallow import fields\n\nfrom lemur.common.fields import ArrowDateTime\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\n\n\nclass DnsProvidersNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n id = fields.Integer()\n name = fields.String()\n providerType = fields.String()\n description = fields.String()\n credentials = fields.String()\n api_endpoint = fields.String()\n date_created = ArrowDateTime()\n\n\nclass DnsProvidersNestedInputSchema(LemurInputSchema):\n __envelope__ = False\n name = fields.String()\n description = fields.String()\n provider_type = fields.Dict()\n\n\ndns_provider_output_schema = DnsProvidersNestedOutputSchema()\n\ndns_provider_input_schema = DnsProvidersNestedInputSchema()\n", "path": "lemur/dns_providers/schemas.py" } ]
[ { "content": "from marshmallow import fields\n\nfrom lemur.common.fields import ArrowDateTime\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\n\n\nclass DnsProvidersNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n id = fields.Integer()\n name = fields.String()\n provider_type = fields.String()\n description = fields.String()\n credentials = fields.String()\n api_endpoint = fields.String()\n date_created = ArrowDateTime()\n\n\nclass DnsProvidersNestedInputSchema(LemurInputSchema):\n __envelope__ = False\n name = fields.String()\n description = fields.String()\n provider_type = fields.Dict()\n\n\ndns_provider_output_schema = DnsProvidersNestedOutputSchema()\n\ndns_provider_input_schema = DnsProvidersNestedInputSchema()\n", "path": "lemur/dns_providers/schemas.py" } ]
diff --git a/lemur/dns_providers/schemas.py b/lemur/dns_providers/schemas.py index 05b6471d28..af9377b3fa 100644 --- a/lemur/dns_providers/schemas.py +++ b/lemur/dns_providers/schemas.py @@ -8,7 +8,7 @@ class DnsProvidersNestedOutputSchema(LemurOutputSchema): __envelope__ = False id = fields.Integer() name = fields.String() - providerType = fields.String() + provider_type = fields.String() description = fields.String() credentials = fields.String() api_endpoint = fields.String()
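The one-line rename in the diff above matters because marshmallow resolves model attributes by the schema field name unless `attribute=` or `data_key=` overrides it. A small, self-contained sketch of that behaviour using marshmallow 3 semantics — the `DnsProvider` namedtuple and schema classes below are illustrative stand-ins, not Lemur's actual classes:

```python
from collections import namedtuple
from marshmallow import Schema, fields

DnsProvider = namedtuple("DnsProvider", ["name", "provider_type"])

class BrokenSchema(Schema):
    name = fields.String()
    providerType = fields.String()   # no matching attribute on the model

class FixedSchema(Schema):
    name = fields.String()
    provider_type = fields.String()  # matches the database column name

provider = DnsProvider(name="route53", provider_type="aws")
print(BrokenSchema().dump(provider))  # provider type silently missing from the output
print(FixedSchema().dump(provider))   # includes 'provider_type': 'aws'
```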
ipython__ipython-1942
script magics cause terminal spam since the addition of script magics in cdde5bba8 one gets a _which_ error message outputted to the terminal on each start: e.g. if no python3 is available: ``` $ ipython which: no python3 in (/scratch/jtaylor/progs/localinst/lib/ccache:/scratch/jtaylor/progs/localinst/bin:/scratch/jtaylor/progs/Reflex/software/bin:/usr/lib/qt-3.3/bin:/usr/kerberos/bin:/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin:/home/jtaylor/gasgano/bin:/scisoft/bin:/home/jtaylor/scripts:/scratch/jtaylor/progs/root/bin) ```
[ { "content": "\"\"\"Posix-specific implementation of process utilities.\n\nThis file is only meant to be imported by process.py, not by end-users.\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Stdlib\nimport subprocess as sp\nimport sys\n\nfrom IPython.external import pexpect\n\n# Our own\nfrom .autoattr import auto_attr\nfrom ._process_common import getoutput, arg_split\nfrom IPython.utils import text\nfrom IPython.utils import py3compat\nfrom IPython.utils.encoding import DEFAULT_ENCODING\n\n#-----------------------------------------------------------------------------\n# Function definitions\n#-----------------------------------------------------------------------------\n\ndef _find_cmd(cmd):\n \"\"\"Find the full path to a command using which.\"\"\"\n\n path = sp.Popen(['/usr/bin/env', 'which', cmd],\n stdout=sp.PIPE).communicate()[0]\n return py3compat.bytes_to_str(path)\n\n\nclass ProcessHandler(object):\n \"\"\"Execute subprocesses under the control of pexpect.\n \"\"\"\n # Timeout in seconds to wait on each reading of the subprocess' output.\n # This should not be set too low to avoid cpu overusage from our side,\n # since we read in a loop whose period is controlled by this timeout.\n read_timeout = 0.05\n\n # Timeout to give a process if we receive SIGINT, between sending the\n # SIGINT to the process and forcefully terminating it.\n terminate_timeout = 0.2\n\n # File object where stdout and stderr of the subprocess will be written\n logfile = None\n\n # Shell to call for subprocesses to execute\n sh = None\n\n @auto_attr\n def sh(self):\n sh = pexpect.which('sh')\n if sh is None:\n raise OSError('\"sh\" shell not found')\n return sh\n\n def __init__(self, logfile=None, read_timeout=None, terminate_timeout=None):\n \"\"\"Arguments are used for pexpect calls.\"\"\"\n self.read_timeout = (ProcessHandler.read_timeout if read_timeout is\n None else read_timeout)\n self.terminate_timeout = (ProcessHandler.terminate_timeout if\n terminate_timeout is None else\n terminate_timeout)\n self.logfile = sys.stdout if logfile is None else logfile\n\n def getoutput(self, cmd):\n \"\"\"Run a command and return its stdout/stderr as a string.\n\n Parameters\n ----------\n cmd : str\n A command to be executed in the system shell.\n\n Returns\n -------\n output : str\n A string containing the combination of stdout and stderr from the\n subprocess, in whatever order the subprocess originally wrote to its\n file descriptors (so the order of the information in this string is the\n correct order as would be seen if running the command in a terminal).\n \"\"\"\n try:\n return pexpect.run(self.sh, args=['-c', cmd]).replace('\\r\\n', '\\n')\n except KeyboardInterrupt:\n print('^C', file=sys.stderr, end='')\n\n def getoutput_pexpect(self, cmd):\n \"\"\"Run a command and return its stdout/stderr as a string.\n\n Parameters\n ----------\n cmd : str\n A command to be executed in the system shell.\n\n Returns\n -------\n output : str\n A string containing the combination of stdout and 
stderr from the\n subprocess, in whatever order the subprocess originally wrote to its\n file descriptors (so the order of the information in this string is the\n correct order as would be seen if running the command in a terminal).\n \"\"\"\n try:\n return pexpect.run(self.sh, args=['-c', cmd]).replace('\\r\\n', '\\n')\n except KeyboardInterrupt:\n print('^C', file=sys.stderr, end='')\n\n def system(self, cmd):\n \"\"\"Execute a command in a subshell.\n\n Parameters\n ----------\n cmd : str\n A command to be executed in the system shell.\n\n Returns\n -------\n int : child's exitstatus\n \"\"\"\n # Get likely encoding for the output.\n enc = DEFAULT_ENCODING\n \n # Patterns to match on the output, for pexpect. We read input and\n # allow either a short timeout or EOF\n patterns = [pexpect.TIMEOUT, pexpect.EOF]\n # the index of the EOF pattern in the list.\n # even though we know it's 1, this call means we don't have to worry if\n # we change the above list, and forget to change this value:\n EOF_index = patterns.index(pexpect.EOF)\n # The size of the output stored so far in the process output buffer.\n # Since pexpect only appends to this buffer, each time we print we\n # record how far we've printed, so that next time we only print *new*\n # content from the buffer.\n out_size = 0\n try:\n # Since we're not really searching the buffer for text patterns, we\n # can set pexpect's search window to be tiny and it won't matter.\n # We only search for the 'patterns' timeout or EOF, which aren't in\n # the text itself.\n #child = pexpect.spawn(pcmd, searchwindowsize=1)\n if hasattr(pexpect, 'spawnb'):\n child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U\n else:\n child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect\n flush = sys.stdout.flush\n while True:\n # res is the index of the pattern that caused the match, so we\n # know whether we've finished (if we matched EOF) or not\n res_idx = child.expect_list(patterns, self.read_timeout)\n print(child.before[out_size:].decode(enc, 'replace'), end='')\n flush()\n if res_idx==EOF_index:\n break\n # Update the pointer to what we've already printed\n out_size = len(child.before)\n except KeyboardInterrupt:\n # We need to send ^C to the process. The ascii code for '^C' is 3\n # (the character is known as ETX for 'End of Text', see\n # curses.ascii.ETX).\n child.sendline(chr(3))\n # Read and print any more output the program might produce on its\n # way out.\n try:\n out_size = len(child.before)\n child.expect_list(patterns, self.terminate_timeout)\n print(child.before[out_size:].decode(enc, 'replace'), end='')\n sys.stdout.flush()\n except KeyboardInterrupt:\n # Impatient users tend to type it multiple times\n pass\n finally:\n # Ensure the subprocess really is terminated\n child.terminate(force=True)\n # add isalive check, to ensure exitstatus is set:\n child.isalive()\n return child.exitstatus\n\n\n# Make system() with a functional interface for outside use. Note that we use\n# getoutput() from the _common utils, which is built on top of popen(). Using\n# pexpect to get subprocess output produces difficult to parse output, since\n# programs think they are talking to a tty and produce highly formatted output\n# (ls is a good example) that makes them hard.\nsystem = ProcessHandler().system\n\n\n\n", "path": "IPython/utils/_process_posix.py" } ]
[ { "content": "\"\"\"Posix-specific implementation of process utilities.\n\nThis file is only meant to be imported by process.py, not by end-users.\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Stdlib\nimport subprocess as sp\nimport sys\n\nfrom IPython.external import pexpect\n\n# Our own\nfrom .autoattr import auto_attr\nfrom ._process_common import getoutput, arg_split\nfrom IPython.utils import text\nfrom IPython.utils import py3compat\nfrom IPython.utils.encoding import DEFAULT_ENCODING\n\n#-----------------------------------------------------------------------------\n# Function definitions\n#-----------------------------------------------------------------------------\n\ndef _find_cmd(cmd):\n \"\"\"Find the full path to a command using which.\"\"\"\n\n path = sp.Popen(['/usr/bin/env', 'which', cmd],\n stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0]\n return py3compat.bytes_to_str(path)\n\n\nclass ProcessHandler(object):\n \"\"\"Execute subprocesses under the control of pexpect.\n \"\"\"\n # Timeout in seconds to wait on each reading of the subprocess' output.\n # This should not be set too low to avoid cpu overusage from our side,\n # since we read in a loop whose period is controlled by this timeout.\n read_timeout = 0.05\n\n # Timeout to give a process if we receive SIGINT, between sending the\n # SIGINT to the process and forcefully terminating it.\n terminate_timeout = 0.2\n\n # File object where stdout and stderr of the subprocess will be written\n logfile = None\n\n # Shell to call for subprocesses to execute\n sh = None\n\n @auto_attr\n def sh(self):\n sh = pexpect.which('sh')\n if sh is None:\n raise OSError('\"sh\" shell not found')\n return sh\n\n def __init__(self, logfile=None, read_timeout=None, terminate_timeout=None):\n \"\"\"Arguments are used for pexpect calls.\"\"\"\n self.read_timeout = (ProcessHandler.read_timeout if read_timeout is\n None else read_timeout)\n self.terminate_timeout = (ProcessHandler.terminate_timeout if\n terminate_timeout is None else\n terminate_timeout)\n self.logfile = sys.stdout if logfile is None else logfile\n\n def getoutput(self, cmd):\n \"\"\"Run a command and return its stdout/stderr as a string.\n\n Parameters\n ----------\n cmd : str\n A command to be executed in the system shell.\n\n Returns\n -------\n output : str\n A string containing the combination of stdout and stderr from the\n subprocess, in whatever order the subprocess originally wrote to its\n file descriptors (so the order of the information in this string is the\n correct order as would be seen if running the command in a terminal).\n \"\"\"\n try:\n return pexpect.run(self.sh, args=['-c', cmd]).replace('\\r\\n', '\\n')\n except KeyboardInterrupt:\n print('^C', file=sys.stderr, end='')\n\n def getoutput_pexpect(self, cmd):\n \"\"\"Run a command and return its stdout/stderr as a string.\n\n Parameters\n ----------\n cmd : str\n A command to be executed in the system shell.\n\n Returns\n -------\n output : str\n A string containing the combination 
of stdout and stderr from the\n subprocess, in whatever order the subprocess originally wrote to its\n file descriptors (so the order of the information in this string is the\n correct order as would be seen if running the command in a terminal).\n \"\"\"\n try:\n return pexpect.run(self.sh, args=['-c', cmd]).replace('\\r\\n', '\\n')\n except KeyboardInterrupt:\n print('^C', file=sys.stderr, end='')\n\n def system(self, cmd):\n \"\"\"Execute a command in a subshell.\n\n Parameters\n ----------\n cmd : str\n A command to be executed in the system shell.\n\n Returns\n -------\n int : child's exitstatus\n \"\"\"\n # Get likely encoding for the output.\n enc = DEFAULT_ENCODING\n \n # Patterns to match on the output, for pexpect. We read input and\n # allow either a short timeout or EOF\n patterns = [pexpect.TIMEOUT, pexpect.EOF]\n # the index of the EOF pattern in the list.\n # even though we know it's 1, this call means we don't have to worry if\n # we change the above list, and forget to change this value:\n EOF_index = patterns.index(pexpect.EOF)\n # The size of the output stored so far in the process output buffer.\n # Since pexpect only appends to this buffer, each time we print we\n # record how far we've printed, so that next time we only print *new*\n # content from the buffer.\n out_size = 0\n try:\n # Since we're not really searching the buffer for text patterns, we\n # can set pexpect's search window to be tiny and it won't matter.\n # We only search for the 'patterns' timeout or EOF, which aren't in\n # the text itself.\n #child = pexpect.spawn(pcmd, searchwindowsize=1)\n if hasattr(pexpect, 'spawnb'):\n child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U\n else:\n child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect\n flush = sys.stdout.flush\n while True:\n # res is the index of the pattern that caused the match, so we\n # know whether we've finished (if we matched EOF) or not\n res_idx = child.expect_list(patterns, self.read_timeout)\n print(child.before[out_size:].decode(enc, 'replace'), end='')\n flush()\n if res_idx==EOF_index:\n break\n # Update the pointer to what we've already printed\n out_size = len(child.before)\n except KeyboardInterrupt:\n # We need to send ^C to the process. The ascii code for '^C' is 3\n # (the character is known as ETX for 'End of Text', see\n # curses.ascii.ETX).\n child.sendline(chr(3))\n # Read and print any more output the program might produce on its\n # way out.\n try:\n out_size = len(child.before)\n child.expect_list(patterns, self.terminate_timeout)\n print(child.before[out_size:].decode(enc, 'replace'), end='')\n sys.stdout.flush()\n except KeyboardInterrupt:\n # Impatient users tend to type it multiple times\n pass\n finally:\n # Ensure the subprocess really is terminated\n child.terminate(force=True)\n # add isalive check, to ensure exitstatus is set:\n child.isalive()\n return child.exitstatus\n\n\n# Make system() with a functional interface for outside use. Note that we use\n# getoutput() from the _common utils, which is built on top of popen(). Using\n# pexpect to get subprocess output produces difficult to parse output, since\n# programs think they are talking to a tty and produce highly formatted output\n# (ls is a good example) that makes them hard.\nsystem = ProcessHandler().system\n\n\n\n", "path": "IPython/utils/_process_posix.py" } ]
diff --git a/IPython/utils/_process_posix.py b/IPython/utils/_process_posix.py index 8d4e6f24d07..50f302635d7 100644 --- a/IPython/utils/_process_posix.py +++ b/IPython/utils/_process_posix.py @@ -36,7 +36,7 @@ def _find_cmd(cmd): """Find the full path to a command using which.""" path = sp.Popen(['/usr/bin/env', 'which', cmd], - stdout=sp.PIPE).communicate()[0] + stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0] return py3compat.bytes_to_str(path)
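The fix simply captures the child's stderr so that `which`'s "no python3 in (...)" complaint never reaches the terminal. A stand-alone sketch of the same pattern, assuming a POSIX system where `/usr/bin/env` and `which` exist (the function name here is illustrative, not IPython's private helper):

```python
import subprocess as sp

def find_cmd(cmd):
    # Capture stderr as well as stdout; without stderr=sp.PIPE, `which`
    # would print "which: no <cmd> in (...)" straight to the terminal.
    proc = sp.Popen(['/usr/bin/env', 'which', cmd],
                    stdout=sp.PIPE, stderr=sp.PIPE)
    out, _err = proc.communicate()
    return out.decode(errors='replace').strip()

print(repr(find_cmd('python3')))                   # e.g. '/usr/bin/python3'
print(repr(find_cmd('no-such-command-anywhere')))  # '' and no terminal spam
```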
pantsbuild__pants-4690
v2 python pipeline can fail with IOError attempting to open non-existent PEX-INFO file there seems to be some kind of issue with the v2 python pipeline where chroot dirs may be created without actually containing the appropriate pex guts. subsequent attempts to open the dir as a pex fail with the following traceback: ``` Exception caught: (<type 'exceptions.IOError'>) File ".bootstrap/_pex/pex.py", line 360, in execute self._wrap_coverage(self._wrap_profiling, self._execute) File ".bootstrap/_pex/pex.py", line 288, in _wrap_coverage runner(*args) File ".bootstrap/_pex/pex.py", line 320, in _wrap_profiling runner(*args) File ".bootstrap/_pex/pex.py", line 403, in _execute return self.execute_entry(self._pex_info.entry_point) File ".bootstrap/_pex/pex.py", line 461, in execute_entry return runner(entry_point) File ".bootstrap/_pex/pex.py", line 479, in execute_pkg_resources return runner() File "/Users/kwilson/dev/source3/.pex/install/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl.e1e47cc7c0ed42f9079dc57415d84d0475bb8579/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl/pants/bin/pants_exe.py", line 44, in main PantsRunner(exiter).run() File "/Users/kwilson/dev/source3/.pex/install/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl.e1e47cc7c0ed42f9079dc57415d84d0475bb8579/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl/pants/bin/pants_runner.py", line 57, in run options_bootstrapper=options_bootstrapper) File "/Users/kwilson/dev/source3/.pex/install/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl.e1e47cc7c0ed42f9079dc57415d84d0475bb8579/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl/pants/bin/pants_runner.py", line 46, in _run return LocalPantsRunner(exiter, args, env, options_bootstrapper=options_bootstrapper).run() File "/Users/kwilson/dev/source3/.pex/install/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl.e1e47cc7c0ed42f9079dc57415d84d0475bb8579/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl/pants/bin/local_pants_runner.py", line 37, in run self._run() File "/Users/kwilson/dev/source3/.pex/install/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl.e1e47cc7c0ed42f9079dc57415d84d0475bb8579/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl/pants/bin/local_pants_runner.py", line 79, in _run goal_runner_result = goal_runner.run() File "/Users/kwilson/dev/source3/.pex/install/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl.e1e47cc7c0ed42f9079dc57415d84d0475bb8579/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl/pants/bin/goal_runner.py", line 263, in run result = self._execute_engine() File "/Users/kwilson/dev/source3/.pex/install/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl.e1e47cc7c0ed42f9079dc57415d84d0475bb8579/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl/pants/bin/goal_runner.py", line 252, in _execute_engine result = engine.execute(self._context, self._goals) File "/Users/kwilson/dev/source3/.pex/install/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl.e1e47cc7c0ed42f9079dc57415d84d0475bb8579/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl/pants/engine/legacy_engine.py", line 26, in execute self.attempt(context, goals) File "/Users/kwilson/dev/source3/.pex/install/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl.e1e47cc7c0ed42f9079dc57415d84d0475bb8579/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl/pants/engine/round_engine.py", line 224, in attempt goal_executor.attempt(explain) File 
"/Users/kwilson/dev/source3/.pex/install/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl.e1e47cc7c0ed42f9079dc57415d84d0475bb8579/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl/pants/engine/round_engine.py", line 47, in attempt task.execute() File "/Users/kwilson/dev/source3/.pex/install/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl.e1e47cc7c0ed42f9079dc57415d84d0475bb8579/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl/pants/backend/python/tasks2/resolve_requirements.py", line 22, in execute pex = self.resolve_requirements(req_libs) File "/Users/kwilson/dev/source3/.pex/install/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl.e1e47cc7c0ed42f9079dc57415d84d0475bb8579/pantsbuild.pants-1.3.0rc2+4151880633-py2-none-any.whl/pants/backend/python/tasks2/resolve_requirements_task_base.py", line 51, in resolve_requirements return PEX(path, interpreter=interpreter) File "/Users/kwilson/dev/source3/.pex/install/pex-1.2.6-py2.py3-none-any.whl.cd87d3cea16ee5296bcdaff2f0885852ffe356c9/pex-1.2.6-py2.py3-none-any.whl/pex/pex.py", line 59, in __init__ self._pex_info = PexInfo.from_pex(self._pex) File "/Users/kwilson/dev/source3/.pex/install/pex-1.2.6-py2.py3-none-any.whl.cd87d3cea16ee5296bcdaff2f0885852ffe356c9/pex-1.2.6-py2.py3-none-any.whl/pex/pex_info.py", line 79, in from_pex with open(os.path.join(pex, cls.PATH)) as fp: Exception message: [Errno 2] No such file or directory: u'/Users/kwilson/dev/source3/.pants.d/pyprep/requirements/CPython-2.7.10/6ad744b7087e2b25f09595d0527bdedb3b92c69a/PEX-INFO' ``` where the referenced dir is empty: ``` $ ls -al /Users/kwilson/dev/source3/.pants.d/pyprep/requirements/CPython-2.7.10/6ad744b7087e2b25f09595d0527bdedb3b92c69a total 0 drwxr-xr-x 2 kwilson staff 68 Jun 5 18:17 . drwxr-xr-x 6 kwilson staff 204 Jun 5 18:17 .. ```
[ { "content": "# coding=utf-8\n# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport atexit\nimport errno\nimport os\nimport shutil\nimport stat\nimport tempfile\nimport threading\nimport uuid\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nfrom pants.util.strutil import ensure_text\n\n\ndef longest_dir_prefix(path, prefixes):\n \"\"\"Given a list of prefixes, return the one that is the longest prefix to the given path.\n\n Returns None if there are no matches.\n \"\"\"\n longest_match, longest_prefix = 0, None\n for prefix in prefixes:\n if fast_relpath_optional(path, prefix) is not None and len(prefix) > longest_match:\n longest_match, longest_prefix = len(prefix), prefix\n\n return longest_prefix\n\n\ndef fast_relpath(path, start):\n \"\"\"A prefix-based relpath, with no normalization or support for returning `..`.\"\"\"\n relpath = fast_relpath_optional(path, start)\n if relpath is None:\n raise ValueError('{} is not a directory containing {}'.format(start, path))\n return relpath\n\n\ndef fast_relpath_optional(path, start):\n \"\"\"A prefix-based relpath, with no normalization or support for returning `..`.\n\n Returns None if `start` is not a directory-aware prefix of `path`.\n \"\"\"\n if len(start) == 0:\n # Empty prefix.\n return path\n\n # Determine where the matchable prefix ends.\n pref_end = len(start) - 1 if start[-1] == '/' else len(start)\n if pref_end > len(path):\n # The prefix is too long to match.\n return None\n elif path[:pref_end] == start[:pref_end] and (len(path) == pref_end or path[pref_end] == '/'):\n # The prefix matches, and the entries are either identical, or the suffix indicates that\n # the prefix is a directory.\n return path[pref_end+1:]\n\n\ndef safe_mkdir(directory, clean=False):\n \"\"\"Ensure a directory is present.\n\n If it's not there, create it. If it is, no-op. If clean is True, ensure the dir is empty.\n\n :API: public\n \"\"\"\n if clean:\n safe_rmtree(directory)\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef safe_mkdir_for(path):\n \"\"\"Ensure that the parent directory for a file is present.\n\n If it's not there, create it. If it is, no-op.\n \"\"\"\n safe_mkdir(os.path.dirname(path), clean=False)\n\n\ndef safe_file_dump(filename, payload):\n \"\"\"Write a string to a file.\n\n :param string filename: The filename of the file to write to.\n :param string payload: The string to write to the file.\n \"\"\"\n with safe_open(filename, 'wb') as f:\n f.write(payload)\n\n\ndef read_file(filename):\n \"\"\"Read and return the contents of a file in a single file.read().\n\n :param string filename: The filename of the file to read.\n :returns: The contents of the file.\n :rtype: string\n \"\"\"\n with open(filename, 'rb') as f:\n return f.read()\n\n\ndef safe_walk(path, **kwargs):\n \"\"\"Just like os.walk, but ensures that the returned values are unicode objects.\n\n This isn't strictly safe, in that it is possible that some paths\n will not be decodeable, but that case is rare, and the only\n alternative is to somehow avoid all interaction between paths and\n unicode objects, which seems especially tough in the presence of\n unicode_literals. 
See e.g.\n https://mail.python.org/pipermail/python-dev/2008-December/083856.html\n\n :API: public\n \"\"\"\n # If os.walk is given a text argument, it yields text values; if it\n # is given a binary argument, it yields binary values.\n return os.walk(ensure_text(path), **kwargs)\n\n\n_MKDTEMP_CLEANER = None\n_MKDTEMP_DIRS = defaultdict(set)\n_MKDTEMP_LOCK = threading.RLock()\n\n\ndef _mkdtemp_atexit_cleaner():\n for td in _MKDTEMP_DIRS.pop(os.getpid(), []):\n safe_rmtree(td)\n\n\ndef _mkdtemp_unregister_cleaner():\n global _MKDTEMP_CLEANER\n _MKDTEMP_CLEANER = None\n\n\ndef _mkdtemp_register_cleaner(cleaner):\n global _MKDTEMP_CLEANER\n if not cleaner:\n return\n assert callable(cleaner)\n if _MKDTEMP_CLEANER is None:\n atexit.register(cleaner)\n _MKDTEMP_CLEANER = cleaner\n\n\ndef safe_mkdtemp(cleaner=_mkdtemp_atexit_cleaner, **kw):\n \"\"\"Create a temporary directory that is cleaned up on process exit.\n\n Arguments are as to tempfile.mkdtemp.\n\n :API: public\n \"\"\"\n # Proper lock sanitation on fork [issue 6721] would be desirable here.\n with _MKDTEMP_LOCK:\n return register_rmtree(tempfile.mkdtemp(**kw), cleaner=cleaner)\n\n\ndef register_rmtree(directory, cleaner=_mkdtemp_atexit_cleaner):\n \"\"\"Register an existing directory to be cleaned up at process exit.\"\"\"\n with _MKDTEMP_LOCK:\n _mkdtemp_register_cleaner(cleaner)\n _MKDTEMP_DIRS[os.getpid()].add(directory)\n return directory\n\n\ndef safe_rmtree(directory):\n \"\"\"Delete a directory if it's present. If it's not present, no-op.\n\n Note that if the directory argument is a symlink, only the symlink will\n be deleted.\n\n :API: public\n \"\"\"\n if os.path.islink(directory):\n safe_delete(directory)\n else:\n shutil.rmtree(directory, ignore_errors=True)\n\n\ndef safe_open(filename, *args, **kwargs):\n \"\"\"Open a file safely, ensuring that its directory exists.\n\n :API: public\n \"\"\"\n safe_mkdir_for(filename)\n return open(filename, *args, **kwargs)\n\n\ndef safe_delete(filename):\n \"\"\"Delete a file safely. If it's not present, no-op.\"\"\"\n try:\n os.unlink(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n\n\ndef safe_concurrent_rename(src, dst):\n \"\"\"Rename src to dst, ignoring errors due to dst already existing.\n\n Useful when concurrent processes may attempt to create dst, and it doesn't matter who wins.\n \"\"\"\n # Delete dst, in case it existed (with old content) even before any concurrent processes\n # attempted this write. 
This ensures that at least one process writes the new content.\n if os.path.isdir(src): # Note that dst may not exist, so we test for the type of src.\n safe_rmtree(dst)\n else:\n safe_delete(dst)\n try:\n shutil.move(src, dst)\n except IOError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef safe_rm_oldest_items_in_dir(root_dir, num_of_items_to_keep, excludes=frozenset()):\n \"\"\"\n Keep `num_of_items_to_keep` newly modified items besides `excludes` in `root_dir` then remove the rest.\n :param root_dir: the folder to examine\n :param num_of_items_to_keep: number of files/folders/symlinks to keep after the cleanup\n :param excludes: absolute paths excluded from removal (must be prefixed with `root_dir`)\n :return: none\n \"\"\"\n if os.path.isdir(root_dir):\n found_files = []\n for old_file in os.listdir(root_dir):\n full_path = os.path.join(root_dir, old_file)\n if full_path not in excludes:\n found_files.append((full_path, os.path.getmtime(full_path)))\n found_files = sorted(found_files, key=lambda x: x[1], reverse=True)\n for cur_file, _ in found_files[num_of_items_to_keep:]:\n rm_rf(cur_file)\n\n\n@contextmanager\ndef safe_concurrent_creation(target_path):\n \"\"\"A contextmanager that yields a temporary path and renames it to a final target path when the\n contextmanager exits.\n\n Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins.\n\n :param target_path: The final target path to rename the temporary path to.\n :yields: A temporary path containing the original path with a unique (uuid4) suffix.\n \"\"\"\n safe_mkdir_for(target_path)\n tmp_path = '{}.tmp.{}'.format(target_path, uuid.uuid4().hex)\n try:\n yield tmp_path\n finally:\n if os.path.exists(tmp_path):\n safe_concurrent_rename(tmp_path, target_path)\n\n\ndef chmod_plus_x(path):\n \"\"\"Equivalent of unix `chmod a+x path`\"\"\"\n path_mode = os.stat(path).st_mode\n path_mode &= int('777', 8)\n if path_mode & stat.S_IRUSR:\n path_mode |= stat.S_IXUSR\n if path_mode & stat.S_IRGRP:\n path_mode |= stat.S_IXGRP\n if path_mode & stat.S_IROTH:\n path_mode |= stat.S_IXOTH\n os.chmod(path, path_mode)\n\n\ndef absolute_symlink(source_path, target_path):\n \"\"\"Create a symlink at target pointing to source using the absolute path.\n\n :param source_path: Absolute path to source file\n :param target_path: Absolute path to intended symlink\n :raises ValueError if source_path or link_path are not unique, absolute paths\n :raises OSError on failure UNLESS file already exists or no such file/directory\n \"\"\"\n if not os.path.isabs(source_path):\n raise ValueError(\"Path for source : {} must be absolute\".format(source_path))\n if not os.path.isabs(target_path):\n raise ValueError(\"Path for link : {} must be absolute\".format(target_path))\n if source_path == target_path:\n raise ValueError(\"Path for link is identical to source : {}\".format(source_path))\n try:\n if os.path.lexists(target_path):\n if os.path.islink(target_path) or os.path.isfile(target_path):\n os.unlink(target_path)\n else:\n shutil.rmtree(target_path)\n safe_mkdir_for(target_path)\n os.symlink(source_path, target_path)\n except OSError as e:\n # Another run may beat us to deletion or creation.\n if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):\n raise\n\n\ndef relative_symlink(source_path, link_path):\n \"\"\"Create a symlink at link_path pointing to relative source\n\n :param source_path: Absolute path to source file\n :param link_path: Absolute path to intended symlink\n :raises ValueError if source_path or 
link_path are not unique, absolute paths\n :raises OSError on failure UNLESS file already exists or no such file/directory\n \"\"\"\n if not os.path.isabs(source_path):\n raise ValueError(\"Path for source:{} must be absolute\".format(source_path))\n if not os.path.isabs(link_path):\n raise ValueError(\"Path for link:{} must be absolute\".format(link_path))\n if source_path == link_path:\n raise ValueError(\"Path for link is identical to source:{}\".format(source_path))\n # The failure state below had a long life as an uncaught error. No behavior was changed here, it just adds a catch.\n # Raising an exception does differ from absolute_symlink, which takes the liberty of deleting existing directories.\n if os.path.isdir(link_path) and not os.path.islink(link_path):\n raise ValueError(\"Path for link would overwrite an existing directory: {}\".format(link_path))\n try:\n if os.path.lexists(link_path):\n os.unlink(link_path)\n rel_path = os.path.relpath(source_path, os.path.dirname(link_path))\n os.symlink(rel_path, link_path)\n except OSError as e:\n # Another run may beat us to deletion or creation.\n if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):\n raise\n\n\ndef relativize_path(path, rootdir):\n \"\"\"\n\n :API: public\n \"\"\"\n # Note that we can't test for length and return the shorter of the two, because we need these\n # paths to be stable across systems (e.g., because they get embedded in analysis files),\n # and this choice might be inconsistent across systems. So we assume the relpath is always\n # shorter. We relativize because of a known case of very long full path prefixes on Mesos,\n # so this seems like the right heuristic.\n # Note also that we mustn't call realpath on the path - we need to preserve the symlink structure.\n return os.path.relpath(path, rootdir)\n\n\n# When running pants under mesos/aurora, the sandbox pathname can be very long. Since it gets\n# prepended to most components in the classpath (some from ivy, the rest from the build),\n# in some runs the classpath gets too big and exceeds ARG_MAX.\n# We prevent this by using paths relative to the current working directory.\ndef relativize_paths(paths, rootdir):\n return [relativize_path(path, rootdir) for path in paths]\n\n\ndef touch(path, times=None):\n \"\"\"Equivalent of unix `touch path`.\n\n :API: public\n\n :path: The file to touch.\n :times Either a tuple of (atime, mtime) or else a single time to use for both. If not\n specified both atime and mtime are updated to the current time.\n \"\"\"\n if times:\n if len(times) > 2:\n raise ValueError('times must either be a tuple of (atime, mtime) or else a single time value '\n 'to use for both.')\n\n if len(times) == 1:\n times = (times, times)\n\n with safe_open(path, 'a'):\n os.utime(path, times)\n\n\ndef get_basedir(path):\n \"\"\"Returns the base directory of a path.\n\n Examples:\n get_basedir('foo/bar/baz') --> 'foo'\n get_basedir('/foo/bar/baz') --> ''\n get_basedir('foo') --> 'foo'\n \"\"\"\n return path[:path.index(os.sep)] if os.sep in path else path\n\n\ndef rm_rf(name):\n \"\"\"Remove a file or a directory similarly to running `rm -rf <name>` in a UNIX shell.\n\n :param str name: the name of the file or directory to remove.\n :raises: OSError on error.\n \"\"\"\n if not os.path.exists(name):\n return\n\n try:\n # Avoid using safe_rmtree so we can detect failures.\n shutil.rmtree(name)\n except OSError as e:\n if e.errno == errno.ENOTDIR:\n # 'Not a directory', but a file. 
Attempt to os.unlink the file, raising OSError on failure.\n safe_delete(name)\n elif e.errno != errno.ENOENT:\n # Pass on 'No such file or directory', otherwise re-raise OSError to surface perm issues etc.\n raise\n", "path": "src/python/pants/util/dirutil.py" } ]
[ { "content": "# coding=utf-8\n# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport atexit\nimport errno\nimport os\nimport shutil\nimport stat\nimport tempfile\nimport threading\nimport uuid\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nfrom pants.util.strutil import ensure_text\n\n\ndef longest_dir_prefix(path, prefixes):\n \"\"\"Given a list of prefixes, return the one that is the longest prefix to the given path.\n\n Returns None if there are no matches.\n \"\"\"\n longest_match, longest_prefix = 0, None\n for prefix in prefixes:\n if fast_relpath_optional(path, prefix) is not None and len(prefix) > longest_match:\n longest_match, longest_prefix = len(prefix), prefix\n\n return longest_prefix\n\n\ndef fast_relpath(path, start):\n \"\"\"A prefix-based relpath, with no normalization or support for returning `..`.\"\"\"\n relpath = fast_relpath_optional(path, start)\n if relpath is None:\n raise ValueError('{} is not a directory containing {}'.format(start, path))\n return relpath\n\n\ndef fast_relpath_optional(path, start):\n \"\"\"A prefix-based relpath, with no normalization or support for returning `..`.\n\n Returns None if `start` is not a directory-aware prefix of `path`.\n \"\"\"\n if len(start) == 0:\n # Empty prefix.\n return path\n\n # Determine where the matchable prefix ends.\n pref_end = len(start) - 1 if start[-1] == '/' else len(start)\n if pref_end > len(path):\n # The prefix is too long to match.\n return None\n elif path[:pref_end] == start[:pref_end] and (len(path) == pref_end or path[pref_end] == '/'):\n # The prefix matches, and the entries are either identical, or the suffix indicates that\n # the prefix is a directory.\n return path[pref_end+1:]\n\n\ndef safe_mkdir(directory, clean=False):\n \"\"\"Ensure a directory is present.\n\n If it's not there, create it. If it is, no-op. If clean is True, ensure the dir is empty.\n\n :API: public\n \"\"\"\n if clean:\n safe_rmtree(directory)\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef safe_mkdir_for(path):\n \"\"\"Ensure that the parent directory for a file is present.\n\n If it's not there, create it. If it is, no-op.\n \"\"\"\n safe_mkdir(os.path.dirname(path), clean=False)\n\n\ndef safe_file_dump(filename, payload):\n \"\"\"Write a string to a file.\n\n :param string filename: The filename of the file to write to.\n :param string payload: The string to write to the file.\n \"\"\"\n with safe_open(filename, 'wb') as f:\n f.write(payload)\n\n\ndef read_file(filename):\n \"\"\"Read and return the contents of a file in a single file.read().\n\n :param string filename: The filename of the file to read.\n :returns: The contents of the file.\n :rtype: string\n \"\"\"\n with open(filename, 'rb') as f:\n return f.read()\n\n\ndef safe_walk(path, **kwargs):\n \"\"\"Just like os.walk, but ensures that the returned values are unicode objects.\n\n This isn't strictly safe, in that it is possible that some paths\n will not be decodeable, but that case is rare, and the only\n alternative is to somehow avoid all interaction between paths and\n unicode objects, which seems especially tough in the presence of\n unicode_literals. 
See e.g.\n https://mail.python.org/pipermail/python-dev/2008-December/083856.html\n\n :API: public\n \"\"\"\n # If os.walk is given a text argument, it yields text values; if it\n # is given a binary argument, it yields binary values.\n return os.walk(ensure_text(path), **kwargs)\n\n\n_MKDTEMP_CLEANER = None\n_MKDTEMP_DIRS = defaultdict(set)\n_MKDTEMP_LOCK = threading.RLock()\n\n\ndef _mkdtemp_atexit_cleaner():\n for td in _MKDTEMP_DIRS.pop(os.getpid(), []):\n safe_rmtree(td)\n\n\ndef _mkdtemp_unregister_cleaner():\n global _MKDTEMP_CLEANER\n _MKDTEMP_CLEANER = None\n\n\ndef _mkdtemp_register_cleaner(cleaner):\n global _MKDTEMP_CLEANER\n if not cleaner:\n return\n assert callable(cleaner)\n if _MKDTEMP_CLEANER is None:\n atexit.register(cleaner)\n _MKDTEMP_CLEANER = cleaner\n\n\ndef safe_mkdtemp(cleaner=_mkdtemp_atexit_cleaner, **kw):\n \"\"\"Create a temporary directory that is cleaned up on process exit.\n\n Arguments are as to tempfile.mkdtemp.\n\n :API: public\n \"\"\"\n # Proper lock sanitation on fork [issue 6721] would be desirable here.\n with _MKDTEMP_LOCK:\n return register_rmtree(tempfile.mkdtemp(**kw), cleaner=cleaner)\n\n\ndef register_rmtree(directory, cleaner=_mkdtemp_atexit_cleaner):\n \"\"\"Register an existing directory to be cleaned up at process exit.\"\"\"\n with _MKDTEMP_LOCK:\n _mkdtemp_register_cleaner(cleaner)\n _MKDTEMP_DIRS[os.getpid()].add(directory)\n return directory\n\n\ndef safe_rmtree(directory):\n \"\"\"Delete a directory if it's present. If it's not present, no-op.\n\n Note that if the directory argument is a symlink, only the symlink will\n be deleted.\n\n :API: public\n \"\"\"\n if os.path.islink(directory):\n safe_delete(directory)\n else:\n shutil.rmtree(directory, ignore_errors=True)\n\n\ndef safe_open(filename, *args, **kwargs):\n \"\"\"Open a file safely, ensuring that its directory exists.\n\n :API: public\n \"\"\"\n safe_mkdir_for(filename)\n return open(filename, *args, **kwargs)\n\n\ndef safe_delete(filename):\n \"\"\"Delete a file safely. If it's not present, no-op.\"\"\"\n try:\n os.unlink(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n\n\ndef safe_concurrent_rename(src, dst):\n \"\"\"Rename src to dst, ignoring errors due to dst already existing.\n\n Useful when concurrent processes may attempt to create dst, and it doesn't matter who wins.\n \"\"\"\n # Delete dst, in case it existed (with old content) even before any concurrent processes\n # attempted this write. 
This ensures that at least one process writes the new content.\n if os.path.isdir(src): # Note that dst may not exist, so we test for the type of src.\n safe_rmtree(dst)\n else:\n safe_delete(dst)\n try:\n shutil.move(src, dst)\n except IOError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef safe_rm_oldest_items_in_dir(root_dir, num_of_items_to_keep, excludes=frozenset()):\n \"\"\"\n Keep `num_of_items_to_keep` newly modified items besides `excludes` in `root_dir` then remove the rest.\n :param root_dir: the folder to examine\n :param num_of_items_to_keep: number of files/folders/symlinks to keep after the cleanup\n :param excludes: absolute paths excluded from removal (must be prefixed with `root_dir`)\n :return: none\n \"\"\"\n if os.path.isdir(root_dir):\n found_files = []\n for old_file in os.listdir(root_dir):\n full_path = os.path.join(root_dir, old_file)\n if full_path not in excludes:\n found_files.append((full_path, os.path.getmtime(full_path)))\n found_files = sorted(found_files, key=lambda x: x[1], reverse=True)\n for cur_file, _ in found_files[num_of_items_to_keep:]:\n rm_rf(cur_file)\n\n\n@contextmanager\ndef safe_concurrent_creation(target_path):\n \"\"\"A contextmanager that yields a temporary path and renames it to a final target path when the\n contextmanager exits.\n\n Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins.\n\n :param target_path: The final target path to rename the temporary path to.\n :yields: A temporary path containing the original path with a unique (uuid4) suffix.\n \"\"\"\n safe_mkdir_for(target_path)\n tmp_path = '{}.tmp.{}'.format(target_path, uuid.uuid4().hex)\n try:\n yield tmp_path\n except Exception:\n rm_rf(tmp_path)\n raise\n else:\n if os.path.exists(tmp_path):\n safe_concurrent_rename(tmp_path, target_path)\n\n\ndef chmod_plus_x(path):\n \"\"\"Equivalent of unix `chmod a+x path`\"\"\"\n path_mode = os.stat(path).st_mode\n path_mode &= int('777', 8)\n if path_mode & stat.S_IRUSR:\n path_mode |= stat.S_IXUSR\n if path_mode & stat.S_IRGRP:\n path_mode |= stat.S_IXGRP\n if path_mode & stat.S_IROTH:\n path_mode |= stat.S_IXOTH\n os.chmod(path, path_mode)\n\n\ndef absolute_symlink(source_path, target_path):\n \"\"\"Create a symlink at target pointing to source using the absolute path.\n\n :param source_path: Absolute path to source file\n :param target_path: Absolute path to intended symlink\n :raises ValueError if source_path or link_path are not unique, absolute paths\n :raises OSError on failure UNLESS file already exists or no such file/directory\n \"\"\"\n if not os.path.isabs(source_path):\n raise ValueError(\"Path for source : {} must be absolute\".format(source_path))\n if not os.path.isabs(target_path):\n raise ValueError(\"Path for link : {} must be absolute\".format(target_path))\n if source_path == target_path:\n raise ValueError(\"Path for link is identical to source : {}\".format(source_path))\n try:\n if os.path.lexists(target_path):\n if os.path.islink(target_path) or os.path.isfile(target_path):\n os.unlink(target_path)\n else:\n shutil.rmtree(target_path)\n safe_mkdir_for(target_path)\n os.symlink(source_path, target_path)\n except OSError as e:\n # Another run may beat us to deletion or creation.\n if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):\n raise\n\n\ndef relative_symlink(source_path, link_path):\n \"\"\"Create a symlink at link_path pointing to relative source\n\n :param source_path: Absolute path to source file\n :param link_path: Absolute path to intended 
symlink\n :raises ValueError if source_path or link_path are not unique, absolute paths\n :raises OSError on failure UNLESS file already exists or no such file/directory\n \"\"\"\n if not os.path.isabs(source_path):\n raise ValueError(\"Path for source:{} must be absolute\".format(source_path))\n if not os.path.isabs(link_path):\n raise ValueError(\"Path for link:{} must be absolute\".format(link_path))\n if source_path == link_path:\n raise ValueError(\"Path for link is identical to source:{}\".format(source_path))\n # The failure state below had a long life as an uncaught error. No behavior was changed here, it just adds a catch.\n # Raising an exception does differ from absolute_symlink, which takes the liberty of deleting existing directories.\n if os.path.isdir(link_path) and not os.path.islink(link_path):\n raise ValueError(\"Path for link would overwrite an existing directory: {}\".format(link_path))\n try:\n if os.path.lexists(link_path):\n os.unlink(link_path)\n rel_path = os.path.relpath(source_path, os.path.dirname(link_path))\n os.symlink(rel_path, link_path)\n except OSError as e:\n # Another run may beat us to deletion or creation.\n if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):\n raise\n\n\ndef relativize_path(path, rootdir):\n \"\"\"\n\n :API: public\n \"\"\"\n # Note that we can't test for length and return the shorter of the two, because we need these\n # paths to be stable across systems (e.g., because they get embedded in analysis files),\n # and this choice might be inconsistent across systems. So we assume the relpath is always\n # shorter. We relativize because of a known case of very long full path prefixes on Mesos,\n # so this seems like the right heuristic.\n # Note also that we mustn't call realpath on the path - we need to preserve the symlink structure.\n return os.path.relpath(path, rootdir)\n\n\n# When running pants under mesos/aurora, the sandbox pathname can be very long. Since it gets\n# prepended to most components in the classpath (some from ivy, the rest from the build),\n# in some runs the classpath gets too big and exceeds ARG_MAX.\n# We prevent this by using paths relative to the current working directory.\ndef relativize_paths(paths, rootdir):\n return [relativize_path(path, rootdir) for path in paths]\n\n\ndef touch(path, times=None):\n \"\"\"Equivalent of unix `touch path`.\n\n :API: public\n\n :path: The file to touch.\n :times Either a tuple of (atime, mtime) or else a single time to use for both. If not\n specified both atime and mtime are updated to the current time.\n \"\"\"\n if times:\n if len(times) > 2:\n raise ValueError('times must either be a tuple of (atime, mtime) or else a single time value '\n 'to use for both.')\n\n if len(times) == 1:\n times = (times, times)\n\n with safe_open(path, 'a'):\n os.utime(path, times)\n\n\ndef get_basedir(path):\n \"\"\"Returns the base directory of a path.\n\n Examples:\n get_basedir('foo/bar/baz') --> 'foo'\n get_basedir('/foo/bar/baz') --> ''\n get_basedir('foo') --> 'foo'\n \"\"\"\n return path[:path.index(os.sep)] if os.sep in path else path\n\n\ndef rm_rf(name):\n \"\"\"Remove a file or a directory similarly to running `rm -rf <name>` in a UNIX shell.\n\n :param str name: the name of the file or directory to remove.\n :raises: OSError on error.\n \"\"\"\n if not os.path.exists(name):\n return\n\n try:\n # Avoid using safe_rmtree so we can detect failures.\n shutil.rmtree(name)\n except OSError as e:\n if e.errno == errno.ENOTDIR:\n # 'Not a directory', but a file. 
Attempt to os.unlink the file, raising OSError on failure.\n safe_delete(name)\n elif e.errno != errno.ENOENT:\n # Pass on 'No such file or directory', otherwise re-raise OSError to surface perm issues etc.\n raise\n", "path": "src/python/pants/util/dirutil.py" } ]
diff --git a/.travis.yml b/.travis.yml index fbc028a9a38..66baea8d1fd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -64,6 +64,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required # Docker runs will write files as root, so avoid caching for this shard. cache: false @@ -94,6 +95,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required addons: apt: @@ -120,6 +122,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required addons: apt: @@ -146,6 +149,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required addons: apt: @@ -172,6 +176,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required addons: apt: @@ -198,6 +203,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required addons: apt: @@ -224,6 +230,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required addons: apt: @@ -250,6 +257,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required addons: apt: @@ -276,6 +284,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required addons: apt: @@ -302,6 +311,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required addons: apt: @@ -328,6 +338,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required addons: apt: @@ -354,6 +365,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required addons: apt: @@ -380,6 +392,7 @@ matrix: - os: linux dist: trusty + group: deprecated-2017Q2 sudo: required addons: apt: diff --git a/src/python/pants/util/dirutil.py b/src/python/pants/util/dirutil.py index e0e9aeaae25..9cbbfbd679b 100644 --- a/src/python/pants/util/dirutil.py +++ b/src/python/pants/util/dirutil.py @@ -250,7 +250,10 @@ def safe_concurrent_creation(target_path): tmp_path = '{}.tmp.{}'.format(target_path, uuid.uuid4().hex) try: yield tmp_path - finally: + except Exception: + rm_rf(tmp_path) + raise + else: if os.path.exists(tmp_path): safe_concurrent_rename(tmp_path, target_path) diff --git a/tests/python/pants_test/util/test_dirutil.py b/tests/python/pants_test/util/test_dirutil.py index a4caa7eae70..8ba4a5fb90e 100644 --- a/tests/python/pants_test/util/test_dirutil.py +++ b/tests/python/pants_test/util/test_dirutil.py @@ -264,7 +264,7 @@ def test_safe_concurrent_creation_noop(self): self.assertFalse(os.path.exists(expected_file)) self.assertTrue(os.path.exists(os.path.dirname(expected_file))) - def test_safe_concurrent_creation_exception_still_renames(self): + def test_safe_concurrent_creation_exception_handling(self): with temporary_dir() as td: expected_file = os.path.join(td, 'expected_file') @@ -275,7 +275,7 @@ def test_safe_concurrent_creation_exception_still_renames(self): raise ZeroDivisionError('zomg') self.assertFalse(os.path.exists(safe_path)) - self.assertTrue(os.path.exists(expected_file)) + self.assertFalse(os.path.exists(expected_file)) def test_safe_rm_oldest_items_in_dir(self): with temporary_dir() as td:
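The behavioural change to `safe_concurrent_creation` is that a failing body now removes its temporary path instead of renaming a half-built (possibly empty) directory onto the target — which is exactly how an empty chroot with no PEX-INFO could appear. A simplified, hedged sketch of the pattern outside Pants (no concurrent-rename handling; the helper name and paths are made up):

```python
import os
import shutil
import uuid
from contextlib import contextmanager

@contextmanager
def atomic_creation(target_path):
    """Yield a temp path and publish it to target_path only if the body succeeds."""
    tmp_path = '{}.tmp.{}'.format(target_path, uuid.uuid4().hex)
    try:
        yield tmp_path
    except Exception:
        shutil.rmtree(tmp_path, ignore_errors=True)  # never publish partial output
        raise
    else:
        if os.path.exists(tmp_path):
            os.rename(tmp_path, target_path)  # simplified; assumes target doesn't exist yet

# Usage sketch: the target only appears if every step inside the block worked.
target = '/tmp/demo-chroot'
with atomic_creation(target) as workdir:
    os.mkdir(workdir)
    with open(os.path.join(workdir, 'PEX-INFO'), 'w') as f:
        f.write('{}')
print(os.listdir(target))  # ['PEX-INFO']
```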
sunpy__sunpy-1505
EIT data fails with wcsaxes The wcs information in the EIT header is not being identified as celestial axes by wcslib (inside astropy.wcs). This means that wcs is not detecting them as angular axes properly, so setting the arcsecond formatting fails.
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nHelpers and Functions to make WCSAxes work in SunPy\n\"\"\"\nimport warnings\n\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\ntry:\n import wcsaxes\n HAVE_WCSAXES = True\n\nexcept ImportError:\n HAVE_WCSAXES = False\n warnings.warn(\"SunPy plotting is improved by installing the WCSAxes module: http://wcsaxes.readthedocs.org/en/latest/index.html\")\n\nFORCE_NO_WCSAXES = False\n\n__all__ = ['HAVE_WCSAXES', 'is_wcsaxes', 'FORCE_NO_WCSAXES']\n\ndef is_wcsaxes(axes):\n \"\"\"\n Test a matplotlib Axes object to see if it is an instance of WCSAxes\n\n Parameters\n ----------\n axes : matplotlib Axes Object\n Axes to test\n\n Returns\n -------\n result : bool\n Result of the test\n \"\"\"\n\n if HAVE_WCSAXES and not FORCE_NO_WCSAXES:\n return isinstance(axes, wcsaxes.WCSAxes)\n else:\n return False\n\n\ndef gca_wcs(wcs, fig=None):\n \"\"\"\n Get the current axes, and return a WCSAxes if possible\n \"\"\"\n\n if not fig:\n fig = plt.gcf()\n\n if not len(fig.get_axes()):\n if HAVE_WCSAXES and not FORCE_NO_WCSAXES:\n ax = plt.gca(projection=wcs)\n else:\n ax = plt.gca()\n\n else:\n ax = plt.gca()\n\n return ax\n\ndef get_world_transform(axes):\n if is_wcsaxes(axes):\n transform = axes.get_transform('world')\n else:\n transform = axes.transData\n\n return transform\n\ndef default_wcs_grid(axes):\n \"\"\"\n Apply some default wcsaxes grid formatting\n \"\"\"\n if not isinstance(axes, wcsaxes.WCSAxes):\n raise TypeError(\"This axes is not a WCSAxes\")\n\n x = axes.coords[0]\n y = axes.coords[1]\n\n x.set_ticks(color='white')\n y.set_ticks(color='white')\n\n x.set_ticks_position('bl')\n y.set_ticks_position('bl')\n\n x.set_major_formatter('s.s')\n y.set_major_formatter('s.s')\n\n axes.coords.grid(color='white', alpha=0.6)\n\ndef wcsaxes_heliographic_overlay(axes):\n \"\"\"\n Draw a heliographic overlay using wcsaxes\n \"\"\"\n overlay = axes.get_coords_overlay('heliographicstonyhurst')\n\n lon = overlay[0]\n lat = overlay[1]\n\n lon.coord_wrap = 180\n lon.set_major_formatter('dd')\n\n lon.set_axislabel('Solar Longitude')\n lat.set_axislabel('Solar Latitude')\n\n lon.set_ticks_position('tr')\n lat.set_ticks_position('tr')\n\n lon.set_ticks(spacing=10. * u.deg, color='white')\n lat.set_ticks(spacing=10. * u.deg, color='white')\n\n overlay.grid(color='white', alpha=0.5)\n\n return overlay\n", "path": "sunpy/visualization/wcsaxes_compat.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nHelpers and Functions to make WCSAxes work in SunPy\n\"\"\"\nimport warnings\n\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\ntry:\n import wcsaxes\n HAVE_WCSAXES = True\n\nexcept ImportError:\n HAVE_WCSAXES = False\n warnings.warn(\"SunPy plotting is improved by installing the WCSAxes module: http://wcsaxes.readthedocs.org/en/latest/index.html\")\n\nFORCE_NO_WCSAXES = False\n\n__all__ = ['HAVE_WCSAXES', 'is_wcsaxes', 'FORCE_NO_WCSAXES']\n\ndef is_wcsaxes(axes):\n \"\"\"\n Test a matplotlib Axes object to see if it is an instance of WCSAxes\n\n Parameters\n ----------\n axes : matplotlib Axes Object\n Axes to test\n\n Returns\n -------\n result : bool\n Result of the test\n \"\"\"\n\n if HAVE_WCSAXES and not FORCE_NO_WCSAXES:\n return isinstance(axes, wcsaxes.WCSAxes)\n else:\n return False\n\n\ndef gca_wcs(wcs, fig=None):\n \"\"\"\n Get the current axes, and return a WCSAxes if possible\n \"\"\"\n\n if not fig:\n fig = plt.gcf()\n\n if not len(fig.get_axes()):\n if HAVE_WCSAXES and not FORCE_NO_WCSAXES:\n ax = plt.gca(projection=wcs)\n else:\n ax = plt.gca()\n\n else:\n ax = plt.gca()\n\n return ax\n\ndef get_world_transform(axes):\n if is_wcsaxes(axes):\n transform = axes.get_transform('world')\n else:\n transform = axes.transData\n\n return transform\n\ndef default_wcs_grid(axes):\n \"\"\"\n Apply some default wcsaxes grid formatting\n \"\"\"\n if not isinstance(axes, wcsaxes.WCSAxes):\n raise TypeError(\"This axes is not a WCSAxes\")\n\n x = axes.coords[0]\n y = axes.coords[1]\n\n x.set_ticks(color='white')\n y.set_ticks(color='white')\n\n x.set_ticks_position('bl')\n y.set_ticks_position('bl')\n\n if x.coord_type != 'longitude':\n x.set_coord_type('longitude', coord_wrap=180.)\n if y.coord_type != 'latitude':\n y.set_coord_type('latitude')\n\n x.set_major_formatter('s.s')\n y.set_major_formatter('s.s')\n\n axes.coords.grid(color='white', alpha=0.6)\n\ndef wcsaxes_heliographic_overlay(axes):\n \"\"\"\n Draw a heliographic overlay using wcsaxes\n \"\"\"\n overlay = axes.get_coords_overlay('heliographicstonyhurst')\n\n lon = overlay[0]\n lat = overlay[1]\n\n lon.coord_wrap = 180\n lon.set_major_formatter('dd')\n\n lon.set_axislabel('Solar Longitude')\n lat.set_axislabel('Solar Latitude')\n\n lon.set_ticks_position('tr')\n lat.set_ticks_position('tr')\n\n lon.set_ticks(spacing=10. * u.deg, color='white')\n lat.set_ticks(spacing=10. * u.deg, color='white')\n\n overlay.grid(color='white', alpha=0.5)\n\n return overlay\n", "path": "sunpy/visualization/wcsaxes_compat.py" } ]
diff --git a/sunpy/visualization/wcsaxes_compat.py b/sunpy/visualization/wcsaxes_compat.py index 0bf4f5050d2..d541e6887e3 100644 --- a/sunpy/visualization/wcsaxes_compat.py +++ b/sunpy/visualization/wcsaxes_compat.py @@ -84,6 +84,11 @@ def default_wcs_grid(axes): x.set_ticks_position('bl') y.set_ticks_position('bl') + if x.coord_type != 'longitude': + x.set_coord_type('longitude', coord_wrap=180.) + if y.coord_type != 'latitude': + y.set_coord_type('latitude') + x.set_major_formatter('s.s') y.set_major_formatter('s.s')
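The patch forces the coordinate type on each wcsaxes coordinate helper before the arcsecond (`s.s`) formatter is applied, so the formatting no longer depends on wcslib having classified the axes as celestial. A rough standalone sketch of the same calls (assuming the `wcsaxes` package is installed and `smap` is a map-like object exposing `.wcs` and `.data`; this is not sunpy's exact plotting path):

```python
import matplotlib.pyplot as plt


def plot_with_forced_coord_types(smap):
    # smap is assumed to expose .wcs (an astropy.wcs.WCS) and .data (a 2D array).
    plt.figure()
    ax = plt.subplot(projection=smap.wcs)  # creates a wcsaxes.WCSAxes
    ax.imshow(smap.data, origin='lower', cmap='gray')

    x, y = ax.coords[0], ax.coords[1]
    # Mirror the patched default_wcs_grid(): tell wcsaxes these are angular
    # axes even if wcslib did not flag them as celestial.
    if x.coord_type != 'longitude':
        x.set_coord_type('longitude', coord_wrap=180.)
    if y.coord_type != 'latitude':
        y.set_coord_type('latitude')

    # With the coordinate types forced, the arcsecond formatter attaches cleanly.
    x.set_major_formatter('s.s')
    y.set_major_formatter('s.s')
    return ax
```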
locustio__locust-2533
TaskSet missing _cp_last_run attribute

### Prerequisites

- [X] I am using [the latest version of Locust](https://github.com/locustio/locust/releases/)
- [X] I am reporting a bug, not asking a question

### Description

When using the `constant_pacing` wait time strategy in a task set, an error is thrown. `TaskSet` instances do not have a `_cp_last_run` attribute.

```
During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "C:\Users\P3701275\projects\20240104_TaskSetTest\venv\Lib\site-packages\locust\user\task.py", line 343, in run
    self.execute_next_task()
  File "C:\Users\P3701275\projects\20240104_TaskSetTest\venv\Lib\site-packages\locust\user\task.py", line 376, in execute_next_task
    self.execute_task(self._task_queue.pop(0))
  File "C:\Users\P3701275\projects\20240104_TaskSetTest\venv\Lib\site-packages\locust\user\task.py", line 494, in execute_task
    task(self.user).run()
  File "C:\Users\P3701275\projects\20240104_TaskSetTest\venv\Lib\site-packages\locust\user\task.py", line 371, in run
    self.wait()
  File "C:\Users\P3701275\projects\20240104_TaskSetTest\venv\Lib\site-packages\locust\user\task.py", line 449, in wait
    self._sleep(self.wait_time())
                ^^^^^^^^^^^^^^^^
  File "C:\Users\P3701275\projects\20240104_TaskSetTest\venv\Lib\site-packages\locust\user\wait_time.py", line 52, in wait_time_func
    run_time = time() - self._cp_last_run - self._cp_last_wait_time
               ^^^^^^^^^^^^^^^^^
AttributeError: 'MyTaskSet' object has no attribute '_cp_last_run'
```

### Command line

locust -f locustfile_cp_nested.py -t 20s -H "http://www.google.com" --headless

### Locustfile contents

```python3
from locust import TaskSet, constant, task, HttpUser, constant_pacing


class MyUser(HttpUser):
    wait_time = constant(1)

    @task
    class MyTaskSet(TaskSet):
        wait_time = constant_pacing(1)

        @task
        def do_something(self):
            self.client.get("/")
```

### Python version

3.11

### Locust version

2.20.1

### Operating system

Windows
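Until a fix lands in locust itself, one user-side workaround is to seed the pacing state that `wait_time_func` reads before the nested TaskSet runs its first task. This is a hedged sketch: the `_cp_last_run` / `_cp_last_wait_time` names come straight from the traceback above, but seeding them in `on_start` is an assumption about private internals, not a documented locust API.

```python
from time import time

from locust import HttpUser, TaskSet, constant, constant_pacing, task


class MyUser(HttpUser):
    host = "http://www.google.com"
    wait_time = constant(1)

    @task
    class MyTaskSet(TaskSet):
        wait_time = constant_pacing(1)

        def on_start(self):
            # Workaround: TaskSet.__init__ does not set these, so seed the
            # attributes that constant_pacing's wait_time_func reads on its
            # first call (private names taken from the traceback).
            self._cp_last_run = time()
            self._cp_last_wait_time = 0

        @task
        def do_something(self):
            self.client.get("/")
```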
[ { "content": "from __future__ import annotations\nimport logging\nimport random\nimport traceback\nfrom time import time\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n List,\n TypeVar,\n Optional,\n Type,\n overload,\n Dict,\n Set,\n Protocol,\n final,\n runtime_checkable,\n)\nimport gevent\nfrom gevent import GreenletExit\n\nfrom locust.exception import InterruptTaskSet, RescheduleTask, RescheduleTaskImmediately, StopUser, MissingWaitTimeError\n\nif TYPE_CHECKING:\n from locust import User\n\n\nlogger = logging.getLogger(__name__)\nTaskT = TypeVar(\"TaskT\", Callable[..., None], Type[\"TaskSet\"])\n\nLOCUST_STATE_RUNNING, LOCUST_STATE_WAITING, LOCUST_STATE_STOPPING = [\"running\", \"waiting\", \"stopping\"]\n\n\n@runtime_checkable\nclass TaskHolder(Protocol[TaskT]):\n tasks: List[TaskT]\n\n\n@overload\ndef task(weight: TaskT) -> TaskT:\n ...\n\n\n@overload\ndef task(weight: int) -> Callable[[TaskT], TaskT]:\n ...\n\n\ndef task(weight: TaskT | int = 1) -> TaskT | Callable[[TaskT], TaskT]:\n \"\"\"\n Used as a convenience decorator to be able to declare tasks for a User or a TaskSet\n inline in the class. Example::\n\n class ForumPage(TaskSet):\n @task(100)\n def read_thread(self):\n pass\n\n @task(7)\n def create_thread(self):\n pass\n\n @task(25)\n class ForumThread(TaskSet):\n @task\n def get_author(self):\n pass\n\n @task\n def get_created(self):\n pass\n \"\"\"\n\n def decorator_func(func):\n if func.__name__ in [\"on_stop\", \"on_start\"]:\n logging.warning(\n \"You have tagged your on_stop/start function with @task. This will make the method get called both as a task AND on stop/start.\"\n ) # this is usually not what the user intended\n if func.__name__ == \"run\":\n raise Exception(\n \"User.run() is a method used internally by Locust, and you must not override it or register it as a task\"\n )\n func.locust_task_weight = weight\n return func\n\n \"\"\"\n Check if task was used without parentheses (not called), like this::\n\n @task\n def my_task(self)\n pass\n \"\"\"\n if callable(weight):\n func = weight\n weight = 1\n return decorator_func(func)\n else:\n return decorator_func\n\n\ndef tag(*tags: str) -> Callable[[TaskT], TaskT]:\n \"\"\"\n Decorator for tagging tasks and TaskSets with the given tag name. You can\n then limit the test to only execute tasks that are tagged with any of the\n tags provided by the :code:`--tags` command-line argument. 
Example::\n\n class ForumPage(TaskSet):\n @tag('thread')\n @task(100)\n def read_thread(self):\n pass\n\n @tag('thread')\n @tag('post')\n @task(7)\n def create_thread(self):\n pass\n\n @tag('post')\n @task(11)\n def comment(self):\n pass\n \"\"\"\n\n def decorator_func(decorated):\n if hasattr(decorated, \"tasks\"):\n decorated.tasks = list(map(tag(*tags), decorated.tasks))\n else:\n if \"locust_tag_set\" not in decorated.__dict__:\n decorated.locust_tag_set = set()\n decorated.locust_tag_set |= set(tags)\n return decorated\n\n if len(tags) == 0 or callable(tags[0]):\n raise ValueError(\"No tag name was supplied\")\n\n return decorator_func\n\n\ndef get_tasks_from_base_classes(bases, class_dict):\n \"\"\"\n Function used by both TaskSetMeta and UserMeta for collecting all declared tasks\n on the TaskSet/User class and all its base classes\n \"\"\"\n new_tasks = []\n for base in bases:\n if hasattr(base, \"tasks\") and base.tasks:\n new_tasks += base.tasks\n\n if \"tasks\" in class_dict and class_dict[\"tasks\"] is not None:\n tasks = class_dict[\"tasks\"]\n if isinstance(tasks, dict):\n tasks = tasks.items()\n\n for task in tasks:\n if isinstance(task, tuple):\n task, count = task\n for _ in range(count):\n new_tasks.append(task)\n else:\n new_tasks.append(task)\n\n for item in class_dict.values():\n if \"locust_task_weight\" in dir(item):\n for i in range(item.locust_task_weight):\n new_tasks.append(item)\n\n return new_tasks\n\n\ndef filter_tasks_by_tags(\n task_holder: Type[TaskHolder],\n tags: Optional[Set[str]] = None,\n exclude_tags: Optional[Set[str]] = None,\n checked: Optional[Dict[TaskT, bool]] = None,\n):\n \"\"\"\n Function used by Environment to recursively remove any tasks/TaskSets from a TaskSet/User that\n shouldn't be executed according to the tag options\n \"\"\"\n\n new_tasks = []\n if checked is None:\n checked = {}\n for task in task_holder.tasks:\n if task in checked:\n if checked[task]:\n new_tasks.append(task)\n continue\n\n passing = True\n if hasattr(task, \"tasks\"):\n filter_tasks_by_tags(task, tags, exclude_tags, checked)\n passing = len(task.tasks) > 0\n else:\n if tags is not None:\n passing &= \"locust_tag_set\" in dir(task) and len(task.locust_tag_set & tags) > 0\n if exclude_tags is not None:\n passing &= \"locust_tag_set\" not in dir(task) or len(task.locust_tag_set & exclude_tags) == 0\n\n if passing:\n new_tasks.append(task)\n checked[task] = passing\n\n task_holder.tasks = new_tasks\n if not new_tasks:\n logging.warning(f\"{task_holder.__name__} had no tasks left after filtering, instantiating it will fail!\")\n\n\nclass TaskSetMeta(type):\n \"\"\"\n Meta class for the main User class. It's used to allow User classes to specify task execution\n ratio using an {task:int} dict, or a [(task0,int), ..., (taskN,int)] list.\n \"\"\"\n\n def __new__(mcs, classname, bases, class_dict):\n class_dict[\"tasks\"] = get_tasks_from_base_classes(bases, class_dict)\n return type.__new__(mcs, classname, bases, class_dict)\n\n\nclass TaskSet(metaclass=TaskSetMeta):\n \"\"\"\n Class defining a set of tasks that a User will execute.\n\n When a TaskSet starts running, it will pick a task from the *tasks* attribute,\n execute it, and then sleep for the number of seconds returned by its *wait_time*\n function. If no wait_time method has been declared on the TaskSet, it'll call the\n wait_time function on the User by default. 
It will then schedule another task\n for execution and so on.\n\n TaskSets can be nested, which means that a TaskSet's *tasks* attribute can contain\n another TaskSet. If the nested TaskSet is scheduled to be executed, it will be\n instantiated and called from the currently executing TaskSet. Execution in the\n currently running TaskSet will then be handed over to the nested TaskSet which will\n continue to run until it throws an InterruptTaskSet exception, which is done when\n :py:meth:`TaskSet.interrupt() <locust.TaskSet.interrupt>` is called. (execution\n will then continue in the first TaskSet).\n \"\"\"\n\n tasks: List[TaskSet | Callable] = []\n \"\"\"\n Collection of python callables and/or TaskSet classes that the User(s) will run.\n\n If tasks is a list, the task to be performed will be picked randomly.\n\n If tasks is a *(callable,int)* list of two-tuples, or a {callable:int} dict,\n the task to be performed will be picked randomly, but each task will be weighted\n according to its corresponding int value. So in the following case, *ThreadPage* will\n be fifteen times more likely to be picked than *write_post*::\n\n class ForumPage(TaskSet):\n tasks = {ThreadPage:15, write_post:1}\n \"\"\"\n\n min_wait: Optional[float] = None\n \"\"\"\n Deprecated: Use wait_time instead.\n Minimum waiting time between the execution of user tasks. Can be used to override\n the min_wait defined in the root User class, which will be used if not set on the\n TaskSet.\n \"\"\"\n\n max_wait: Optional[float] = None\n \"\"\"\n Deprecated: Use wait_time instead.\n Maximum waiting time between the execution of user tasks. Can be used to override\n the max_wait defined in the root User class, which will be used if not set on the\n TaskSet.\n \"\"\"\n\n wait_function = None\n \"\"\"\n Deprecated: Use wait_time instead.\n Function used to calculate waiting time between the execution of user tasks in milliseconds.\n Can be used to override the wait_function defined in the root User class, which will be used\n if not set on the TaskSet.\n \"\"\"\n\n _user: \"User\"\n _parent: \"User\"\n\n def __init__(self, parent: \"User\") -> None:\n self._task_queue: List[Callable] = []\n self._time_start = time()\n\n if isinstance(parent, TaskSet):\n self._user = parent.user\n else:\n self._user = parent\n\n self._parent = parent\n\n # if this class doesn't have a min_wait, max_wait or wait_function defined, copy it from Locust\n if not self.min_wait:\n self.min_wait = self.user.min_wait\n if not self.max_wait:\n self.max_wait = self.user.max_wait\n if not self.wait_function:\n self.wait_function = self.user.wait_function\n\n @property\n def user(self) -> \"User\":\n \"\"\":py:class:`User <locust.User>` instance that this TaskSet was created by\"\"\"\n return self._user\n\n @property\n def parent(self):\n \"\"\"Parent TaskSet instance of this TaskSet (or :py:class:`User <locust.User>` if this is not a nested TaskSet)\"\"\"\n return self._parent\n\n def on_start(self):\n \"\"\"\n Called when a User starts executing this TaskSet\n \"\"\"\n pass\n\n def on_stop(self):\n \"\"\"\n Called when a User stops executing this TaskSet. E.g. 
when TaskSet.interrupt() is called\n or when the User is killed\n \"\"\"\n pass\n\n @final\n def run(self):\n try:\n self.on_start()\n except InterruptTaskSet as e:\n if e.reschedule:\n raise RescheduleTaskImmediately(e.reschedule).with_traceback(e.__traceback__)\n else:\n raise RescheduleTask(e.reschedule).with_traceback(e.__traceback__)\n\n while True:\n try:\n if not self._task_queue:\n self.schedule_task(self.get_next_task())\n\n try:\n if self.user._state == LOCUST_STATE_STOPPING:\n raise StopUser()\n self.execute_next_task()\n except RescheduleTaskImmediately:\n pass\n except RescheduleTask:\n self.wait()\n else:\n self.wait()\n except InterruptTaskSet as e:\n try:\n self.on_stop()\n except (StopUser, GreenletExit):\n raise\n except Exception:\n logging.error(\"Uncaught exception in on_stop: \\n%s\", traceback.format_exc())\n if e.reschedule:\n raise RescheduleTaskImmediately(e.reschedule) from e\n else:\n raise RescheduleTask(e.reschedule) from e\n except (StopUser, GreenletExit):\n try:\n self.on_stop()\n except Exception:\n logging.error(\"Uncaught exception in on_stop: \\n%s\", traceback.format_exc())\n raise\n except Exception as e:\n self.user.environment.events.user_error.fire(user_instance=self, exception=e, tb=e.__traceback__)\n if self.user.environment.catch_exceptions:\n logger.error(\"%s\\n%s\", e, traceback.format_exc())\n self.wait()\n else:\n raise\n\n def execute_next_task(self):\n self.execute_task(self._task_queue.pop(0))\n\n def execute_task(self, task):\n # check if the function is a method bound to the current locust, and if so, don't pass self as first argument\n if hasattr(task, \"__self__\") and task.__self__ == self:\n # task is a bound method on self\n task()\n elif hasattr(task, \"tasks\") and issubclass(task, TaskSet):\n # task is another (nested) TaskSet class\n task(self).run()\n else:\n # task is a function\n task(self)\n\n def schedule_task(self, task_callable, first=False):\n \"\"\"\n Add a task to the User's task execution queue.\n\n :param task_callable: User task to schedule.\n :param first: Optional keyword argument. 
If True, the task will be put first in the queue.\n \"\"\"\n if first:\n self._task_queue.insert(0, task_callable)\n else:\n self._task_queue.append(task_callable)\n\n def get_next_task(self):\n if not self.tasks:\n if getattr(self, \"task\", None):\n extra_message = \", but you have set a 'task' attribute - maybe you meant to set 'tasks'?\"\n else:\n extra_message = \".\"\n raise Exception(\n f\"No tasks defined on {self.__class__.__name__}{extra_message} use the @task decorator or set the 'tasks' attribute of the TaskSet\"\n )\n return random.choice(self.tasks)\n\n def wait_time(self):\n \"\"\"\n Method that returns the time (in seconds) between the execution of tasks.\n\n Example::\n\n from locust import TaskSet, between\n class Tasks(TaskSet):\n wait_time = between(3, 25)\n \"\"\"\n if self.user.wait_time:\n return self.user.wait_time()\n elif self.min_wait is not None and self.max_wait is not None:\n return random.randint(self.min_wait, self.max_wait) / 1000.0\n else:\n raise MissingWaitTimeError(\n \"You must define a wait_time method on either the %s or %s class\"\n % (\n type(self.user).__name__,\n type(self).__name__,\n )\n )\n\n def wait(self):\n \"\"\"\n Make the running user sleep for a duration defined by the Locust.wait_time\n function (or TaskSet.wait_time function if it's been defined).\n\n The user can also be killed gracefully while it's sleeping, so calling this\n method within a task makes it possible for a user to be killed mid-task, even if you've\n set a stop_timeout. If this behaviour is not desired you should make the user wait using\n gevent.sleep() instead.\n \"\"\"\n if self.user._state == LOCUST_STATE_STOPPING:\n raise StopUser()\n self.user._state = LOCUST_STATE_WAITING\n self._sleep(self.wait_time())\n if self.user._state == LOCUST_STATE_STOPPING:\n raise StopUser()\n self.user._state = LOCUST_STATE_RUNNING\n\n def _sleep(self, seconds):\n gevent.sleep(seconds)\n\n def interrupt(self, reschedule=True):\n \"\"\"\n Interrupt the TaskSet and hand over execution control back to the parent TaskSet.\n\n If *reschedule* is True (default), the parent User will immediately re-schedule,\n and execute, a new task.\n \"\"\"\n raise InterruptTaskSet(reschedule)\n\n @property\n def client(self):\n \"\"\"\n Shortcut to the client :py:attr:`client <locust.User.client>` attribute of this TaskSet's :py:class:`User <locust.User>`\n \"\"\"\n return self.user.client\n\n\nclass DefaultTaskSet(TaskSet):\n \"\"\"\n Default root TaskSet that executes tasks in User.tasks.\n It executes tasks declared directly on the Locust with the user instance as the task argument.\n \"\"\"\n\n def get_next_task(self):\n if not self.user.tasks:\n if getattr(self.user, \"task\", None):\n extra_message = \", but you have set a 'task' attribute on your class - maybe you meant to set 'tasks'?\"\n else:\n extra_message = \".\"\n raise Exception(\n f\"No tasks defined on {self.user.__class__.__name__}{extra_message} Use the @task decorator or set the 'tasks' attribute of the User (or mark it as abstract = True if you only intend to subclass it)\"\n )\n return random.choice(self.user.tasks)\n\n def execute_task(self, task):\n if hasattr(task, \"tasks\") and issubclass(task, TaskSet):\n # task is (nested) TaskSet class\n task(self.user).run()\n else:\n # task is a function\n task(self.user)\n", "path": "locust/user/task.py" } ]
[ { "content": "from __future__ import annotations\nimport logging\nimport random\nimport traceback\nfrom time import time\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n List,\n TypeVar,\n Optional,\n Type,\n overload,\n Dict,\n Set,\n Protocol,\n final,\n runtime_checkable,\n)\nimport gevent\nfrom gevent import GreenletExit\n\nfrom locust.exception import InterruptTaskSet, RescheduleTask, RescheduleTaskImmediately, StopUser, MissingWaitTimeError\n\nif TYPE_CHECKING:\n from locust import User\n\n\nlogger = logging.getLogger(__name__)\nTaskT = TypeVar(\"TaskT\", Callable[..., None], Type[\"TaskSet\"])\n\nLOCUST_STATE_RUNNING, LOCUST_STATE_WAITING, LOCUST_STATE_STOPPING = [\"running\", \"waiting\", \"stopping\"]\n\n\n@runtime_checkable\nclass TaskHolder(Protocol[TaskT]):\n tasks: List[TaskT]\n\n\n@overload\ndef task(weight: TaskT) -> TaskT:\n ...\n\n\n@overload\ndef task(weight: int) -> Callable[[TaskT], TaskT]:\n ...\n\n\ndef task(weight: TaskT | int = 1) -> TaskT | Callable[[TaskT], TaskT]:\n \"\"\"\n Used as a convenience decorator to be able to declare tasks for a User or a TaskSet\n inline in the class. Example::\n\n class ForumPage(TaskSet):\n @task(100)\n def read_thread(self):\n pass\n\n @task(7)\n def create_thread(self):\n pass\n\n @task(25)\n class ForumThread(TaskSet):\n @task\n def get_author(self):\n pass\n\n @task\n def get_created(self):\n pass\n \"\"\"\n\n def decorator_func(func):\n if func.__name__ in [\"on_stop\", \"on_start\"]:\n logging.warning(\n \"You have tagged your on_stop/start function with @task. This will make the method get called both as a task AND on stop/start.\"\n ) # this is usually not what the user intended\n if func.__name__ == \"run\":\n raise Exception(\n \"User.run() is a method used internally by Locust, and you must not override it or register it as a task\"\n )\n func.locust_task_weight = weight\n return func\n\n \"\"\"\n Check if task was used without parentheses (not called), like this::\n\n @task\n def my_task(self)\n pass\n \"\"\"\n if callable(weight):\n func = weight\n weight = 1\n return decorator_func(func)\n else:\n return decorator_func\n\n\ndef tag(*tags: str) -> Callable[[TaskT], TaskT]:\n \"\"\"\n Decorator for tagging tasks and TaskSets with the given tag name. You can\n then limit the test to only execute tasks that are tagged with any of the\n tags provided by the :code:`--tags` command-line argument. 
Example::\n\n class ForumPage(TaskSet):\n @tag('thread')\n @task(100)\n def read_thread(self):\n pass\n\n @tag('thread')\n @tag('post')\n @task(7)\n def create_thread(self):\n pass\n\n @tag('post')\n @task(11)\n def comment(self):\n pass\n \"\"\"\n\n def decorator_func(decorated):\n if hasattr(decorated, \"tasks\"):\n decorated.tasks = list(map(tag(*tags), decorated.tasks))\n else:\n if \"locust_tag_set\" not in decorated.__dict__:\n decorated.locust_tag_set = set()\n decorated.locust_tag_set |= set(tags)\n return decorated\n\n if len(tags) == 0 or callable(tags[0]):\n raise ValueError(\"No tag name was supplied\")\n\n return decorator_func\n\n\ndef get_tasks_from_base_classes(bases, class_dict):\n \"\"\"\n Function used by both TaskSetMeta and UserMeta for collecting all declared tasks\n on the TaskSet/User class and all its base classes\n \"\"\"\n new_tasks = []\n for base in bases:\n if hasattr(base, \"tasks\") and base.tasks:\n new_tasks += base.tasks\n\n if \"tasks\" in class_dict and class_dict[\"tasks\"] is not None:\n tasks = class_dict[\"tasks\"]\n if isinstance(tasks, dict):\n tasks = tasks.items()\n\n for task in tasks:\n if isinstance(task, tuple):\n task, count = task\n for _ in range(count):\n new_tasks.append(task)\n else:\n new_tasks.append(task)\n\n for item in class_dict.values():\n if \"locust_task_weight\" in dir(item):\n for i in range(item.locust_task_weight):\n new_tasks.append(item)\n\n return new_tasks\n\n\ndef filter_tasks_by_tags(\n task_holder: Type[TaskHolder],\n tags: Optional[Set[str]] = None,\n exclude_tags: Optional[Set[str]] = None,\n checked: Optional[Dict[TaskT, bool]] = None,\n):\n \"\"\"\n Function used by Environment to recursively remove any tasks/TaskSets from a TaskSet/User that\n shouldn't be executed according to the tag options\n \"\"\"\n\n new_tasks = []\n if checked is None:\n checked = {}\n for task in task_holder.tasks:\n if task in checked:\n if checked[task]:\n new_tasks.append(task)\n continue\n\n passing = True\n if hasattr(task, \"tasks\"):\n filter_tasks_by_tags(task, tags, exclude_tags, checked)\n passing = len(task.tasks) > 0\n else:\n if tags is not None:\n passing &= \"locust_tag_set\" in dir(task) and len(task.locust_tag_set & tags) > 0\n if exclude_tags is not None:\n passing &= \"locust_tag_set\" not in dir(task) or len(task.locust_tag_set & exclude_tags) == 0\n\n if passing:\n new_tasks.append(task)\n checked[task] = passing\n\n task_holder.tasks = new_tasks\n if not new_tasks:\n logging.warning(f\"{task_holder.__name__} had no tasks left after filtering, instantiating it will fail!\")\n\n\nclass TaskSetMeta(type):\n \"\"\"\n Meta class for the main User class. It's used to allow User classes to specify task execution\n ratio using an {task:int} dict, or a [(task0,int), ..., (taskN,int)] list.\n \"\"\"\n\n def __new__(mcs, classname, bases, class_dict):\n class_dict[\"tasks\"] = get_tasks_from_base_classes(bases, class_dict)\n return type.__new__(mcs, classname, bases, class_dict)\n\n\nclass TaskSet(metaclass=TaskSetMeta):\n \"\"\"\n Class defining a set of tasks that a User will execute.\n\n When a TaskSet starts running, it will pick a task from the *tasks* attribute,\n execute it, and then sleep for the number of seconds returned by its *wait_time*\n function. If no wait_time method has been declared on the TaskSet, it'll call the\n wait_time function on the User by default. 
It will then schedule another task\n for execution and so on.\n\n TaskSets can be nested, which means that a TaskSet's *tasks* attribute can contain\n another TaskSet. If the nested TaskSet is scheduled to be executed, it will be\n instantiated and called from the currently executing TaskSet. Execution in the\n currently running TaskSet will then be handed over to the nested TaskSet which will\n continue to run until it throws an InterruptTaskSet exception, which is done when\n :py:meth:`TaskSet.interrupt() <locust.TaskSet.interrupt>` is called. (execution\n will then continue in the first TaskSet).\n \"\"\"\n\n tasks: List[TaskSet | Callable] = []\n \"\"\"\n Collection of python callables and/or TaskSet classes that the User(s) will run.\n\n If tasks is a list, the task to be performed will be picked randomly.\n\n If tasks is a *(callable,int)* list of two-tuples, or a {callable:int} dict,\n the task to be performed will be picked randomly, but each task will be weighted\n according to its corresponding int value. So in the following case, *ThreadPage* will\n be fifteen times more likely to be picked than *write_post*::\n\n class ForumPage(TaskSet):\n tasks = {ThreadPage:15, write_post:1}\n \"\"\"\n\n min_wait: Optional[float] = None\n \"\"\"\n Deprecated: Use wait_time instead.\n Minimum waiting time between the execution of user tasks. Can be used to override\n the min_wait defined in the root User class, which will be used if not set on the\n TaskSet.\n \"\"\"\n\n max_wait: Optional[float] = None\n \"\"\"\n Deprecated: Use wait_time instead.\n Maximum waiting time between the execution of user tasks. Can be used to override\n the max_wait defined in the root User class, which will be used if not set on the\n TaskSet.\n \"\"\"\n\n wait_function = None\n \"\"\"\n Deprecated: Use wait_time instead.\n Function used to calculate waiting time between the execution of user tasks in milliseconds.\n Can be used to override the wait_function defined in the root User class, which will be used\n if not set on the TaskSet.\n \"\"\"\n\n _user: \"User\"\n _parent: \"User\"\n\n def __init__(self, parent: \"User\") -> None:\n self._task_queue: List[Callable] = []\n self._time_start = time()\n\n if isinstance(parent, TaskSet):\n self._user = parent.user\n else:\n self._user = parent\n\n self._parent = parent\n\n # if this class doesn't have a min_wait, max_wait or wait_function defined, copy it from Locust\n if not self.min_wait:\n self.min_wait = self.user.min_wait\n if not self.max_wait:\n self.max_wait = self.user.max_wait\n if not self.wait_function:\n self.wait_function = self.user.wait_function\n self._cp_last_run = time() # used by constant_pacing wait_time\n\n @property\n def user(self) -> \"User\":\n \"\"\":py:class:`User <locust.User>` instance that this TaskSet was created by\"\"\"\n return self._user\n\n @property\n def parent(self):\n \"\"\"Parent TaskSet instance of this TaskSet (or :py:class:`User <locust.User>` if this is not a nested TaskSet)\"\"\"\n return self._parent\n\n def on_start(self):\n \"\"\"\n Called when a User starts executing this TaskSet\n \"\"\"\n pass\n\n def on_stop(self):\n \"\"\"\n Called when a User stops executing this TaskSet. E.g. 
when TaskSet.interrupt() is called\n or when the User is killed\n \"\"\"\n pass\n\n @final\n def run(self):\n try:\n self.on_start()\n except InterruptTaskSet as e:\n if e.reschedule:\n raise RescheduleTaskImmediately(e.reschedule).with_traceback(e.__traceback__)\n else:\n raise RescheduleTask(e.reschedule).with_traceback(e.__traceback__)\n\n while True:\n try:\n if not self._task_queue:\n self.schedule_task(self.get_next_task())\n\n try:\n if self.user._state == LOCUST_STATE_STOPPING:\n raise StopUser()\n self.execute_next_task()\n except RescheduleTaskImmediately:\n pass\n except RescheduleTask:\n self.wait()\n else:\n self.wait()\n except InterruptTaskSet as e:\n try:\n self.on_stop()\n except (StopUser, GreenletExit):\n raise\n except Exception:\n logging.error(\"Uncaught exception in on_stop: \\n%s\", traceback.format_exc())\n if e.reschedule:\n raise RescheduleTaskImmediately(e.reschedule) from e\n else:\n raise RescheduleTask(e.reschedule) from e\n except (StopUser, GreenletExit):\n try:\n self.on_stop()\n except Exception:\n logging.error(\"Uncaught exception in on_stop: \\n%s\", traceback.format_exc())\n raise\n except Exception as e:\n self.user.environment.events.user_error.fire(user_instance=self, exception=e, tb=e.__traceback__)\n if self.user.environment.catch_exceptions:\n logger.error(\"%s\\n%s\", e, traceback.format_exc())\n self.wait()\n else:\n raise\n\n def execute_next_task(self):\n self.execute_task(self._task_queue.pop(0))\n\n def execute_task(self, task):\n # check if the function is a method bound to the current locust, and if so, don't pass self as first argument\n if hasattr(task, \"__self__\") and task.__self__ == self:\n # task is a bound method on self\n task()\n elif hasattr(task, \"tasks\") and issubclass(task, TaskSet):\n # task is another (nested) TaskSet class\n task(self).run()\n else:\n # task is a function\n task(self)\n\n def schedule_task(self, task_callable, first=False):\n \"\"\"\n Add a task to the User's task execution queue.\n\n :param task_callable: User task to schedule.\n :param first: Optional keyword argument. 
If True, the task will be put first in the queue.\n \"\"\"\n if first:\n self._task_queue.insert(0, task_callable)\n else:\n self._task_queue.append(task_callable)\n\n def get_next_task(self):\n if not self.tasks:\n if getattr(self, \"task\", None):\n extra_message = \", but you have set a 'task' attribute - maybe you meant to set 'tasks'?\"\n else:\n extra_message = \".\"\n raise Exception(\n f\"No tasks defined on {self.__class__.__name__}{extra_message} use the @task decorator or set the 'tasks' attribute of the TaskSet\"\n )\n return random.choice(self.tasks)\n\n def wait_time(self):\n \"\"\"\n Method that returns the time (in seconds) between the execution of tasks.\n\n Example::\n\n from locust import TaskSet, between\n class Tasks(TaskSet):\n wait_time = between(3, 25)\n \"\"\"\n if self.user.wait_time:\n return self.user.wait_time()\n elif self.min_wait is not None and self.max_wait is not None:\n return random.randint(self.min_wait, self.max_wait) / 1000.0\n else:\n raise MissingWaitTimeError(\n \"You must define a wait_time method on either the %s or %s class\"\n % (\n type(self.user).__name__,\n type(self).__name__,\n )\n )\n\n def wait(self):\n \"\"\"\n Make the running user sleep for a duration defined by the Locust.wait_time\n function (or TaskSet.wait_time function if it's been defined).\n\n The user can also be killed gracefully while it's sleeping, so calling this\n method within a task makes it possible for a user to be killed mid-task, even if you've\n set a stop_timeout. If this behaviour is not desired you should make the user wait using\n gevent.sleep() instead.\n \"\"\"\n if self.user._state == LOCUST_STATE_STOPPING:\n raise StopUser()\n self.user._state = LOCUST_STATE_WAITING\n self._sleep(self.wait_time())\n if self.user._state == LOCUST_STATE_STOPPING:\n raise StopUser()\n self.user._state = LOCUST_STATE_RUNNING\n\n def _sleep(self, seconds):\n gevent.sleep(seconds)\n\n def interrupt(self, reschedule=True):\n \"\"\"\n Interrupt the TaskSet and hand over execution control back to the parent TaskSet.\n\n If *reschedule* is True (default), the parent User will immediately re-schedule,\n and execute, a new task.\n \"\"\"\n raise InterruptTaskSet(reschedule)\n\n @property\n def client(self):\n \"\"\"\n Shortcut to the client :py:attr:`client <locust.User.client>` attribute of this TaskSet's :py:class:`User <locust.User>`\n \"\"\"\n return self.user.client\n\n\nclass DefaultTaskSet(TaskSet):\n \"\"\"\n Default root TaskSet that executes tasks in User.tasks.\n It executes tasks declared directly on the Locust with the user instance as the task argument.\n \"\"\"\n\n def get_next_task(self):\n if not self.user.tasks:\n if getattr(self.user, \"task\", None):\n extra_message = \", but you have set a 'task' attribute on your class - maybe you meant to set 'tasks'?\"\n else:\n extra_message = \".\"\n raise Exception(\n f\"No tasks defined on {self.user.__class__.__name__}{extra_message} Use the @task decorator or set the 'tasks' attribute of the User (or mark it as abstract = True if you only intend to subclass it)\"\n )\n return random.choice(self.user.tasks)\n\n def execute_task(self, task):\n if hasattr(task, \"tasks\") and issubclass(task, TaskSet):\n # task is (nested) TaskSet class\n task(self.user).run()\n else:\n # task is a function\n task(self.user)\n", "path": "locust/user/task.py" } ]
diff --git a/locust/user/task.py b/locust/user/task.py index 76629316da..56d68848fe 100644 --- a/locust/user/task.py +++ b/locust/user/task.py @@ -298,6 +298,7 @@ def __init__(self, parent: "User") -> None: self.max_wait = self.user.max_wait if not self.wait_function: self.wait_function = self.user.wait_function + self._cp_last_run = time() # used by constant_pacing wait_time @property def user(self) -> "User":
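The one-line fix works because constant pacing is stateful: on every call it needs the timestamp of the previous run (and the previous wait) to decide how much of the pacing interval is left, and `TaskSet.__init__` never initialised that state. A simplified sketch of the calculation (the `run_time` line mirrors the traceback; the rest is an approximation, not locust's actual `wait_time.py`):

```python
from time import time


def constant_pacing_sketch(pacing):
    """Return a wait_time-style callable targeting one task per `pacing` seconds."""

    def wait_time_func(runner):
        # Needs runner._cp_last_run / _cp_last_wait_time to already exist,
        # which is exactly what the added line in TaskSet.__init__ guarantees.
        run_time = time() - runner._cp_last_run - runner._cp_last_wait_time
        runner._cp_last_wait_time = max(0, pacing - run_time)
        runner._cp_last_run = time()
        return runner._cp_last_wait_time

    return wait_time_func


class FakeRunner:
    def __init__(self):
        self._cp_last_run = time()   # mirrors the fix above
        self._cp_last_wait_time = 0


if __name__ == "__main__":
    runner = FakeRunner()
    wait = constant_pacing_sketch(1.0)
    print(wait(runner))  # close to 1.0 when the "task" took ~0 seconds
```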
pytorch__ignite-1324
Create a logo for Ignite

### 🚀 Feature

@vfdev-5 asked me to work on a logo to represent Ignite and its relation to main PyTorch, so I’m opening this issue to post iterations and provide a place for discussion. All feedback is welcome! I'm looking forward to hearing the community's thoughts.

These first iterations use the same orange as PyTorch (`#ee4c2c`) and maintain a similar line weight (and sometimes the small circle) in order to show a visual relationship between the projects. The potential directions for Ignite’s logomark are meant to highlight Ignite’s strengths of simplicity and speed as well as the kick-starting nature of some of its “out-of-the-box” features.

### Option 1-1

Compared to the current PyTorch logo, Option 1 is meant to be more an energetic spark than a steady, symmetrical flame.

<img width="502" alt="1-1" src="https://user-images.githubusercontent.com/50221806/88605197-19e91480-d02e-11ea-9d97-5fc07286dc95.png">
<img width="502" alt="1-1color" src="https://user-images.githubusercontent.com/50221806/88605198-1a81ab00-d02e-11ea-8771-2f2713077df9.png">

### Option 2-1

Option 2 is the furthest from the current PyTorch logo and revolves around the idea of a matchstick. Of the different options, its flame shape is the closest to the current PyTorch logo.

<img width="502" alt="2-1" src="https://user-images.githubusercontent.com/50221806/88605212-22d9e600-d02e-11ea-8678-eb0785fcd140.png">
<img width="502" alt="2-1color" src="https://user-images.githubusercontent.com/50221806/88605215-23727c80-d02e-11ea-89cf-b5ac451839ac.png">

### Option 3-1

Another idea around a spark originating from a single point, with a different flame shape and direction.

<img width="502" alt="3-1" src="https://user-images.githubusercontent.com/50221806/88605221-2a998a80-d02e-11ea-85d0-8e9ac7b711c3.png">
<img width="502" alt="3-1color" src="https://user-images.githubusercontent.com/50221806/88605222-2a998a80-d02e-11ea-8372-87e1aa4eaca7.png">

### Option 4-1

This developed as a variation of Option 3. While it is my least favorite, I wanted to provide an option that feels sharper and more explosive.

<img width="502" alt="4-1" src="https://user-images.githubusercontent.com/50221806/88605232-2ff6d500-d02e-11ea-8078-e4d25c48d10b.png">
<img width="502" alt="4-1color" src="https://user-images.githubusercontent.com/50221806/88605234-308f6b80-d02e-11ea-99c9-8ba077afe688.png">

Update Circle CI docker image to pytorch 1.6

## 🚀 Feature

Following the release of PyTorch 1.6, let's update the Circle CI docker image:

- https://github.com/pytorch/ignite/blob/master/.circleci/config.yml

and add the 1.5.1 version to the pytorch compatibility version checks:

- https://github.com/pytorch/ignite/blob/master/.github/workflows/pytorch-version-tests.yml
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\nimport ignite\nimport pytorch_sphinx_theme\n\n# -- Project information -----------------------------------------------------\n\nproject = \"ignite\"\ncopyright = \"2020, PyTorch-Ignite Contributors\"\nauthor = \"PyTorch-Ignite Contributors\"\n\n# The short X.Y version\ntry:\n version = os.environ[\"code_version\"]\n if \"master\" in version:\n version = \"master (\" + ignite.__version__ + \")\"\n else:\n version = version.replace(\"v\", \"\")\nexcept KeyError:\n version = ignite.__version__\n\n# The full version, including alpha/beta/rc tags\nrelease = \"master\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n \"canonical_url\": \"https://pytorch.org/ignite/index.html\",\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n}\n\nhtml_logo = \"_static/img/ignite-logo-dark.svg\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"_templates/_static\"]\n\nhtml_context = {\n \"css_files\": [\n # 'https://fonts.googleapis.com/css?family=Lato',\n # '_static/css/pytorch_theme.css'\n \"_static/css/ignite_theme.css\"\n ],\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ignitedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"ignite.tex\", \"ignite Documentation\", \"Torch Contributors\", \"manual\"),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ignite\", \"ignite Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"ignite\",\n \"ignite Documentation\",\n author,\n \"ignite\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\"https://docs.python.org/\": None}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/source/conf.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\nimport ignite\nimport pytorch_sphinx_theme\n\n# -- Project information -----------------------------------------------------\n\nproject = \"ignite\"\ncopyright = \"2020, PyTorch-Ignite Contributors\"\nauthor = \"PyTorch-Ignite Contributors\"\n\n# The short X.Y version\ntry:\n version = os.environ[\"code_version\"]\n if \"master\" in version:\n version = \"master (\" + ignite.__version__ + \")\"\n else:\n version = version.replace(\"v\", \"\")\nexcept KeyError:\n version = ignite.__version__\n\n# The full version, including alpha/beta/rc tags\nrelease = \"master\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n \"canonical_url\": \"https://pytorch.org/ignite/index.html\",\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n}\n\nhtml_logo = \"_static/img/ignite_logo.svg\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"_templates/_static\"]\n\nhtml_context = {\n \"css_files\": [\n # 'https://fonts.googleapis.com/css?family=Lato',\n # '_static/css/pytorch_theme.css'\n \"_static/css/ignite_theme.css\"\n ],\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ignitedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"ignite.tex\", \"ignite Documentation\", \"Torch Contributors\", \"manual\"),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ignite\", \"ignite Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"ignite\",\n \"ignite Documentation\",\n author,\n \"ignite\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\"https://docs.python.org/\": None}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/source/conf.py" } ]
diff --git a/README.md b/README.md index 957c91fd7595..164bcf1642e9 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,9 @@ <div align="center"> -![Ignite Logo](assets/ignite_logo.svg) +<!-- ![Ignite Logo](assets/logo/ignite_logo_mixed.svg) --> + +<img src="assets/logo/ignite_logo_mixed.svg" width=512> + [![image](https://travis-ci.org/pytorch/ignite.svg?branch=master)](https://travis-ci.org/pytorch/ignite) @@ -36,7 +39,7 @@ Ignite is a high-level library to help with training and evaluating neural netwo <a href="https://colab.research.google.com/github/pytorch/ignite/blob/master/assets/tldr/teaser.ipynb"> <img alt="PyTorch-Ignite teaser" src="assets/tldr/pytorch-ignite-teaser.gif" - width=532"> + width=532> </a> *Click on the image to see complete code* diff --git a/assets/ignite_logo.svg b/assets/ignite_logo.svg deleted file mode 100644 index cd4719ffe675..000000000000 --- a/assets/ignite_logo.svg +++ /dev/null @@ -1,18 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> -<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0" y="0" width="352.29" height="114.25" viewBox="0, 0, 352.29, 114.25"> - <g id="Layer_1"> - <g> - <path d="M67.526,42.195 C75.733,50.214 75.733,63.265 67.526,71.284 C59.319,79.303 45.955,79.303 37.756,71.284 C29.549,63.265 29.549,50.214 37.756,42.195 C37.756,42.195 37.756,42.195 37.756,42.195 L50.878,29.365 L52.524,27.536 L52.524,27.536 L52.524,17.915 C40.564,29.594 33.923,36.084 32.599,37.385 C21.576,48.148 21.576,65.33 32.599,76.094 C43.615,86.858 61.199,86.858 72.215,76.094 C83.464,65.33 83.464,47.92 72.449,37.385 C71.461,38.342 68.016,41.716 67.526,42.195 z" fill="#EE4C2C" id="bp3iEM1Cm"/> - <g> - <path d="M62.569,26.852 C59.741,26.852 57.443,29.098 57.443,31.861 C57.443,34.624 59.741,36.87 62.569,36.87 C65.397,36.87 67.695,34.624 67.695,31.861 C67.695,29.098 65.397,26.852 62.569,26.852 z" fill="#EE4C2C" id="d5B2guxX6D"/> - </g> - </g> - <path d="M110.607,85.125 L110.607,17.625 L103.803,17.625 L103.803,85.125 z" fill="#000000"/> - <path d="M131.127,52.941 C131.127,45.273 135.879,39.765 143.547,39.765 C151.863,39.765 156.183,45.273 156.183,52.617 C156.183,60.177 151.431,65.577 143.871,65.577 C135.879,65.577 131.127,60.069 131.127,52.941 z M136.959,86.529 C139.767,86.745 143.979,86.853 147.327,86.853 C157.155,86.853 162.339,88.905 162.339,94.737 C162.339,101.433 154.023,106.401 143.763,106.401 C133.287,106.401 126.915,102.081 126.915,96.249 C126.915,92.037 131.019,88.149 136.959,86.529 z M131.127,85.233 C124.755,86.853 120.435,91.821 120.435,97.437 C120.435,106.077 129.507,111.909 142.791,111.909 C156.615,111.909 168.819,104.457 168.819,93.549 C168.819,85.449 162.663,80.589 148.299,80.589 C149.055,80.589 141.927,80.589 141.927,80.589 C133.071,80.589 130.695,78.645 130.695,75.513 C130.695,72.705 133.611,70.653 136.203,69.573 C138.687,70.437 140.847,70.977 143.547,70.977 C154.563,70.977 162.771,63.309 162.771,52.833 C162.771,48.837 161.151,44.085 158.775,41.709 L169.683,42.789 L169.359,36.093 L155.643,37.173 C152.943,35.661 149.163,34.257 143.979,34.257 C132.315,34.257 124.539,42.357 124.539,52.833 C124.539,59.637 127.779,65.037 132.531,67.953 C127.995,69.681 124.431,73.245 124.431,77.349 C124.431,80.913 126.915,83.721 131.127,85.233 z" fill="#000000"/> - <path d="M217.203,85.125 L217.203,51.213 C217.203,40.413 211.371,34.257 201.435,34.257 C196.251,34.257 189.771,37.173 184.587,42.357 L184.587,34.257 L177.891,35.661 
L177.891,85.125 L184.587,85.125 L184.587,47.649 C188.691,43.545 194.199,40.629 199.599,40.629 C207.051,40.629 210.507,44.517 210.507,53.157 L210.507,85.125 z" fill="#000000"/> - <path d="M240.423,19.353 C240.423,16.869 238.911,14.601 235.887,14.601 C232.863,14.601 231.351,16.869 231.351,19.353 C231.351,21.945 232.863,24.321 235.887,24.321 C238.911,24.321 240.423,21.945 240.423,19.353 z M239.235,85.125 L239.235,34.257 L232.539,35.661 L232.539,85.125 z" fill="#000000"/> - <path d="M279.627,84.153 L278.439,78.321 C275.415,79.617 272.175,80.697 268.935,80.697 C263.751,80.697 261.483,77.673 261.699,71.409 L262.455,41.817 L278.871,41.817 L279.195,35.877 L262.671,35.877 L262.995,24.321 L256.623,24.321 L256.299,35.553 L248.847,37.281 L248.523,41.493 L256.083,41.925 L255.327,71.193 C255.003,81.561 259.215,86.745 268.611,86.745 C272.175,86.745 276.171,85.773 279.627,84.153 z" fill="#000000"/> - <path d="M293.559,60.609 L327.039,60.393 C327.687,58.233 328.119,55.101 328.119,52.509 C328.119,41.385 320.991,34.257 310.083,34.257 C296.691,34.257 286.755,45.273 286.755,60.825 C286.755,75.837 296.367,86.529 310.623,86.529 C316.779,86.529 322.287,84.909 326.499,82.317 L325.203,76.161 C321.423,78.645 316.239,80.265 310.839,80.265 C300.471,80.265 293.775,72.273 293.559,61.149 z M293.991,55.317 C295.071,47.109 300.795,40.305 309.651,40.305 C317.427,40.305 321.531,45.489 321.531,53.049 C321.531,53.589 321.531,54.669 321.423,55.209 z" fill="#000000"/> - </g> -</svg> diff --git a/assets/logo/ignite_logo.png b/assets/logo/ignite_logo.png new file mode 100644 index 000000000000..0c6651421bf9 Binary files /dev/null and b/assets/logo/ignite_logo.png differ diff --git a/assets/logo/ignite_logo.svg b/assets/logo/ignite_logo.svg new file mode 100644 index 000000000000..9f1d67610a6f --- /dev/null +++ b/assets/logo/ignite_logo.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 501.38 250"><defs><style>.cls-1{fill:#ee4c2c;}.cls-2{fill:#eaa700;}</style></defs><path class="cls-1" d="M245,102.72h-5V60.79H253.5a22.73,22.73,0,0,1,5.57.64,14,14,0,0,1,4.43,2,8.7,8.7,0,0,1,3,3.53,11.73,11.73,0,0,1,1.06,5.15,19.19,19.19,0,0,1-.44,4.36,11.26,11.26,0,0,1-1.55,3.6,9,9,0,0,1-2.85,2.79,14.68,14.68,0,0,1-4.53,1.74,29,29,0,0,1-6.4.63q-2.89,0-6.84-.36Zm7.31-21.81a18.42,18.42,0,0,0,4.71-.53,9,9,0,0,0,3-1.32,5.38,5.38,0,0,0,1.66-2,7.87,7.87,0,0,0,.74-2.31,20.38,20.38,0,0,0,.14-2.54,6,6,0,0,0-2.4-5.24,11.15,11.15,0,0,0-6.51-1.68H245V80.86Z"/><path class="cls-1" d="M270.46,72h5l9.7,25.59L293.75,72h5q-12.43,34.25-12.86,35.31a17.32,17.32,0,0,1-4.68,7.11,11,11,0,0,1-7.23,2.25,18.85,18.85,0,0,1-2.83-.22l-.28-4c1,.08,1.83.11,2.35.11a8.35,8.35,0,0,0,4.81-1.16,9.65,9.65,0,0,0,2.92-4.36q1.15-2.94,1.71-4.54Z"/><path class="cls-1" d="M314.84,102.72H310V65.55H297.22V61.27h30.37v4.28H314.84Z"/><path class="cls-1" d="M351.64,87.33q0,7.14-3.81,11.59t-10.54,4.44A12.83,12.83,0,0,1,327,98.88a17.13,17.13,0,0,1-3.83-11.52q0-7.17,3.84-11.6t10.54-4.43q6.58,0,10.34,4.44T351.64,87.33ZM328,87.41a14.4,14.4,0,0,0,2.43,8.76,8.21,8.21,0,0,0,7.07,3.3,8,8,0,0,0,6.94-3.28q2.34-3.28,2.34-8.83c0-3.76-.77-6.71-2.31-8.87s-3.89-3.24-7.05-3.24a8.12,8.12,0,0,0-7,3.31C328.82,80.76,328,83.71,328,87.41Z"/><path class="cls-1" d="M372,71.69a14.67,14.67,0,0,1,2.3.17l-.17,4.32a13.38,13.38,0,0,0-2.24-.23,8.16,8.16,0,0,0-6.38,2.74,9.76,9.76,0,0,0-2.45,6.76v17.27h-4.68q0-28.31,0-30.75h3.9l.45,5.52a13.05,13.05,0,0,1,4.06-4.2A9.43,9.43,0,0,1,372,71.69Z"/><path class="cls-1" 
d="M389.6,103.36q-6.78,0-10.6-4.4t-3.83-11.52q0-7.17,3.91-11.64t10.63-4.47a17.61,17.61,0,0,1,4.61.59,10.22,10.22,0,0,1,3.52,1.54l-1.29,3.64a14.33,14.33,0,0,0-6.92-1.76,8.53,8.53,0,0,0-7.05,3.25Q380,81.83,380,87.47q0,5.49,2.56,8.7a8.6,8.6,0,0,0,7.11,3.21,16,16,0,0,0,7.4-2l.58,3.73A13.41,13.41,0,0,1,389.6,103.36Z"/><path class="cls-1" d="M424.44,102.72V84.14q0-4.64-1.48-6.73c-1-1.4-2.69-2.1-5.14-2.1a9.87,9.87,0,0,0-5.95,1.76,8,8,0,0,0-3.16,4.57,41.33,41.33,0,0,0-.25,4.68v16.4h-4.68V60l4.68-.28v12.5c0,1.07,0,2.38-.11,4a12.81,12.81,0,0,1,10.48-4.87q10.29,0,10.29,12.08v19.31Z"/><path class="cls-1" d="M227,182.88h-8.16V113H227Z"/><path class="cls-1" d="M246.47,206.59l-.7-7.13h9.63a37.68,37.68,0,0,0,7-.57,13.7,13.7,0,0,0,5-1.87,13.08,13.08,0,0,0,3.21-2.87,11,11,0,0,0,1.86-4.11,30.69,30.69,0,0,0,.85-5c.15-1.65.21-3.67.21-6.07v-4.58a21,21,0,0,1-6.56,6.26,18.54,18.54,0,0,1-9.49,2.2,21.29,21.29,0,0,1-8.19-1.54,19,19,0,0,1-6.33-4.08,24.83,24.83,0,0,1-4.39-5.91,27.06,27.06,0,0,1-2.65-6.94,32.11,32.11,0,0,1-.82-7.22,37.64,37.64,0,0,1,1.39-10.49,27.48,27.48,0,0,1,4.13-8.59,18.9,18.9,0,0,1,7.2-5.93A23.34,23.34,0,0,1,258,130a19.27,19.27,0,0,1,8.9,2,20.05,20.05,0,0,1,6.82,6v-6.9h7.7v48.07c0,2.21-.06,4.14-.17,5.79a39,39,0,0,1-.8,5.5,21.63,21.63,0,0,1-1.72,5.19,20.19,20.19,0,0,1-3.05,4.37,15.32,15.32,0,0,1-4.68,3.54,28.62,28.62,0,0,1-6.72,2.2,43.78,43.78,0,0,1-9.07.85Zm12.14-30.46a16.36,16.36,0,0,0,7-1.42,11.43,11.43,0,0,0,4.72-3.92,18.2,18.2,0,0,0,2.52-5.57,25.72,25.72,0,0,0,.83-6.7v-3.83a26.76,26.76,0,0,0-.85-7,18,18,0,0,0-2.62-5.72,12,12,0,0,0-4.72-3.94,15.79,15.79,0,0,0-6.87-1.42,14,14,0,0,0-5.93,1.23,12.76,12.76,0,0,0-4.39,3.23,19.77,19.77,0,0,0-2.93,4.68,23.77,23.77,0,0,0-1.72,5.41,29.24,29.24,0,0,0-.52,5.52,26.76,26.76,0,0,0,.56,5.43,22.89,22.89,0,0,0,1.8,5.24,18.71,18.71,0,0,0,3,4.47,13.37,13.37,0,0,0,4.37,3.09A13.77,13.77,0,0,0,258.61,176.13Z"/><path class="cls-1" d="M292.55,182.88v-51.8h6.61l1,7.41a23,23,0,0,1,7.67-6.16,21.75,21.75,0,0,1,9.94-2.34q9,0,13.27,5t4.3,15.46v32.39h-7.89q0-31.15-.05-33.38-.18-6.62-2.66-9.68t-8.29-3.07a16.84,16.84,0,0,0-10.15,2.93,13.29,13.29,0,0,0-5.29,7.6,49,49,0,0,0-.47,7.27v28.33Z"/><path class="cls-1" d="M353.76,112.24v9.35h-7.94v-9.35Zm0,18.84v51.8h-7.94v-51.8Z"/><path class="cls-1" d="M385.11,183.83q-7.65,0-10.86-3.73t-3.4-12V136.94h-7.69l.33-5.2,7.32-.66,2.69-11.85,5.19-.19v12h14.4v5.86h-14.4v29.7q0,5.9,1.8,8.31c1.19,1.61,3.24,2.41,6.13,2.41a60.73,60.73,0,0,0,6.76-.76l.33,6.24A53.69,53.69,0,0,1,385.11,183.83Z"/><path class="cls-1" d="M421.5,184q-11.28,0-17.77-7.18t-6.5-19.64q0-12,6.28-19.53A21,21,0,0,1,420,130q9.63,0,15.28,6.59T441,154.17c0,.38,0,1.07,0,2.08s0,1.73,0,2.17H405.35q.15,8.84,4.56,13.77t11.88,4.93a35.22,35.22,0,0,0,16.48-4.34l1.23,6.37A33.86,33.86,0,0,1,421.5,184Zm8-43.33a11.76,11.76,0,0,0-9.51-4.13,12.64,12.64,0,0,0-9.87,4.28q-3.82,4.28-4.49,11.68h27.34Q432.93,144.78,429.46,140.64Z"/><circle class="cls-2" cx="116.9" cy="181.95" r="10.74"/><path class="cls-1" 
d="M115.67,216.35c-25.85,0-47.76-17.44-60.12-47.84-10.87-26.71-1.36-56.71,5.89-66.12l6-7.78.87,9.79c.84,9.62,7.62,21.11,14.22,24.11a6.8,6.8,0,0,0,3,.67c-8.21-19.42-6.69-39.91-2.63-52.07C90.43,54.47,109.2,38.46,120,33.22l5.65-2.75-.14,6.28c-.4,18.12.75,33,7.66,46.84,4.47,9,10.47,17.75,15.77,25.5,1.45,2.13,2.86,4.2,4.2,6.2,10.07,15.11,10.22,30.81,7,40.47a51,51,0,0,1-8.21,15.06c-4.42,5.59-9.66,9.59-15.15,11.58l-2.62-7.21c8.92-3.23,15.94-13.59,18.7-21.86,2.64-7.93,2.43-21-6.11-33.78-1.32-2-2.71-4-4.15-6.13-5.43-8-11.59-17-16.3-26.4-6.69-13.38-8.49-27.31-8.53-43.56-9.44,6.56-22,19.43-27.57,36.08-.86,2.56-8,25.75,3.93,50l1.43,2.89-2.61,1.91a13,13,0,0,1-13.56,1.18c-7-3.19-13-11.52-16.19-20-4.53,11.72-8,31.7-.51,50.14,11.12,27.36,30.45,43,53,43,23.11,0,43.57-15.89,54.73-42.5,5.18-12.37,6-31.5,2-45.51-3-10.43-7.43-18.94-15.06-30.94-1.93,4.08-4.57,8.26-7.27,10.06l-4.26-6.38c2.21-1.48,6-9.22,6.8-13l1.82-9.19,5.15,7.83c10.47,15.93,16.46,26.41,20.2,39.48,4.5,15.76,3.54,36.56-2.33,50.58-5.79,13.81-14.29,25.46-24.59,33.71A59.2,59.2,0,0,1,115.67,216.35Z"/></svg> \ No newline at end of file diff --git a/assets/logo/ignite_logo_dark.png b/assets/logo/ignite_logo_dark.png new file mode 100644 index 000000000000..54de84a55de4 Binary files /dev/null and b/assets/logo/ignite_logo_dark.png differ diff --git a/assets/logo/ignite_logo_dark.svg b/assets/logo/ignite_logo_dark.svg new file mode 100644 index 000000000000..d168c141c0c9 --- /dev/null +++ b/assets/logo/ignite_logo_dark.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 501.38 250"><defs><style>.cls-1{fill:#2a2a2a;}</style></defs><path class="cls-1" d="M245,102.72h-5V60.79H253.5a22.73,22.73,0,0,1,5.57.64,14,14,0,0,1,4.43,2,8.7,8.7,0,0,1,3,3.53,11.73,11.73,0,0,1,1.06,5.15,19.19,19.19,0,0,1-.44,4.36,11.26,11.26,0,0,1-1.55,3.6,9,9,0,0,1-2.85,2.79,14.68,14.68,0,0,1-4.53,1.74,29,29,0,0,1-6.4.63q-2.89,0-6.84-.36Zm7.31-21.81a18.42,18.42,0,0,0,4.71-.53,9,9,0,0,0,3-1.32,5.38,5.38,0,0,0,1.66-2,7.87,7.87,0,0,0,.74-2.31,20.38,20.38,0,0,0,.14-2.54,6,6,0,0,0-2.4-5.24,11.15,11.15,0,0,0-6.51-1.68H245V80.86Z"/><path class="cls-1" d="M270.46,72h5l9.7,25.59L293.75,72h5q-12.43,34.25-12.86,35.31a17.32,17.32,0,0,1-4.68,7.11,11,11,0,0,1-7.23,2.25,18.85,18.85,0,0,1-2.83-.22l-.28-4c1,.08,1.83.11,2.35.11a8.35,8.35,0,0,0,4.81-1.16,9.65,9.65,0,0,0,2.92-4.36q1.15-2.94,1.71-4.54Z"/><path class="cls-1" d="M314.84,102.72H310V65.55H297.22V61.27h30.37v4.28H314.84Z"/><path class="cls-1" d="M351.64,87.33q0,7.14-3.81,11.59t-10.54,4.44A12.83,12.83,0,0,1,327,98.88a17.13,17.13,0,0,1-3.83-11.52q0-7.17,3.84-11.6t10.54-4.43q6.58,0,10.34,4.44T351.64,87.33ZM328,87.41a14.4,14.4,0,0,0,2.43,8.76,8.21,8.21,0,0,0,7.07,3.3,8,8,0,0,0,6.94-3.28q2.34-3.28,2.34-8.83c0-3.76-.77-6.71-2.31-8.87s-3.89-3.24-7.05-3.24a8.12,8.12,0,0,0-7,3.31C328.82,80.76,328,83.71,328,87.41Z"/><path class="cls-1" d="M372,71.69a14.67,14.67,0,0,1,2.3.17l-.17,4.32a13.38,13.38,0,0,0-2.24-.23,8.16,8.16,0,0,0-6.38,2.74,9.76,9.76,0,0,0-2.45,6.76v17.27h-4.68q0-28.31,0-30.75h3.9l.45,5.52a13.05,13.05,0,0,1,4.06-4.2A9.43,9.43,0,0,1,372,71.69Z"/><path class="cls-1" d="M389.6,103.36q-6.78,0-10.6-4.4t-3.83-11.52q0-7.17,3.91-11.64t10.63-4.47a17.61,17.61,0,0,1,4.61.59,10.22,10.22,0,0,1,3.52,1.54l-1.29,3.64a14.33,14.33,0,0,0-6.92-1.76,8.53,8.53,0,0,0-7.05,3.25Q380,81.83,380,87.47q0,5.49,2.56,8.7a8.6,8.6,0,0,0,7.11,3.21,16,16,0,0,0,7.4-2l.58,3.73A13.41,13.41,0,0,1,389.6,103.36Z"/><path class="cls-1" 
d="M424.44,102.72V84.14q0-4.64-1.48-6.73c-1-1.4-2.69-2.1-5.14-2.1a9.87,9.87,0,0,0-5.95,1.76,8,8,0,0,0-3.16,4.57,41.33,41.33,0,0,0-.25,4.68v16.4h-4.68V60l4.68-.28v12.5c0,1.07,0,2.38-.11,4a12.81,12.81,0,0,1,10.48-4.87q10.29,0,10.29,12.08v19.31Z"/><path class="cls-1" d="M227,182.88h-8.16V113H227Z"/><path class="cls-1" d="M246.47,206.59l-.7-7.13h9.63a37.68,37.68,0,0,0,7-.57,13.7,13.7,0,0,0,5-1.87,13.08,13.08,0,0,0,3.21-2.87,11,11,0,0,0,1.86-4.11,30.69,30.69,0,0,0,.85-5c.15-1.65.21-3.67.21-6.07v-4.58a21,21,0,0,1-6.56,6.26,18.54,18.54,0,0,1-9.49,2.2,21.29,21.29,0,0,1-8.19-1.54,19,19,0,0,1-6.33-4.08,24.83,24.83,0,0,1-4.39-5.91,27.06,27.06,0,0,1-2.65-6.94,32.11,32.11,0,0,1-.82-7.22,37.64,37.64,0,0,1,1.39-10.49,27.48,27.48,0,0,1,4.13-8.59,18.9,18.9,0,0,1,7.2-5.93A23.34,23.34,0,0,1,258,130a19.27,19.27,0,0,1,8.9,2,20.05,20.05,0,0,1,6.82,6v-6.9h7.7v48.07c0,2.21-.06,4.14-.17,5.79a39,39,0,0,1-.8,5.5,21.63,21.63,0,0,1-1.72,5.19,20.19,20.19,0,0,1-3.05,4.37,15.32,15.32,0,0,1-4.68,3.54,28.62,28.62,0,0,1-6.72,2.2,43.78,43.78,0,0,1-9.07.85Zm12.14-30.46a16.36,16.36,0,0,0,7-1.42,11.43,11.43,0,0,0,4.72-3.92,18.2,18.2,0,0,0,2.52-5.57,25.72,25.72,0,0,0,.83-6.7v-3.83a26.76,26.76,0,0,0-.85-7,18,18,0,0,0-2.62-5.72,12,12,0,0,0-4.72-3.94,15.79,15.79,0,0,0-6.87-1.42,14,14,0,0,0-5.93,1.23,12.76,12.76,0,0,0-4.39,3.23,19.77,19.77,0,0,0-2.93,4.68,23.77,23.77,0,0,0-1.72,5.41,29.24,29.24,0,0,0-.52,5.52,26.76,26.76,0,0,0,.56,5.43,22.89,22.89,0,0,0,1.8,5.24,18.71,18.71,0,0,0,3,4.47,13.37,13.37,0,0,0,4.37,3.09A13.77,13.77,0,0,0,258.61,176.13Z"/><path class="cls-1" d="M292.55,182.88v-51.8h6.61l1,7.41a23,23,0,0,1,7.67-6.16,21.75,21.75,0,0,1,9.94-2.34q9,0,13.27,5t4.3,15.46v32.39h-7.89q0-31.15-.05-33.38-.18-6.62-2.66-9.68t-8.29-3.07a16.84,16.84,0,0,0-10.15,2.93,13.29,13.29,0,0,0-5.29,7.6,49,49,0,0,0-.47,7.27v28.33Z"/><path class="cls-1" d="M353.76,112.24v9.35h-7.94v-9.35Zm0,18.84v51.8h-7.94v-51.8Z"/><path class="cls-1" d="M385.11,183.83q-7.65,0-10.86-3.73t-3.4-12V136.94h-7.69l.33-5.2,7.32-.66,2.69-11.85,5.19-.19v12h14.4v5.86h-14.4v29.7q0,5.9,1.8,8.31c1.19,1.61,3.24,2.41,6.13,2.41a60.73,60.73,0,0,0,6.76-.76l.33,6.24A53.69,53.69,0,0,1,385.11,183.83Z"/><path class="cls-1" d="M421.5,184q-11.28,0-17.77-7.18t-6.5-19.64q0-12,6.28-19.53A21,21,0,0,1,420,130q9.63,0,15.28,6.59T441,154.17c0,.38,0,1.07,0,2.08s0,1.73,0,2.17H405.35q.15,8.84,4.56,13.77t11.88,4.93a35.22,35.22,0,0,0,16.48-4.34l1.23,6.37A33.86,33.86,0,0,1,421.5,184Zm8-43.33a11.76,11.76,0,0,0-9.51-4.13,12.64,12.64,0,0,0-9.87,4.28q-3.82,4.28-4.49,11.68h27.34Q432.93,144.78,429.46,140.64Z"/><circle class="cls-1" cx="116.9" cy="181.95" r="10.74"/><path class="cls-1" 
d="M115.67,216.35c-25.85,0-47.76-17.44-60.12-47.84-10.87-26.71-1.36-56.71,5.89-66.12l6-7.78.87,9.79c.84,9.62,7.62,21.11,14.22,24.11a6.8,6.8,0,0,0,3,.67c-8.21-19.42-6.69-39.91-2.63-52.07C90.43,54.47,109.2,38.46,120,33.22l5.65-2.75-.14,6.28c-.4,18.12.75,33,7.66,46.84,4.47,9,10.47,17.75,15.77,25.5,1.45,2.13,2.86,4.2,4.2,6.2,10.07,15.11,10.22,30.81,7,40.47a51,51,0,0,1-8.21,15.06c-4.42,5.59-9.66,9.59-15.15,11.58l-2.62-7.21c8.92-3.23,15.94-13.59,18.7-21.86,2.64-7.93,2.43-21-6.11-33.78-1.32-2-2.71-4-4.15-6.13-5.43-8-11.59-17-16.3-26.4-6.69-13.38-8.49-27.31-8.53-43.56-9.44,6.56-22,19.43-27.57,36.08-.86,2.56-8,25.75,3.93,50l1.43,2.89-2.61,1.91a13,13,0,0,1-13.56,1.18c-7-3.19-13-11.52-16.19-20-4.53,11.72-8,31.7-.51,50.14,11.12,27.36,30.45,43,53,43,23.11,0,43.57-15.89,54.73-42.5,5.18-12.37,6-31.5,2-45.51-3-10.43-7.43-18.94-15.06-30.94-1.93,4.08-4.57,8.26-7.27,10.06l-4.26-6.38c2.21-1.48,6-9.22,6.8-13l1.82-9.19,5.15,7.83c10.47,15.93,16.46,26.41,20.2,39.48,4.5,15.76,3.54,36.56-2.33,50.58-5.79,13.81-14.29,25.46-24.59,33.71A59.2,59.2,0,0,1,115.67,216.35Z"/></svg> \ No newline at end of file diff --git a/assets/logo/ignite_logo_guidelines.md b/assets/logo/ignite_logo_guidelines.md new file mode 100644 index 000000000000..4dd199a10f9a --- /dev/null +++ b/assets/logo/ignite_logo_guidelines.md @@ -0,0 +1,23 @@ +# PyTorch Ignite Logo Guidelines +These guidelines are meant to help keep the PyTorch Ignite logo (as developed in #1221) consistent and recognizable across all its uses. They also provide a common language for referring to the logos and their components. + +The primary logo is the combination of the logomark and wordmark next to each other. The logomark is the flame alone (no text) and the wordmark is only the text. It's preferable to use the primary logo whenever possible, and the logomark when a smaller version is needed. + +## Color +The full color options are a combonation of PyTorch's main orange (`#ee4c2c`) with yellow details (`#eaa700`). Light options are white (`#FFFFFF`) and dark options dark grey (`#2a2a2a`). The alternate "mixed" logo uses the full color logomark with a dark grey wordmark. + +Whenever possible, use the full color logos. One color logos (light or dark) are to be used when full color will not have enough contrast, usually when logos must be on colored backgrounds or are being reproduced somewhere that doesn't support color. + +Please note: The orange (`#ee4c2c`) and yellow (`#eaa700`) do not meet WCAG 2.1 color contrast recommendations for text or UI when used with white or other light colors. Make sure to use these colors primarily as decorative elements or with a dark color for text and/or UI. Accessibility should not be overlooked. + +## Type +The PyTorch Ignite wordmark is made from Oxygen (by Vernon Adams @vernnobile). + +## Minimum Size +For consistent legibility, please do not display the primary logo at less than 60px wide or the logomark at less than 15px wide. + +## Logo Integrity +A few other notes to keep in mind when using the logo: +- Make sure to scale the logo proportionally. +- Maintain a good amount of space around the logo. Don’t let it overlap with text, images, or other elements. +- Do not try and recreate or modify the logo. For example, do not use the logomark and then try to write PyTorch Ignite in another font. 
\ No newline at end of file diff --git a/assets/logo/ignite_logo_light.png b/assets/logo/ignite_logo_light.png new file mode 100644 index 000000000000..6dc4cd2339e3 Binary files /dev/null and b/assets/logo/ignite_logo_light.png differ diff --git a/assets/logo/ignite_logo_light.svg b/assets/logo/ignite_logo_light.svg new file mode 100644 index 000000000000..eafba36b1a36 --- /dev/null +++ b/assets/logo/ignite_logo_light.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 501 250"><defs><style>.cls-1{fill:#fff;}</style></defs><circle class="cls-1" cx="116.9" cy="181.95" r="10.74"/><path class="cls-1" d="M115.67,216.35c-25.85,0-47.76-17.44-60.12-47.84-10.87-26.71-1.36-56.71,5.89-66.12l6-7.78.87,9.79c.84,9.62,7.62,21.11,14.22,24.11a6.8,6.8,0,0,0,3,.67c-8.21-19.42-6.69-39.91-2.63-52.07C90.43,54.47,109.2,38.46,120,33.22l5.65-2.75-.14,6.28c-.4,18.12.75,33,7.66,46.84,4.47,9,10.47,17.75,15.77,25.5,1.45,2.13,2.86,4.2,4.2,6.2,10.07,15.11,10.22,30.81,7,40.47a51,51,0,0,1-8.21,15.06c-4.42,5.59-9.66,9.59-15.15,11.58l-2.62-7.21c8.92-3.23,15.94-13.59,18.7-21.86,2.64-7.93,2.43-21-6.11-33.78-1.32-2-2.71-4-4.15-6.13-5.43-8-11.59-17-16.3-26.4-6.69-13.38-8.49-27.31-8.53-43.56-9.44,6.56-22,19.43-27.57,36.08-.86,2.56-8,25.75,3.93,50l1.43,2.89-2.61,1.91a13,13,0,0,1-13.56,1.18c-7-3.19-13-11.52-16.19-20-4.53,11.72-8,31.7-.51,50.14,11.12,27.36,30.45,43,53,43,23.11,0,43.57-15.89,54.73-42.5,5.18-12.37,6-31.5,2-45.51-3-10.43-7.43-18.94-15.06-30.94-1.93,4.08-4.57,8.26-7.27,10.06l-4.26-6.38c2.21-1.48,6-9.22,6.8-13l1.82-9.19,5.15,7.83c10.47,15.93,16.46,26.41,20.2,39.48,4.5,15.76,3.54,36.56-2.33,50.58-5.79,13.81-14.29,25.46-24.59,33.71A59.2,59.2,0,0,1,115.67,216.35Z"/><path class="cls-1" d="M244.76,102.72h-5V60.79h13.51a22.73,22.73,0,0,1,5.57.64,14,14,0,0,1,4.43,2,8.7,8.7,0,0,1,3,3.53,11.73,11.73,0,0,1,1.06,5.15,19.78,19.78,0,0,1-.44,4.36,11.26,11.26,0,0,1-1.55,3.6,9,9,0,0,1-2.85,2.79A14.68,14.68,0,0,1,258,84.57a29,29,0,0,1-6.4.63q-2.9,0-6.84-.36Zm7.31-21.81a18.42,18.42,0,0,0,4.71-.53,9,9,0,0,0,3-1.32,5.38,5.38,0,0,0,1.66-2,7.87,7.87,0,0,0,.74-2.31,20.38,20.38,0,0,0,.14-2.54,6,6,0,0,0-2.4-5.24,11.15,11.15,0,0,0-6.51-1.68h-8.66V80.86Z"/><path class="cls-1" d="M270.27,72h5l9.7,25.59L293.56,72h5q-12.43,34.25-12.86,35.31a17.32,17.32,0,0,1-4.68,7.11,11,11,0,0,1-7.23,2.25,18.85,18.85,0,0,1-2.83-.22l-.28-4c1,.08,1.83.11,2.35.11a8.29,8.29,0,0,0,4.8-1.16,9.58,9.58,0,0,0,2.93-4.36q1.15-2.94,1.71-4.54Z"/><path class="cls-1" d="M314.65,102.72h-4.87V65.55H297V61.27H327.4v4.28H314.65Z"/><path class="cls-1" d="M351.45,87.33q0,7.14-3.81,11.59t-10.54,4.44a12.83,12.83,0,0,1-10.3-4.48A17.13,17.13,0,0,1,323,87.36q0-7.17,3.84-11.6t10.54-4.43q6.58,0,10.34,4.44T351.45,87.33Zm-23.63.08a14.4,14.4,0,0,0,2.43,8.76,8.21,8.21,0,0,0,7.07,3.3,8,8,0,0,0,6.94-3.28q2.34-3.28,2.34-8.83c0-3.76-.77-6.71-2.31-8.87s-3.89-3.24-7.05-3.24a8.12,8.12,0,0,0-7,3.31C328.63,80.76,327.82,83.71,327.82,87.41Z"/><path class="cls-1" d="M371.85,71.69a14.67,14.67,0,0,1,2.3.17L374,76.18a13.38,13.38,0,0,0-2.24-.23,8.16,8.16,0,0,0-6.38,2.74,9.76,9.76,0,0,0-2.45,6.76v17.27h-4.68q0-28.31,0-30.75h3.9l.45,5.52a13,13,0,0,1,4.06-4.2A9.43,9.43,0,0,1,371.85,71.69Z"/><path class="cls-1" d="M389.41,103.36q-6.78,0-10.61-4.4T375,87.44q0-7.17,3.91-11.64t10.63-4.47a17.61,17.61,0,0,1,4.61.59,10.22,10.22,0,0,1,3.52,1.54l-1.29,3.64a14.33,14.33,0,0,0-6.92-1.76,8.53,8.53,0,0,0-7.05,3.25q-2.59,3.24-2.59,8.88,0,5.49,2.56,8.7a8.6,8.6,0,0,0,7.11,3.21,16,16,0,0,0,7.4-2l.58,3.73A13.41,13.41,0,0,1,389.41,103.36Z"/><path class="cls-1" 
d="M424.25,102.72V84.14q0-4.64-1.48-6.73c-1-1.4-2.69-2.1-5.14-2.1a9.87,9.87,0,0,0-5.95,1.76,8,8,0,0,0-3.16,4.57,41.33,41.33,0,0,0-.25,4.68v16.4h-4.68V60l4.68-.28v12.5c0,1.07,0,2.38-.11,4a12.81,12.81,0,0,1,10.48-4.87q10.29,0,10.29,12.08v19.31Z"/><path class="cls-1" d="M226.81,182.88h-8.17V113h8.17Z"/><path class="cls-1" d="M246.28,206.59l-.71-7.13h9.64a37.68,37.68,0,0,0,7-.57,13.7,13.7,0,0,0,5-1.87,13.08,13.08,0,0,0,3.21-2.87,11.17,11.17,0,0,0,1.86-4.11,30.69,30.69,0,0,0,.85-5q.21-2.47.21-6.07v-4.58a21.12,21.12,0,0,1-6.56,6.26,18.54,18.54,0,0,1-9.49,2.2,21.29,21.29,0,0,1-8.19-1.54,19,19,0,0,1-6.33-4.08,25.14,25.14,0,0,1-4.4-5.91,27.4,27.4,0,0,1-2.64-6.94,32.11,32.11,0,0,1-.82-7.22,37.64,37.64,0,0,1,1.39-10.49,27.48,27.48,0,0,1,4.13-8.59,18.9,18.9,0,0,1,7.2-5.93A23.34,23.34,0,0,1,257.81,130a19.27,19.27,0,0,1,8.9,2,20.05,20.05,0,0,1,6.82,6v-6.9h7.7v48.07c0,2.21-.06,4.14-.17,5.79a39,39,0,0,1-.8,5.5,21.63,21.63,0,0,1-1.72,5.19,20.55,20.55,0,0,1-3.05,4.37,15.32,15.32,0,0,1-4.68,3.54,28.62,28.62,0,0,1-6.72,2.2,43.78,43.78,0,0,1-9.07.85Zm12.14-30.46a16.39,16.39,0,0,0,7-1.42,11.43,11.43,0,0,0,4.72-3.92,18.2,18.2,0,0,0,2.52-5.57,25.72,25.72,0,0,0,.83-6.7v-3.83a26.76,26.76,0,0,0-.85-7,18,18,0,0,0-2.62-5.72,12,12,0,0,0-4.72-3.94,15.79,15.79,0,0,0-6.87-1.42,14,14,0,0,0-5.93,1.23,12.76,12.76,0,0,0-4.39,3.23,19.45,19.45,0,0,0-2.93,4.68,23.77,23.77,0,0,0-1.72,5.41,29.24,29.24,0,0,0-.52,5.52,26.76,26.76,0,0,0,.56,5.43,22.89,22.89,0,0,0,1.8,5.24,18.71,18.71,0,0,0,3,4.47,13.37,13.37,0,0,0,4.37,3.09A13.77,13.77,0,0,0,258.42,176.13Z"/><path class="cls-1" d="M292.35,182.88v-51.8H299l1,7.41a23,23,0,0,1,7.67-6.16,21.75,21.75,0,0,1,9.94-2.34q9,0,13.27,5t4.3,15.46v32.39h-7.89q0-31.15-.05-33.38-.18-6.62-2.66-9.68t-8.29-3.07a16.84,16.84,0,0,0-10.15,2.93,13.29,13.29,0,0,0-5.29,7.6,49,49,0,0,0-.47,7.27v28.33Z"/><path class="cls-1" d="M353.57,112.24v9.35h-7.94v-9.35Zm0,18.84v51.8h-7.94v-51.8Z"/><path class="cls-1" d="M384.92,183.83q-7.65,0-10.86-3.73t-3.4-12V136.94H363l.34-5.2,7.31-.66,2.7-11.85,5.19-.19v12h14.4v5.86H378.5v29.7q0,5.9,1.8,8.31c1.19,1.61,3.24,2.41,6.13,2.41a60.73,60.73,0,0,0,6.76-.76l.33,6.24A53.69,53.69,0,0,1,384.92,183.83Z"/><path class="cls-1" d="M421.31,184q-11.28,0-17.77-7.18T397,157.15q0-12,6.28-19.53A21,21,0,0,1,419.85,130q9.63,0,15.27,6.59t5.65,17.59c0,.38,0,1.07,0,2.08s0,1.73,0,2.17H405.16q.15,8.84,4.56,13.77t11.88,4.93a35.25,35.25,0,0,0,16.48-4.34l1.23,6.37A33.86,33.86,0,0,1,421.31,184Zm8-43.33a11.76,11.76,0,0,0-9.51-4.13,12.64,12.64,0,0,0-9.87,4.28q-3.83,4.28-4.49,11.68h27.34Q432.74,144.78,429.27,140.64Z"/></svg> \ No newline at end of file diff --git a/assets/logo/ignite_logo_mixed.png b/assets/logo/ignite_logo_mixed.png new file mode 100644 index 000000000000..a3bbe62c9b81 Binary files /dev/null and b/assets/logo/ignite_logo_mixed.png differ diff --git a/assets/logo/ignite_logo_mixed.svg b/assets/logo/ignite_logo_mixed.svg new file mode 100644 index 000000000000..1a646508bccc --- /dev/null +++ b/assets/logo/ignite_logo_mixed.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 501.38 250"><defs><style>.cls-1{fill:#2a2a2a;}.cls-2{fill:#eaa700;}.cls-3{fill:#ee4c2c;}</style></defs><path class="cls-1" 
d="M245,102.72h-5V60.79H253.5a22.73,22.73,0,0,1,5.57.64,14,14,0,0,1,4.43,2,8.7,8.7,0,0,1,3,3.53,11.73,11.73,0,0,1,1.06,5.15,19.19,19.19,0,0,1-.44,4.36,11.26,11.26,0,0,1-1.55,3.6,9,9,0,0,1-2.85,2.79,14.68,14.68,0,0,1-4.53,1.74,29,29,0,0,1-6.4.63q-2.89,0-6.84-.36Zm7.31-21.81a18.42,18.42,0,0,0,4.71-.53,9,9,0,0,0,3-1.32,5.38,5.38,0,0,0,1.66-2,7.87,7.87,0,0,0,.74-2.31,20.38,20.38,0,0,0,.14-2.54,6,6,0,0,0-2.4-5.24,11.15,11.15,0,0,0-6.51-1.68H245V80.86Z"/><path class="cls-1" d="M270.46,72h5l9.7,25.59L293.75,72h5q-12.43,34.25-12.86,35.31a17.32,17.32,0,0,1-4.68,7.11,11,11,0,0,1-7.23,2.25,18.85,18.85,0,0,1-2.83-.22l-.28-4c1,.08,1.83.11,2.35.11a8.35,8.35,0,0,0,4.81-1.16,9.65,9.65,0,0,0,2.92-4.36q1.15-2.94,1.71-4.54Z"/><path class="cls-1" d="M314.84,102.72H310V65.55H297.22V61.27h30.37v4.28H314.84Z"/><path class="cls-1" d="M351.64,87.33q0,7.14-3.81,11.59t-10.54,4.44A12.83,12.83,0,0,1,327,98.88a17.13,17.13,0,0,1-3.83-11.52q0-7.17,3.84-11.6t10.54-4.43q6.58,0,10.34,4.44T351.64,87.33ZM328,87.41a14.4,14.4,0,0,0,2.43,8.76,8.21,8.21,0,0,0,7.07,3.3,8,8,0,0,0,6.94-3.28q2.34-3.28,2.34-8.83c0-3.76-.77-6.71-2.31-8.87s-3.89-3.24-7.05-3.24a8.12,8.12,0,0,0-7,3.31C328.82,80.76,328,83.71,328,87.41Z"/><path class="cls-1" d="M372,71.69a14.67,14.67,0,0,1,2.3.17l-.17,4.32a13.38,13.38,0,0,0-2.24-.23,8.16,8.16,0,0,0-6.38,2.74,9.76,9.76,0,0,0-2.45,6.76v17.27h-4.68q0-28.31,0-30.75h3.9l.45,5.52a13.05,13.05,0,0,1,4.06-4.2A9.43,9.43,0,0,1,372,71.69Z"/><path class="cls-1" d="M389.6,103.36q-6.78,0-10.6-4.4t-3.83-11.52q0-7.17,3.91-11.64t10.63-4.47a17.61,17.61,0,0,1,4.61.59,10.22,10.22,0,0,1,3.52,1.54l-1.29,3.64a14.33,14.33,0,0,0-6.92-1.76,8.53,8.53,0,0,0-7.05,3.25Q380,81.83,380,87.47q0,5.49,2.56,8.7a8.6,8.6,0,0,0,7.11,3.21,16,16,0,0,0,7.4-2l.58,3.73A13.41,13.41,0,0,1,389.6,103.36Z"/><path class="cls-1" d="M424.44,102.72V84.14q0-4.64-1.48-6.73c-1-1.4-2.69-2.1-5.14-2.1a9.87,9.87,0,0,0-5.95,1.76,8,8,0,0,0-3.16,4.57,41.33,41.33,0,0,0-.25,4.68v16.4h-4.68V60l4.68-.28v12.5c0,1.07,0,2.38-.11,4a12.81,12.81,0,0,1,10.48-4.87q10.29,0,10.29,12.08v19.31Z"/><path class="cls-1" d="M227,182.88h-8.16V113H227Z"/><path class="cls-1" d="M246.47,206.59l-.7-7.13h9.63a37.68,37.68,0,0,0,7-.57,13.7,13.7,0,0,0,5-1.87,13.08,13.08,0,0,0,3.21-2.87,11,11,0,0,0,1.86-4.11,30.69,30.69,0,0,0,.85-5c.15-1.65.21-3.67.21-6.07v-4.58a21,21,0,0,1-6.56,6.26,18.54,18.54,0,0,1-9.49,2.2,21.29,21.29,0,0,1-8.19-1.54,19,19,0,0,1-6.33-4.08,24.83,24.83,0,0,1-4.39-5.91,27.06,27.06,0,0,1-2.65-6.94,32.11,32.11,0,0,1-.82-7.22,37.64,37.64,0,0,1,1.39-10.49,27.48,27.48,0,0,1,4.13-8.59,18.9,18.9,0,0,1,7.2-5.93A23.34,23.34,0,0,1,258,130a19.27,19.27,0,0,1,8.9,2,20.05,20.05,0,0,1,6.82,6v-6.9h7.7v48.07c0,2.21-.06,4.14-.17,5.79a39,39,0,0,1-.8,5.5,21.63,21.63,0,0,1-1.72,5.19,20.19,20.19,0,0,1-3.05,4.37,15.32,15.32,0,0,1-4.68,3.54,28.62,28.62,0,0,1-6.72,2.2,43.78,43.78,0,0,1-9.07.85Zm12.14-30.46a16.36,16.36,0,0,0,7-1.42,11.43,11.43,0,0,0,4.72-3.92,18.2,18.2,0,0,0,2.52-5.57,25.72,25.72,0,0,0,.83-6.7v-3.83a26.76,26.76,0,0,0-.85-7,18,18,0,0,0-2.62-5.72,12,12,0,0,0-4.72-3.94,15.79,15.79,0,0,0-6.87-1.42,14,14,0,0,0-5.93,1.23,12.76,12.76,0,0,0-4.39,3.23,19.77,19.77,0,0,0-2.93,4.68,23.77,23.77,0,0,0-1.72,5.41,29.24,29.24,0,0,0-.52,5.52,26.76,26.76,0,0,0,.56,5.43,22.89,22.89,0,0,0,1.8,5.24,18.71,18.71,0,0,0,3,4.47,13.37,13.37,0,0,0,4.37,3.09A13.77,13.77,0,0,0,258.61,176.13Z"/><path class="cls-1" 
d="M292.55,182.88v-51.8h6.61l1,7.41a23,23,0,0,1,7.67-6.16,21.75,21.75,0,0,1,9.94-2.34q9,0,13.27,5t4.3,15.46v32.39h-7.89q0-31.15-.05-33.38-.18-6.62-2.66-9.68t-8.29-3.07a16.84,16.84,0,0,0-10.15,2.93,13.29,13.29,0,0,0-5.29,7.6,49,49,0,0,0-.47,7.27v28.33Z"/><path class="cls-1" d="M353.76,112.24v9.35h-7.94v-9.35Zm0,18.84v51.8h-7.94v-51.8Z"/><path class="cls-1" d="M385.11,183.83q-7.65,0-10.86-3.73t-3.4-12V136.94h-7.69l.33-5.2,7.32-.66,2.69-11.85,5.19-.19v12h14.4v5.86h-14.4v29.7q0,5.9,1.8,8.31c1.19,1.61,3.24,2.41,6.13,2.41a60.73,60.73,0,0,0,6.76-.76l.33,6.24A53.69,53.69,0,0,1,385.11,183.83Z"/><path class="cls-1" d="M421.5,184q-11.28,0-17.77-7.18t-6.5-19.64q0-12,6.28-19.53A21,21,0,0,1,420,130q9.63,0,15.28,6.59T441,154.17c0,.38,0,1.07,0,2.08s0,1.73,0,2.17H405.35q.15,8.84,4.56,13.77t11.88,4.93a35.22,35.22,0,0,0,16.48-4.34l1.23,6.37A33.86,33.86,0,0,1,421.5,184Zm8-43.33a11.76,11.76,0,0,0-9.51-4.13,12.64,12.64,0,0,0-9.87,4.28q-3.82,4.28-4.49,11.68h27.34Q432.93,144.78,429.46,140.64Z"/><circle class="cls-2" cx="116.9" cy="181.95" r="10.74"/><path class="cls-3" d="M115.67,216.35c-25.85,0-47.76-17.44-60.12-47.84-10.87-26.71-1.36-56.71,5.89-66.12l6-7.78.87,9.79c.84,9.62,7.62,21.11,14.22,24.11a6.8,6.8,0,0,0,3,.67c-8.21-19.42-6.69-39.91-2.63-52.07C90.43,54.47,109.2,38.46,120,33.22l5.65-2.75-.14,6.28c-.4,18.12.75,33,7.66,46.84,4.47,9,10.47,17.75,15.77,25.5,1.45,2.13,2.86,4.2,4.2,6.2,10.07,15.11,10.22,30.81,7,40.47a51,51,0,0,1-8.21,15.06c-4.42,5.59-9.66,9.59-15.15,11.58l-2.62-7.21c8.92-3.23,15.94-13.59,18.7-21.86,2.64-7.93,2.43-21-6.11-33.78-1.32-2-2.71-4-4.15-6.13-5.43-8-11.59-17-16.3-26.4-6.69-13.38-8.49-27.31-8.53-43.56-9.44,6.56-22,19.43-27.57,36.08-.86,2.56-8,25.75,3.93,50l1.43,2.89-2.61,1.91a13,13,0,0,1-13.56,1.18c-7-3.19-13-11.52-16.19-20-4.53,11.72-8,31.7-.51,50.14,11.12,27.36,30.45,43,53,43,23.11,0,43.57-15.89,54.73-42.5,5.18-12.37,6-31.5,2-45.51-3-10.43-7.43-18.94-15.06-30.94-1.93,4.08-4.57,8.26-7.27,10.06l-4.26-6.38c2.21-1.48,6-9.22,6.8-13l1.82-9.19,5.15,7.83c10.47,15.93,16.46,26.41,20.2,39.48,4.5,15.76,3.54,36.56-2.33,50.58-5.79,13.81-14.29,25.46-24.59,33.71A59.2,59.2,0,0,1,115.67,216.35Z"/></svg> \ No newline at end of file diff --git a/assets/logo/ignite_logomark.png b/assets/logo/ignite_logomark.png new file mode 100644 index 000000000000..b025cab97976 Binary files /dev/null and b/assets/logo/ignite_logomark.png differ diff --git a/assets/logo/ignite_logomark.svg b/assets/logo/ignite_logomark.svg new file mode 100644 index 000000000000..bf8fb7199c83 --- /dev/null +++ b/assets/logo/ignite_logomark.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 250 250"><defs><style>.cls-1{fill:#eaa700;}.cls-2{fill:#ee4c2c;}</style></defs><circle class="cls-1" cx="126.45" cy="195.95" r="12.76"/><path class="cls-2" 
d="M125,236.82c-30.69,0-56.73-20.71-71.42-56.83-12.9-31.73-1.6-67.37,7-78.55l7.13-9.25,1,11.64c1,11.43,9.06,25.07,16.9,28.63a7.84,7.84,0,0,0,3.55.8C79.41,110.19,81.23,85.85,86,71.41c9-26.9,31.26-45.92,44.05-52.15L136.81,16l-.17,7.46c-.47,21.53.89,39.23,9.1,55.65,5.32,10.64,12.45,21.08,18.73,30.29,1.73,2.53,3.41,5,5,7.37,12,17.94,12.14,36.6,8.32,48.07a60.53,60.53,0,0,1-9.75,17.9c-5.25,6.63-11.47,11.39-18,13.75l-3.1-8.57c10.58-3.83,18.93-16.15,22.2-26,3.14-9.43,2.89-24.93-7.25-40.14-1.57-2.35-3.23-4.78-4.94-7.28-6.45-9.45-13.76-20.16-19.36-31.36-7.94-15.89-10.08-32.44-10.14-51.74-11.21,7.78-26.16,23.08-32.75,42.85-1,3-9.51,30.6,4.68,59.37l1.69,3.44L98,139.36a15.53,15.53,0,0,1-16.12,1.41C73.49,137,66.45,127.08,62.62,117c-5.38,13.93-9.52,37.67-.61,59.58,13.22,32.5,36.17,51.14,63,51.14,27.47,0,51.78-18.88,65-50.49,6.16-14.69,7.17-37.43,2.42-54.07-3.54-12.38-8.82-22.49-17.9-36.75-2.28,4.85-5.43,9.82-8.63,12l-5.06-7.58c2.63-1.75,7.19-11,8.08-15.41l2.16-10.92,6.12,9.3c12.43,18.92,19.56,31.37,24,46.91,5.35,18.72,4.2,43.43-2.78,60.09-6.87,16.4-17,30.25-29.2,40C156.1,231.27,140.8,236.82,125,236.82Z"/></svg> \ No newline at end of file diff --git a/assets/logo/ignite_logomark_dark.png b/assets/logo/ignite_logomark_dark.png new file mode 100644 index 000000000000..3f507924f49f Binary files /dev/null and b/assets/logo/ignite_logomark_dark.png differ diff --git a/assets/logo/ignite_logomark_dark.svg b/assets/logo/ignite_logomark_dark.svg new file mode 100644 index 000000000000..4547e5e21b66 --- /dev/null +++ b/assets/logo/ignite_logomark_dark.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 250 250"><defs><style>.cls-1{fill:#2a2a2a;}</style></defs><circle class="cls-1" cx="126.45" cy="195.95" r="12.76"/><path class="cls-1" d="M125,236.82c-30.69,0-56.73-20.71-71.42-56.83-12.9-31.73-1.6-67.37,7-78.55l7.13-9.25,1,11.64c1,11.43,9.06,25.07,16.9,28.63a7.84,7.84,0,0,0,3.55.8C79.41,110.19,81.23,85.85,86,71.41c9-26.9,31.26-45.92,44.05-52.15L136.81,16l-.17,7.46c-.47,21.53.89,39.23,9.1,55.65,5.32,10.64,12.45,21.08,18.73,30.29,1.73,2.53,3.41,5,5,7.37,12,17.94,12.14,36.6,8.32,48.07a60.53,60.53,0,0,1-9.75,17.9c-5.25,6.63-11.47,11.39-18,13.75l-3.1-8.57c10.58-3.83,18.93-16.15,22.2-26,3.14-9.43,2.89-24.93-7.25-40.14-1.57-2.35-3.23-4.78-4.94-7.28-6.45-9.45-13.76-20.16-19.36-31.36-7.94-15.89-10.08-32.44-10.14-51.74-11.21,7.78-26.16,23.08-32.75,42.85-1,3-9.51,30.6,4.68,59.37l1.69,3.44L98,139.36a15.53,15.53,0,0,1-16.12,1.41C73.49,137,66.45,127.08,62.62,117c-5.38,13.93-9.52,37.67-.61,59.58,13.22,32.5,36.17,51.14,63,51.14,27.47,0,51.78-18.88,65-50.49,6.16-14.69,7.17-37.43,2.42-54.07-3.54-12.38-8.82-22.49-17.9-36.75-2.28,4.85-5.43,9.82-8.63,12l-5.06-7.58c2.63-1.75,7.19-11,8.08-15.41l2.16-10.92,6.12,9.3c12.43,18.92,19.56,31.37,24,46.91,5.35,18.72,4.2,43.43-2.78,60.09-6.87,16.4-17,30.25-29.2,40C156.1,231.27,140.8,236.82,125,236.82Z"/></svg> \ No newline at end of file diff --git a/assets/logo/ignite_logomark_light.png b/assets/logo/ignite_logomark_light.png new file mode 100644 index 000000000000..47837b575f04 Binary files /dev/null and b/assets/logo/ignite_logomark_light.png differ diff --git a/assets/logo/ignite_logomark_light.svg b/assets/logo/ignite_logomark_light.svg new file mode 100644 index 000000000000..0d4a64c6ae2b --- /dev/null +++ b/assets/logo/ignite_logomark_light.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 250 250"><defs><style>.cls-1{fill:#fff;}</style></defs><circle class="cls-1" cx="126.45" 
cy="195.95" r="12.76"/><path class="cls-1" d="M125,236.82c-30.69,0-56.73-20.71-71.42-56.83-12.9-31.73-1.6-67.37,7-78.55l7.13-9.25,1,11.64c1,11.43,9.06,25.07,16.9,28.63a7.84,7.84,0,0,0,3.55.8C79.41,110.19,81.23,85.85,86,71.41c9-26.9,31.26-45.92,44.05-52.15L136.81,16l-.17,7.46c-.47,21.53.89,39.23,9.1,55.65,5.32,10.64,12.45,21.08,18.73,30.29,1.73,2.53,3.41,5,5,7.37,12,17.94,12.14,36.6,8.32,48.07a60.53,60.53,0,0,1-9.75,17.9c-5.25,6.63-11.47,11.39-18,13.75l-3.1-8.57c10.58-3.83,18.93-16.15,22.2-26,3.14-9.43,2.89-24.93-7.25-40.14-1.57-2.35-3.23-4.78-4.94-7.28-6.45-9.45-13.76-20.16-19.36-31.36-7.94-15.89-10.08-32.44-10.14-51.74-11.21,7.78-26.16,23.08-32.75,42.85-1,3-9.51,30.6,4.68,59.37l1.69,3.44L98,139.36a15.53,15.53,0,0,1-16.12,1.41C73.49,137,66.45,127.08,62.62,117c-5.38,13.93-9.52,37.67-.61,59.58,13.22,32.5,36.17,51.14,63,51.14,27.47,0,51.78-18.88,65-50.49,6.16-14.69,7.17-37.43,2.42-54.07-3.54-12.38-8.82-22.49-17.9-36.75-2.28,4.85-5.43,9.82-8.63,12l-5.06-7.58c2.63-1.75,7.19-11,8.08-15.41l2.16-10.92,6.12,9.3c12.43,18.92,19.56,31.37,24,46.91,5.35,18.72,4.2,43.43-2.78,60.09-6.87,16.4-17,30.25-29.2,40C156.1,231.27,140.8,236.82,125,236.82Z"/></svg> \ No newline at end of file diff --git a/docs/source/_templates/_static/css/ignite_theme.css b/docs/source/_templates/_static/css/ignite_theme.css index b0baa745009c..a8f145986182 100644 --- a/docs/source/_templates/_static/css/ignite_theme.css +++ b/docs/source/_templates/_static/css/ignite_theme.css @@ -3,12 +3,12 @@ article.pytorch-article table tr th:first-of-type, article.pytorch-article table width: 17px; } -div.header-container a.header-logo +div.container a.header-logo { - height: 42px; - width: 135px; - background-image: url("../img/ignite-logo-dark.svg"); - background-size: 135px 42px; + height: 80px; + width: 160px; + background-image: url("../img/ignite_logo.svg"); + background-size: 160px; } /* versions block */ diff --git a/docs/source/_templates/_static/img/ignite-logo-dark.svg b/docs/source/_templates/_static/img/ignite-logo-dark.svg deleted file mode 100644 index cd4719ffe675..000000000000 --- a/docs/source/_templates/_static/img/ignite-logo-dark.svg +++ /dev/null @@ -1,18 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> -<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0" y="0" width="352.29" height="114.25" viewBox="0, 0, 352.29, 114.25"> - <g id="Layer_1"> - <g> - <path d="M67.526,42.195 C75.733,50.214 75.733,63.265 67.526,71.284 C59.319,79.303 45.955,79.303 37.756,71.284 C29.549,63.265 29.549,50.214 37.756,42.195 C37.756,42.195 37.756,42.195 37.756,42.195 L50.878,29.365 L52.524,27.536 L52.524,27.536 L52.524,17.915 C40.564,29.594 33.923,36.084 32.599,37.385 C21.576,48.148 21.576,65.33 32.599,76.094 C43.615,86.858 61.199,86.858 72.215,76.094 C83.464,65.33 83.464,47.92 72.449,37.385 C71.461,38.342 68.016,41.716 67.526,42.195 z" fill="#EE4C2C" id="bp3iEM1Cm"/> - <g> - <path d="M62.569,26.852 C59.741,26.852 57.443,29.098 57.443,31.861 C57.443,34.624 59.741,36.87 62.569,36.87 C65.397,36.87 67.695,34.624 67.695,31.861 C67.695,29.098 65.397,26.852 62.569,26.852 z" fill="#EE4C2C" id="d5B2guxX6D"/> - </g> - </g> - <path d="M110.607,85.125 L110.607,17.625 L103.803,17.625 L103.803,85.125 z" fill="#000000"/> - <path d="M131.127,52.941 C131.127,45.273 135.879,39.765 143.547,39.765 C151.863,39.765 156.183,45.273 156.183,52.617 C156.183,60.177 151.431,65.577 143.871,65.577 C135.879,65.577 131.127,60.069 
131.127,52.941 z M136.959,86.529 C139.767,86.745 143.979,86.853 147.327,86.853 C157.155,86.853 162.339,88.905 162.339,94.737 C162.339,101.433 154.023,106.401 143.763,106.401 C133.287,106.401 126.915,102.081 126.915,96.249 C126.915,92.037 131.019,88.149 136.959,86.529 z M131.127,85.233 C124.755,86.853 120.435,91.821 120.435,97.437 C120.435,106.077 129.507,111.909 142.791,111.909 C156.615,111.909 168.819,104.457 168.819,93.549 C168.819,85.449 162.663,80.589 148.299,80.589 C149.055,80.589 141.927,80.589 141.927,80.589 C133.071,80.589 130.695,78.645 130.695,75.513 C130.695,72.705 133.611,70.653 136.203,69.573 C138.687,70.437 140.847,70.977 143.547,70.977 C154.563,70.977 162.771,63.309 162.771,52.833 C162.771,48.837 161.151,44.085 158.775,41.709 L169.683,42.789 L169.359,36.093 L155.643,37.173 C152.943,35.661 149.163,34.257 143.979,34.257 C132.315,34.257 124.539,42.357 124.539,52.833 C124.539,59.637 127.779,65.037 132.531,67.953 C127.995,69.681 124.431,73.245 124.431,77.349 C124.431,80.913 126.915,83.721 131.127,85.233 z" fill="#000000"/> - <path d="M217.203,85.125 L217.203,51.213 C217.203,40.413 211.371,34.257 201.435,34.257 C196.251,34.257 189.771,37.173 184.587,42.357 L184.587,34.257 L177.891,35.661 L177.891,85.125 L184.587,85.125 L184.587,47.649 C188.691,43.545 194.199,40.629 199.599,40.629 C207.051,40.629 210.507,44.517 210.507,53.157 L210.507,85.125 z" fill="#000000"/> - <path d="M240.423,19.353 C240.423,16.869 238.911,14.601 235.887,14.601 C232.863,14.601 231.351,16.869 231.351,19.353 C231.351,21.945 232.863,24.321 235.887,24.321 C238.911,24.321 240.423,21.945 240.423,19.353 z M239.235,85.125 L239.235,34.257 L232.539,35.661 L232.539,85.125 z" fill="#000000"/> - <path d="M279.627,84.153 L278.439,78.321 C275.415,79.617 272.175,80.697 268.935,80.697 C263.751,80.697 261.483,77.673 261.699,71.409 L262.455,41.817 L278.871,41.817 L279.195,35.877 L262.671,35.877 L262.995,24.321 L256.623,24.321 L256.299,35.553 L248.847,37.281 L248.523,41.493 L256.083,41.925 L255.327,71.193 C255.003,81.561 259.215,86.745 268.611,86.745 C272.175,86.745 276.171,85.773 279.627,84.153 z" fill="#000000"/> - <path d="M293.559,60.609 L327.039,60.393 C327.687,58.233 328.119,55.101 328.119,52.509 C328.119,41.385 320.991,34.257 310.083,34.257 C296.691,34.257 286.755,45.273 286.755,60.825 C286.755,75.837 296.367,86.529 310.623,86.529 C316.779,86.529 322.287,84.909 326.499,82.317 L325.203,76.161 C321.423,78.645 316.239,80.265 310.839,80.265 C300.471,80.265 293.775,72.273 293.559,61.149 z M293.991,55.317 C295.071,47.109 300.795,40.305 309.651,40.305 C317.427,40.305 321.531,45.489 321.531,53.049 C321.531,53.589 321.531,54.669 321.423,55.209 z" fill="#000000"/> - </g> -</svg> diff --git a/docs/source/_templates/_static/img/ignite_logo.svg b/docs/source/_templates/_static/img/ignite_logo.svg new file mode 100644 index 000000000000..9f1d67610a6f --- /dev/null +++ b/docs/source/_templates/_static/img/ignite_logo.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 501.38 250"><defs><style>.cls-1{fill:#ee4c2c;}.cls-2{fill:#eaa700;}</style></defs><path class="cls-1" 
d="M245,102.72h-5V60.79H253.5a22.73,22.73,0,0,1,5.57.64,14,14,0,0,1,4.43,2,8.7,8.7,0,0,1,3,3.53,11.73,11.73,0,0,1,1.06,5.15,19.19,19.19,0,0,1-.44,4.36,11.26,11.26,0,0,1-1.55,3.6,9,9,0,0,1-2.85,2.79,14.68,14.68,0,0,1-4.53,1.74,29,29,0,0,1-6.4.63q-2.89,0-6.84-.36Zm7.31-21.81a18.42,18.42,0,0,0,4.71-.53,9,9,0,0,0,3-1.32,5.38,5.38,0,0,0,1.66-2,7.87,7.87,0,0,0,.74-2.31,20.38,20.38,0,0,0,.14-2.54,6,6,0,0,0-2.4-5.24,11.15,11.15,0,0,0-6.51-1.68H245V80.86Z"/><path class="cls-1" d="M270.46,72h5l9.7,25.59L293.75,72h5q-12.43,34.25-12.86,35.31a17.32,17.32,0,0,1-4.68,7.11,11,11,0,0,1-7.23,2.25,18.85,18.85,0,0,1-2.83-.22l-.28-4c1,.08,1.83.11,2.35.11a8.35,8.35,0,0,0,4.81-1.16,9.65,9.65,0,0,0,2.92-4.36q1.15-2.94,1.71-4.54Z"/><path class="cls-1" d="M314.84,102.72H310V65.55H297.22V61.27h30.37v4.28H314.84Z"/><path class="cls-1" d="M351.64,87.33q0,7.14-3.81,11.59t-10.54,4.44A12.83,12.83,0,0,1,327,98.88a17.13,17.13,0,0,1-3.83-11.52q0-7.17,3.84-11.6t10.54-4.43q6.58,0,10.34,4.44T351.64,87.33ZM328,87.41a14.4,14.4,0,0,0,2.43,8.76,8.21,8.21,0,0,0,7.07,3.3,8,8,0,0,0,6.94-3.28q2.34-3.28,2.34-8.83c0-3.76-.77-6.71-2.31-8.87s-3.89-3.24-7.05-3.24a8.12,8.12,0,0,0-7,3.31C328.82,80.76,328,83.71,328,87.41Z"/><path class="cls-1" d="M372,71.69a14.67,14.67,0,0,1,2.3.17l-.17,4.32a13.38,13.38,0,0,0-2.24-.23,8.16,8.16,0,0,0-6.38,2.74,9.76,9.76,0,0,0-2.45,6.76v17.27h-4.68q0-28.31,0-30.75h3.9l.45,5.52a13.05,13.05,0,0,1,4.06-4.2A9.43,9.43,0,0,1,372,71.69Z"/><path class="cls-1" d="M389.6,103.36q-6.78,0-10.6-4.4t-3.83-11.52q0-7.17,3.91-11.64t10.63-4.47a17.61,17.61,0,0,1,4.61.59,10.22,10.22,0,0,1,3.52,1.54l-1.29,3.64a14.33,14.33,0,0,0-6.92-1.76,8.53,8.53,0,0,0-7.05,3.25Q380,81.83,380,87.47q0,5.49,2.56,8.7a8.6,8.6,0,0,0,7.11,3.21,16,16,0,0,0,7.4-2l.58,3.73A13.41,13.41,0,0,1,389.6,103.36Z"/><path class="cls-1" d="M424.44,102.72V84.14q0-4.64-1.48-6.73c-1-1.4-2.69-2.1-5.14-2.1a9.87,9.87,0,0,0-5.95,1.76,8,8,0,0,0-3.16,4.57,41.33,41.33,0,0,0-.25,4.68v16.4h-4.68V60l4.68-.28v12.5c0,1.07,0,2.38-.11,4a12.81,12.81,0,0,1,10.48-4.87q10.29,0,10.29,12.08v19.31Z"/><path class="cls-1" d="M227,182.88h-8.16V113H227Z"/><path class="cls-1" d="M246.47,206.59l-.7-7.13h9.63a37.68,37.68,0,0,0,7-.57,13.7,13.7,0,0,0,5-1.87,13.08,13.08,0,0,0,3.21-2.87,11,11,0,0,0,1.86-4.11,30.69,30.69,0,0,0,.85-5c.15-1.65.21-3.67.21-6.07v-4.58a21,21,0,0,1-6.56,6.26,18.54,18.54,0,0,1-9.49,2.2,21.29,21.29,0,0,1-8.19-1.54,19,19,0,0,1-6.33-4.08,24.83,24.83,0,0,1-4.39-5.91,27.06,27.06,0,0,1-2.65-6.94,32.11,32.11,0,0,1-.82-7.22,37.64,37.64,0,0,1,1.39-10.49,27.48,27.48,0,0,1,4.13-8.59,18.9,18.9,0,0,1,7.2-5.93A23.34,23.34,0,0,1,258,130a19.27,19.27,0,0,1,8.9,2,20.05,20.05,0,0,1,6.82,6v-6.9h7.7v48.07c0,2.21-.06,4.14-.17,5.79a39,39,0,0,1-.8,5.5,21.63,21.63,0,0,1-1.72,5.19,20.19,20.19,0,0,1-3.05,4.37,15.32,15.32,0,0,1-4.68,3.54,28.62,28.62,0,0,1-6.72,2.2,43.78,43.78,0,0,1-9.07.85Zm12.14-30.46a16.36,16.36,0,0,0,7-1.42,11.43,11.43,0,0,0,4.72-3.92,18.2,18.2,0,0,0,2.52-5.57,25.72,25.72,0,0,0,.83-6.7v-3.83a26.76,26.76,0,0,0-.85-7,18,18,0,0,0-2.62-5.72,12,12,0,0,0-4.72-3.94,15.79,15.79,0,0,0-6.87-1.42,14,14,0,0,0-5.93,1.23,12.76,12.76,0,0,0-4.39,3.23,19.77,19.77,0,0,0-2.93,4.68,23.77,23.77,0,0,0-1.72,5.41,29.24,29.24,0,0,0-.52,5.52,26.76,26.76,0,0,0,.56,5.43,22.89,22.89,0,0,0,1.8,5.24,18.71,18.71,0,0,0,3,4.47,13.37,13.37,0,0,0,4.37,3.09A13.77,13.77,0,0,0,258.61,176.13Z"/><path class="cls-1" 
d="M292.55,182.88v-51.8h6.61l1,7.41a23,23,0,0,1,7.67-6.16,21.75,21.75,0,0,1,9.94-2.34q9,0,13.27,5t4.3,15.46v32.39h-7.89q0-31.15-.05-33.38-.18-6.62-2.66-9.68t-8.29-3.07a16.84,16.84,0,0,0-10.15,2.93,13.29,13.29,0,0,0-5.29,7.6,49,49,0,0,0-.47,7.27v28.33Z"/><path class="cls-1" d="M353.76,112.24v9.35h-7.94v-9.35Zm0,18.84v51.8h-7.94v-51.8Z"/><path class="cls-1" d="M385.11,183.83q-7.65,0-10.86-3.73t-3.4-12V136.94h-7.69l.33-5.2,7.32-.66,2.69-11.85,5.19-.19v12h14.4v5.86h-14.4v29.7q0,5.9,1.8,8.31c1.19,1.61,3.24,2.41,6.13,2.41a60.73,60.73,0,0,0,6.76-.76l.33,6.24A53.69,53.69,0,0,1,385.11,183.83Z"/><path class="cls-1" d="M421.5,184q-11.28,0-17.77-7.18t-6.5-19.64q0-12,6.28-19.53A21,21,0,0,1,420,130q9.63,0,15.28,6.59T441,154.17c0,.38,0,1.07,0,2.08s0,1.73,0,2.17H405.35q.15,8.84,4.56,13.77t11.88,4.93a35.22,35.22,0,0,0,16.48-4.34l1.23,6.37A33.86,33.86,0,0,1,421.5,184Zm8-43.33a11.76,11.76,0,0,0-9.51-4.13,12.64,12.64,0,0,0-9.87,4.28q-3.82,4.28-4.49,11.68h27.34Q432.93,144.78,429.46,140.64Z"/><circle class="cls-2" cx="116.9" cy="181.95" r="10.74"/><path class="cls-1" d="M115.67,216.35c-25.85,0-47.76-17.44-60.12-47.84-10.87-26.71-1.36-56.71,5.89-66.12l6-7.78.87,9.79c.84,9.62,7.62,21.11,14.22,24.11a6.8,6.8,0,0,0,3,.67c-8.21-19.42-6.69-39.91-2.63-52.07C90.43,54.47,109.2,38.46,120,33.22l5.65-2.75-.14,6.28c-.4,18.12.75,33,7.66,46.84,4.47,9,10.47,17.75,15.77,25.5,1.45,2.13,2.86,4.2,4.2,6.2,10.07,15.11,10.22,30.81,7,40.47a51,51,0,0,1-8.21,15.06c-4.42,5.59-9.66,9.59-15.15,11.58l-2.62-7.21c8.92-3.23,15.94-13.59,18.7-21.86,2.64-7.93,2.43-21-6.11-33.78-1.32-2-2.71-4-4.15-6.13-5.43-8-11.59-17-16.3-26.4-6.69-13.38-8.49-27.31-8.53-43.56-9.44,6.56-22,19.43-27.57,36.08-.86,2.56-8,25.75,3.93,50l1.43,2.89-2.61,1.91a13,13,0,0,1-13.56,1.18c-7-3.19-13-11.52-16.19-20-4.53,11.72-8,31.7-.51,50.14,11.12,27.36,30.45,43,53,43,23.11,0,43.57-15.89,54.73-42.5,5.18-12.37,6-31.5,2-45.51-3-10.43-7.43-18.94-15.06-30.94-1.93,4.08-4.57,8.26-7.27,10.06l-4.26-6.38c2.21-1.48,6-9.22,6.8-13l1.82-9.19,5.15,7.83c10.47,15.93,16.46,26.41,20.2,39.48,4.5,15.76,3.54,36.56-2.33,50.58-5.79,13.81-14.29,25.46-24.59,33.71A59.2,59.2,0,0,1,115.67,216.35Z"/></svg> \ No newline at end of file diff --git a/docs/source/_templates/layout.html b/docs/source/_templates/layout.html index a9b2e703eb08..7cdec01f76b2 100644 --- a/docs/source/_templates/layout.html +++ b/docs/source/_templates/layout.html @@ -84,8 +84,10 @@ <div class="container-fluid header-holder tutorials-header" id="header-holder"> <div class="container"> + + <a class="header-logo" href="{{ theme_variables.external_urls['home'] }}" aria-label="PyTorch"></a> + <div class="header-container"> - <a class="header-logo" href="{{ theme_variables.external_urls['home'] }}" aria-label="PyTorch"></a> <div class="main-menu"> <ul> diff --git a/docs/source/conf.py b/docs/source/conf.py index 25456089fee6..8bca8e3a0753 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -103,7 +103,7 @@ "logo_only": True, } -html_logo = "_static/img/ignite-logo-dark.svg" +html_logo = "_static/img/ignite_logo.svg" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the
netbox-community__netbox-1046
DeviceComponentFilterSet by name expects int instead of str. <!-- Please note: GitHub issues are to be used only for feature requests and bug reports. For installation assistance or general discussion, please join us on the mailing list: https://groups.google.com/forum/#!forum/netbox-discuss Please indicate "bug report" or "feature request" below. Be sure to search the existing set of issues (both open and closed) to see if a similar issue has already been raised. --> ### Issue type: Bug <!-- If filing a bug, please indicate the version of Python and NetBox you are running. (This is not necessary for feature requests.) --> **Python version:** 2.7.13 **NetBox version:** v2-develop <!-- If filing a bug, please record the exact steps taken to reproduce the bug and any error messages that are generated. If filing a feature request, please precisely describe the data model or workflow you would like to see implemented, and provide a use case. --> Searching for interfaces by device name yields a [traceback](http://dpaste.com/2N3D7J2). Seems like this fixes it; a minimal change to the sketch below is shown as a diff. Will file a PR shortly. ```diff diff --git a/netbox/dcim/filters.py b/netbox/dcim/filters.py index 7992f8e..f94b172 100644 --- a/netbox/dcim/filters.py +++ b/netbox/dcim/filters.py @@ -401,7 +401,7 @@ class DeviceComponentFilterSet(django_filters.FilterSet): label='Device (ID)', ) device = django_filters.ModelMultipleChoiceFilter( - name='device', + name='device__name', queryset=Device.objects.all(), to_field_name='name', label='Device (name)', ```
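For illustration, a minimal sketch of the corrected lookup in a stand-alone filter set, assuming the NetBox `dcim` app layout (`Device` and `Interface` are imported relatively in the real `filters.py` below) and the pre-2.0 `django_filters` API that uses the `name=` keyword. The class name `InterfaceByDeviceNameFilter` and the sample device names are hypothetical; the reported traceback suggests that with `name='device'` the submitted device name was coerced against the ForeignKey's integer primary key, which `name='device__name'` avoids.

```python
import django_filters

from dcim.models import Device, Interface  # assumed absolute form of the relative import in filters.py


class InterfaceByDeviceNameFilter(django_filters.FilterSet):
    """Hypothetical filter set showing the corrected device-name lookup."""

    # Matching the fix above: traverse the relation so the submitted string is
    # compared against Device.name rather than the FK's integer primary key.
    device = django_filters.ModelMultipleChoiceFilter(
        name='device__name',
        queryset=Device.objects.all(),
        to_field_name='name',
        label='Device (name)',
    )

    class Meta:
        model = Interface
        fields = ['name']
```

With this in place, a request such as `GET /api/dcim/interfaces/?device=core-sw1&device=core-sw2` should return interfaces of the named devices, while numeric lookups remain available through the separate `device_id` filter that `DeviceComponentFilterSet` already defines.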
[ { "content": "import django_filters\nfrom netaddr.core import AddrFormatError\n\nfrom django.db.models import Q\n\nfrom extras.filters import CustomFieldFilterSet\nfrom tenancy.models import Tenant\nfrom utilities.filters import NullableModelMultipleChoiceFilter, NumericInFilter\nfrom .models import (\n ConsolePort, ConsolePortTemplate, ConsoleServerPort, ConsoleServerPortTemplate, Device, DeviceBay,\n DeviceBayTemplate, DeviceRole, DeviceType, IFACE_FF_LAG, Interface, InterfaceTemplate, Manufacturer, InventoryItem,\n Platform, PowerOutlet, PowerOutletTemplate, PowerPort, PowerPortTemplate, Rack, RackGroup, RackReservation,\n RackRole, Region, Site, VIRTUAL_IFACE_TYPES,\n)\n\n\nclass SiteFilter(CustomFieldFilterSet, django_filters.FilterSet):\n id__in = NumericInFilter(name='id', lookup_expr='in')\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n region_id = NullableModelMultipleChoiceFilter(\n name='region',\n queryset=Region.objects.all(),\n label='Region (ID)',\n )\n region = NullableModelMultipleChoiceFilter(\n name='region',\n queryset=Region.objects.all(),\n to_field_name='slug',\n label='Region (slug)',\n )\n tenant_id = NullableModelMultipleChoiceFilter(\n name='tenant',\n queryset=Tenant.objects.all(),\n label='Tenant (ID)',\n )\n tenant = NullableModelMultipleChoiceFilter(\n name='tenant',\n queryset=Tenant.objects.all(),\n to_field_name='slug',\n label='Tenant (slug)',\n )\n\n class Meta:\n model = Site\n fields = ['q', 'name', 'facility', 'asn']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n qs_filter = (\n Q(name__icontains=value) |\n Q(facility__icontains=value) |\n Q(physical_address__icontains=value) |\n Q(shipping_address__icontains=value) |\n Q(comments__icontains=value)\n )\n try:\n qs_filter |= Q(asn=int(value.strip()))\n except ValueError:\n pass\n return queryset.filter(qs_filter)\n\n\nclass RackGroupFilter(django_filters.FilterSet):\n site_id = django_filters.ModelMultipleChoiceFilter(\n name='site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site (slug)',\n )\n\n class Meta:\n model = RackGroup\n fields = ['name']\n\n\nclass RackFilter(CustomFieldFilterSet, django_filters.FilterSet):\n id__in = NumericInFilter(name='id', lookup_expr='in')\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n name='site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site (slug)',\n )\n group_id = NullableModelMultipleChoiceFilter(\n name='group',\n queryset=RackGroup.objects.all(),\n label='Group (ID)',\n )\n group = NullableModelMultipleChoiceFilter(\n name='group',\n queryset=RackGroup.objects.all(),\n to_field_name='slug',\n label='Group',\n )\n tenant_id = NullableModelMultipleChoiceFilter(\n name='tenant',\n queryset=Tenant.objects.all(),\n label='Tenant (ID)',\n )\n tenant = NullableModelMultipleChoiceFilter(\n name='tenant',\n queryset=Tenant.objects.all(),\n to_field_name='slug',\n label='Tenant (slug)',\n )\n role_id = NullableModelMultipleChoiceFilter(\n name='role',\n queryset=RackRole.objects.all(),\n label='Role (ID)',\n )\n role = NullableModelMultipleChoiceFilter(\n name='role',\n queryset=RackRole.objects.all(),\n 
to_field_name='slug',\n label='Role (slug)',\n )\n\n class Meta:\n model = Rack\n fields = ['u_height']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(name__icontains=value) |\n Q(facility_id__icontains=value) |\n Q(comments__icontains=value)\n )\n\n\nclass RackReservationFilter(django_filters.FilterSet):\n rack_id = django_filters.ModelMultipleChoiceFilter(\n name='rack',\n queryset=Rack.objects.all(),\n label='Rack (ID)',\n )\n\n class Meta:\n model = RackReservation\n fields = ['rack', 'user']\n\n\nclass DeviceTypeFilter(CustomFieldFilterSet, django_filters.FilterSet):\n id__in = NumericInFilter(name='id', lookup_expr='in')\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n manufacturer_id = django_filters.ModelMultipleChoiceFilter(\n name='manufacturer',\n queryset=Manufacturer.objects.all(),\n label='Manufacturer (ID)',\n )\n manufacturer = django_filters.ModelMultipleChoiceFilter(\n name='manufacturer__slug',\n queryset=Manufacturer.objects.all(),\n to_field_name='slug',\n label='Manufacturer (slug)',\n )\n\n class Meta:\n model = DeviceType\n fields = [\n 'model', 'part_number', 'u_height', 'is_console_server', 'is_pdu', 'is_network_device', 'subdevice_role',\n ]\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(manufacturer__name__icontains=value) |\n Q(model__icontains=value) |\n Q(part_number__icontains=value) |\n Q(comments__icontains=value)\n )\n\n\nclass DeviceTypeComponentFilterSet(django_filters.FilterSet):\n devicetype_id = django_filters.ModelMultipleChoiceFilter(\n name='device_type',\n queryset=DeviceType.objects.all(),\n label='Device type (ID)',\n )\n devicetype = django_filters.ModelMultipleChoiceFilter(\n name='device_type',\n queryset=DeviceType.objects.all(),\n to_field_name='name',\n label='Device type (name)',\n )\n\n\nclass ConsolePortTemplateFilter(DeviceTypeComponentFilterSet):\n\n class Meta:\n model = ConsolePortTemplate\n fields = ['name']\n\n\nclass ConsoleServerPortTemplateFilter(DeviceTypeComponentFilterSet):\n\n class Meta:\n model = ConsoleServerPortTemplate\n fields = ['name']\n\n\nclass PowerPortTemplateFilter(DeviceTypeComponentFilterSet):\n\n class Meta:\n model = PowerPortTemplate\n fields = ['name']\n\n\nclass PowerOutletTemplateFilter(DeviceTypeComponentFilterSet):\n\n class Meta:\n model = PowerOutletTemplate\n fields = ['name']\n\n\nclass InterfaceTemplateFilter(DeviceTypeComponentFilterSet):\n\n class Meta:\n model = InterfaceTemplate\n fields = ['name', 'form_factor']\n\n\nclass DeviceBayTemplateFilter(DeviceTypeComponentFilterSet):\n\n class Meta:\n model = DeviceBayTemplate\n fields = ['name']\n\n\nclass DeviceFilter(CustomFieldFilterSet, django_filters.FilterSet):\n id__in = NumericInFilter(name='id', lookup_expr='in')\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n mac_address = django_filters.CharFilter(\n method='_mac_address',\n label='MAC address',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n name='site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n rack_group_id = django_filters.ModelMultipleChoiceFilter(\n name='rack__group',\n queryset=RackGroup.objects.all(),\n label='Rack group (ID)',\n )\n rack_id = NullableModelMultipleChoiceFilter(\n name='rack',\n 
queryset=Rack.objects.all(),\n label='Rack (ID)',\n )\n role_id = django_filters.ModelMultipleChoiceFilter(\n name='device_role',\n queryset=DeviceRole.objects.all(),\n label='Role (ID)',\n )\n role = django_filters.ModelMultipleChoiceFilter(\n name='device_role__slug',\n queryset=DeviceRole.objects.all(),\n to_field_name='slug',\n label='Role (slug)',\n )\n tenant_id = NullableModelMultipleChoiceFilter(\n name='tenant',\n queryset=Tenant.objects.all(),\n label='Tenant (ID)',\n )\n tenant = NullableModelMultipleChoiceFilter(\n name='tenant',\n queryset=Tenant.objects.all(),\n to_field_name='slug',\n label='Tenant (slug)',\n )\n device_type_id = django_filters.ModelMultipleChoiceFilter(\n name='device_type',\n queryset=DeviceType.objects.all(),\n label='Device type (ID)',\n )\n manufacturer_id = django_filters.ModelMultipleChoiceFilter(\n name='device_type__manufacturer',\n queryset=Manufacturer.objects.all(),\n label='Manufacturer (ID)',\n )\n manufacturer = django_filters.ModelMultipleChoiceFilter(\n name='device_type__manufacturer__slug',\n queryset=Manufacturer.objects.all(),\n to_field_name='slug',\n label='Manufacturer (slug)',\n )\n model = django_filters.ModelMultipleChoiceFilter(\n name='device_type__slug',\n queryset=DeviceType.objects.all(),\n to_field_name='slug',\n label='Device model (slug)',\n )\n platform_id = NullableModelMultipleChoiceFilter(\n name='platform',\n queryset=Platform.objects.all(),\n label='Platform (ID)',\n )\n platform = NullableModelMultipleChoiceFilter(\n name='platform',\n queryset=Platform.objects.all(),\n to_field_name='slug',\n label='Platform (slug)',\n )\n status = django_filters.BooleanFilter(\n name='status',\n label='Status',\n )\n is_console_server = django_filters.BooleanFilter(\n name='device_type__is_console_server',\n label='Is a console server',\n )\n is_pdu = django_filters.BooleanFilter(\n name='device_type__is_pdu',\n label='Is a PDU',\n )\n is_network_device = django_filters.BooleanFilter(\n name='device_type__is_network_device',\n label='Is a network device',\n )\n has_primary_ip = django_filters.BooleanFilter(\n method='_has_primary_ip',\n label='Has a primary IP',\n )\n\n class Meta:\n model = Device\n fields = ['name', 'serial', 'asset_tag']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(name__icontains=value) |\n Q(serial__icontains=value.strip()) |\n Q(inventory_items__serial__icontains=value.strip()) |\n Q(asset_tag=value.strip()) |\n Q(comments__icontains=value)\n ).distinct()\n\n def _mac_address(self, queryset, name, value):\n value = value.strip()\n if not value:\n return queryset\n try:\n return queryset.filter(interfaces__mac_address=value).distinct()\n except AddrFormatError:\n return queryset.none()\n\n def _has_primary_ip(self, queryset, name, value):\n if value:\n return queryset.filter(\n Q(primary_ip4__isnull=False) |\n Q(primary_ip6__isnull=False)\n )\n else:\n return queryset.exclude(\n Q(primary_ip4__isnull=False) |\n Q(primary_ip6__isnull=False)\n )\n\n\nclass DeviceComponentFilterSet(django_filters.FilterSet):\n device_id = django_filters.ModelMultipleChoiceFilter(\n name='device',\n queryset=Device.objects.all(),\n label='Device (ID)',\n )\n device = django_filters.ModelMultipleChoiceFilter(\n name='device',\n queryset=Device.objects.all(),\n to_field_name='name',\n label='Device (name)',\n )\n\n\nclass ConsolePortFilter(DeviceComponentFilterSet):\n\n class Meta:\n model = ConsolePort\n fields = ['name']\n\n\nclass 
ConsoleServerPortFilter(DeviceComponentFilterSet):\n\n class Meta:\n model = ConsoleServerPort\n fields = ['name']\n\n\nclass PowerPortFilter(DeviceComponentFilterSet):\n\n class Meta:\n model = PowerPort\n fields = ['name']\n\n\nclass PowerOutletFilter(DeviceComponentFilterSet):\n\n class Meta:\n model = PowerOutlet\n fields = ['name']\n\n\nclass InterfaceFilter(DeviceComponentFilterSet):\n type = django_filters.CharFilter(\n method='filter_type',\n label='Interface type',\n )\n mac_address = django_filters.CharFilter(\n method='_mac_address',\n label='MAC address',\n )\n\n class Meta:\n model = Interface\n fields = ['name', 'form_factor']\n\n def filter_type(self, queryset, name, value):\n value = value.strip().lower()\n if value == 'physical':\n return queryset.exclude(form_factor__in=VIRTUAL_IFACE_TYPES)\n elif value == 'virtual':\n return queryset.filter(form_factor__in=VIRTUAL_IFACE_TYPES)\n elif value == 'lag':\n return queryset.filter(form_factor=IFACE_FF_LAG)\n return queryset\n\n def _mac_address(self, queryset, name, value):\n value = value.strip()\n if not value:\n return queryset\n try:\n return queryset.filter(mac_address=value)\n except AddrFormatError:\n return queryset.none()\n\n\nclass DeviceBayFilter(DeviceComponentFilterSet):\n\n class Meta:\n model = DeviceBay\n fields = ['name']\n\n\nclass InventoryItemFilter(DeviceComponentFilterSet):\n\n class Meta:\n model = InventoryItem\n fields = ['name']\n\n\nclass ConsoleConnectionFilter(django_filters.FilterSet):\n site = django_filters.CharFilter(\n method='filter_site',\n label='Site (slug)',\n )\n device = django_filters.CharFilter(\n method='filter_device',\n label='Device',\n )\n\n class Meta:\n model = ConsolePort\n fields = ['name', 'connection_status']\n\n def filter_site(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(cs_port__device__site__slug=value)\n\n def filter_device(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(device__name__icontains=value) |\n Q(cs_port__device__name__icontains=value)\n )\n\n\nclass PowerConnectionFilter(django_filters.FilterSet):\n site = django_filters.CharFilter(\n method='filter_site',\n label='Site (slug)',\n )\n device = django_filters.CharFilter(\n method='filter_device',\n label='Device',\n )\n\n class Meta:\n model = PowerPort\n fields = ['name', 'connection_status']\n\n def filter_site(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(power_outlet__device__site__slug=value)\n\n def filter_device(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(device__name__icontains=value) |\n Q(power_outlet__device__name__icontains=value)\n )\n\n\nclass InterfaceConnectionFilter(django_filters.FilterSet):\n site = django_filters.CharFilter(\n method='filter_site',\n label='Site (slug)',\n )\n device = django_filters.CharFilter(\n method='filter_device',\n label='Device',\n )\n\n def filter_site(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(interface_a__device__site__slug=value) |\n Q(interface_b__device__site__slug=value)\n )\n\n def filter_device(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(interface_a__device__name__icontains=value) |\n Q(interface_b__device__name__icontains=value)\n )\n", "path": "netbox/dcim/filters.py" } ]
[ { "content": "import django_filters\nfrom netaddr.core import AddrFormatError\n\nfrom django.db.models import Q\n\nfrom extras.filters import CustomFieldFilterSet\nfrom tenancy.models import Tenant\nfrom utilities.filters import NullableModelMultipleChoiceFilter, NumericInFilter\nfrom .models import (\n ConsolePort, ConsolePortTemplate, ConsoleServerPort, ConsoleServerPortTemplate, Device, DeviceBay,\n DeviceBayTemplate, DeviceRole, DeviceType, IFACE_FF_LAG, Interface, InterfaceTemplate, Manufacturer, InventoryItem,\n Platform, PowerOutlet, PowerOutletTemplate, PowerPort, PowerPortTemplate, Rack, RackGroup, RackReservation,\n RackRole, Region, Site, VIRTUAL_IFACE_TYPES,\n)\n\n\nclass SiteFilter(CustomFieldFilterSet, django_filters.FilterSet):\n id__in = NumericInFilter(name='id', lookup_expr='in')\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n region_id = NullableModelMultipleChoiceFilter(\n name='region',\n queryset=Region.objects.all(),\n label='Region (ID)',\n )\n region = NullableModelMultipleChoiceFilter(\n name='region',\n queryset=Region.objects.all(),\n to_field_name='slug',\n label='Region (slug)',\n )\n tenant_id = NullableModelMultipleChoiceFilter(\n name='tenant',\n queryset=Tenant.objects.all(),\n label='Tenant (ID)',\n )\n tenant = NullableModelMultipleChoiceFilter(\n name='tenant',\n queryset=Tenant.objects.all(),\n to_field_name='slug',\n label='Tenant (slug)',\n )\n\n class Meta:\n model = Site\n fields = ['q', 'name', 'facility', 'asn']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n qs_filter = (\n Q(name__icontains=value) |\n Q(facility__icontains=value) |\n Q(physical_address__icontains=value) |\n Q(shipping_address__icontains=value) |\n Q(comments__icontains=value)\n )\n try:\n qs_filter |= Q(asn=int(value.strip()))\n except ValueError:\n pass\n return queryset.filter(qs_filter)\n\n\nclass RackGroupFilter(django_filters.FilterSet):\n site_id = django_filters.ModelMultipleChoiceFilter(\n name='site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site (slug)',\n )\n\n class Meta:\n model = RackGroup\n fields = ['name']\n\n\nclass RackFilter(CustomFieldFilterSet, django_filters.FilterSet):\n id__in = NumericInFilter(name='id', lookup_expr='in')\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n name='site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site (slug)',\n )\n group_id = NullableModelMultipleChoiceFilter(\n name='group',\n queryset=RackGroup.objects.all(),\n label='Group (ID)',\n )\n group = NullableModelMultipleChoiceFilter(\n name='group',\n queryset=RackGroup.objects.all(),\n to_field_name='slug',\n label='Group',\n )\n tenant_id = NullableModelMultipleChoiceFilter(\n name='tenant',\n queryset=Tenant.objects.all(),\n label='Tenant (ID)',\n )\n tenant = NullableModelMultipleChoiceFilter(\n name='tenant',\n queryset=Tenant.objects.all(),\n to_field_name='slug',\n label='Tenant (slug)',\n )\n role_id = NullableModelMultipleChoiceFilter(\n name='role',\n queryset=RackRole.objects.all(),\n label='Role (ID)',\n )\n role = NullableModelMultipleChoiceFilter(\n name='role',\n queryset=RackRole.objects.all(),\n 
to_field_name='slug',\n label='Role (slug)',\n )\n\n class Meta:\n model = Rack\n fields = ['u_height']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(name__icontains=value) |\n Q(facility_id__icontains=value) |\n Q(comments__icontains=value)\n )\n\n\nclass RackReservationFilter(django_filters.FilterSet):\n rack_id = django_filters.ModelMultipleChoiceFilter(\n name='rack',\n queryset=Rack.objects.all(),\n label='Rack (ID)',\n )\n\n class Meta:\n model = RackReservation\n fields = ['rack', 'user']\n\n\nclass DeviceTypeFilter(CustomFieldFilterSet, django_filters.FilterSet):\n id__in = NumericInFilter(name='id', lookup_expr='in')\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n manufacturer_id = django_filters.ModelMultipleChoiceFilter(\n name='manufacturer',\n queryset=Manufacturer.objects.all(),\n label='Manufacturer (ID)',\n )\n manufacturer = django_filters.ModelMultipleChoiceFilter(\n name='manufacturer__slug',\n queryset=Manufacturer.objects.all(),\n to_field_name='slug',\n label='Manufacturer (slug)',\n )\n\n class Meta:\n model = DeviceType\n fields = [\n 'model', 'part_number', 'u_height', 'is_console_server', 'is_pdu', 'is_network_device', 'subdevice_role',\n ]\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(manufacturer__name__icontains=value) |\n Q(model__icontains=value) |\n Q(part_number__icontains=value) |\n Q(comments__icontains=value)\n )\n\n\nclass DeviceTypeComponentFilterSet(django_filters.FilterSet):\n devicetype_id = django_filters.ModelMultipleChoiceFilter(\n name='device_type',\n queryset=DeviceType.objects.all(),\n label='Device type (ID)',\n )\n devicetype = django_filters.ModelMultipleChoiceFilter(\n name='device_type',\n queryset=DeviceType.objects.all(),\n to_field_name='name',\n label='Device type (name)',\n )\n\n\nclass ConsolePortTemplateFilter(DeviceTypeComponentFilterSet):\n\n class Meta:\n model = ConsolePortTemplate\n fields = ['name']\n\n\nclass ConsoleServerPortTemplateFilter(DeviceTypeComponentFilterSet):\n\n class Meta:\n model = ConsoleServerPortTemplate\n fields = ['name']\n\n\nclass PowerPortTemplateFilter(DeviceTypeComponentFilterSet):\n\n class Meta:\n model = PowerPortTemplate\n fields = ['name']\n\n\nclass PowerOutletTemplateFilter(DeviceTypeComponentFilterSet):\n\n class Meta:\n model = PowerOutletTemplate\n fields = ['name']\n\n\nclass InterfaceTemplateFilter(DeviceTypeComponentFilterSet):\n\n class Meta:\n model = InterfaceTemplate\n fields = ['name', 'form_factor']\n\n\nclass DeviceBayTemplateFilter(DeviceTypeComponentFilterSet):\n\n class Meta:\n model = DeviceBayTemplate\n fields = ['name']\n\n\nclass DeviceFilter(CustomFieldFilterSet, django_filters.FilterSet):\n id__in = NumericInFilter(name='id', lookup_expr='in')\n q = django_filters.CharFilter(\n method='search',\n label='Search',\n )\n mac_address = django_filters.CharFilter(\n method='_mac_address',\n label='MAC address',\n )\n site_id = django_filters.ModelMultipleChoiceFilter(\n name='site',\n queryset=Site.objects.all(),\n label='Site (ID)',\n )\n site = django_filters.ModelMultipleChoiceFilter(\n name='site__slug',\n queryset=Site.objects.all(),\n to_field_name='slug',\n label='Site name (slug)',\n )\n rack_group_id = django_filters.ModelMultipleChoiceFilter(\n name='rack__group',\n queryset=RackGroup.objects.all(),\n label='Rack group (ID)',\n )\n rack_id = NullableModelMultipleChoiceFilter(\n name='rack',\n 
queryset=Rack.objects.all(),\n label='Rack (ID)',\n )\n role_id = django_filters.ModelMultipleChoiceFilter(\n name='device_role',\n queryset=DeviceRole.objects.all(),\n label='Role (ID)',\n )\n role = django_filters.ModelMultipleChoiceFilter(\n name='device_role__slug',\n queryset=DeviceRole.objects.all(),\n to_field_name='slug',\n label='Role (slug)',\n )\n tenant_id = NullableModelMultipleChoiceFilter(\n name='tenant',\n queryset=Tenant.objects.all(),\n label='Tenant (ID)',\n )\n tenant = NullableModelMultipleChoiceFilter(\n name='tenant',\n queryset=Tenant.objects.all(),\n to_field_name='slug',\n label='Tenant (slug)',\n )\n device_type_id = django_filters.ModelMultipleChoiceFilter(\n name='device_type',\n queryset=DeviceType.objects.all(),\n label='Device type (ID)',\n )\n manufacturer_id = django_filters.ModelMultipleChoiceFilter(\n name='device_type__manufacturer',\n queryset=Manufacturer.objects.all(),\n label='Manufacturer (ID)',\n )\n manufacturer = django_filters.ModelMultipleChoiceFilter(\n name='device_type__manufacturer__slug',\n queryset=Manufacturer.objects.all(),\n to_field_name='slug',\n label='Manufacturer (slug)',\n )\n model = django_filters.ModelMultipleChoiceFilter(\n name='device_type__slug',\n queryset=DeviceType.objects.all(),\n to_field_name='slug',\n label='Device model (slug)',\n )\n platform_id = NullableModelMultipleChoiceFilter(\n name='platform',\n queryset=Platform.objects.all(),\n label='Platform (ID)',\n )\n platform = NullableModelMultipleChoiceFilter(\n name='platform',\n queryset=Platform.objects.all(),\n to_field_name='slug',\n label='Platform (slug)',\n )\n status = django_filters.BooleanFilter(\n name='status',\n label='Status',\n )\n is_console_server = django_filters.BooleanFilter(\n name='device_type__is_console_server',\n label='Is a console server',\n )\n is_pdu = django_filters.BooleanFilter(\n name='device_type__is_pdu',\n label='Is a PDU',\n )\n is_network_device = django_filters.BooleanFilter(\n name='device_type__is_network_device',\n label='Is a network device',\n )\n has_primary_ip = django_filters.BooleanFilter(\n method='_has_primary_ip',\n label='Has a primary IP',\n )\n\n class Meta:\n model = Device\n fields = ['name', 'serial', 'asset_tag']\n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(name__icontains=value) |\n Q(serial__icontains=value.strip()) |\n Q(inventory_items__serial__icontains=value.strip()) |\n Q(asset_tag=value.strip()) |\n Q(comments__icontains=value)\n ).distinct()\n\n def _mac_address(self, queryset, name, value):\n value = value.strip()\n if not value:\n return queryset\n try:\n return queryset.filter(interfaces__mac_address=value).distinct()\n except AddrFormatError:\n return queryset.none()\n\n def _has_primary_ip(self, queryset, name, value):\n if value:\n return queryset.filter(\n Q(primary_ip4__isnull=False) |\n Q(primary_ip6__isnull=False)\n )\n else:\n return queryset.exclude(\n Q(primary_ip4__isnull=False) |\n Q(primary_ip6__isnull=False)\n )\n\n\nclass DeviceComponentFilterSet(django_filters.FilterSet):\n device_id = django_filters.ModelMultipleChoiceFilter(\n name='device',\n queryset=Device.objects.all(),\n label='Device (ID)',\n )\n device = django_filters.ModelMultipleChoiceFilter(\n name='device__name',\n queryset=Device.objects.all(),\n to_field_name='name',\n label='Device (name)',\n )\n\n\nclass ConsolePortFilter(DeviceComponentFilterSet):\n\n class Meta:\n model = ConsolePort\n fields = ['name']\n\n\nclass 
ConsoleServerPortFilter(DeviceComponentFilterSet):\n\n class Meta:\n model = ConsoleServerPort\n fields = ['name']\n\n\nclass PowerPortFilter(DeviceComponentFilterSet):\n\n class Meta:\n model = PowerPort\n fields = ['name']\n\n\nclass PowerOutletFilter(DeviceComponentFilterSet):\n\n class Meta:\n model = PowerOutlet\n fields = ['name']\n\n\nclass InterfaceFilter(DeviceComponentFilterSet):\n type = django_filters.CharFilter(\n method='filter_type',\n label='Interface type',\n )\n mac_address = django_filters.CharFilter(\n method='_mac_address',\n label='MAC address',\n )\n\n class Meta:\n model = Interface\n fields = ['name', 'form_factor']\n\n def filter_type(self, queryset, name, value):\n value = value.strip().lower()\n if value == 'physical':\n return queryset.exclude(form_factor__in=VIRTUAL_IFACE_TYPES)\n elif value == 'virtual':\n return queryset.filter(form_factor__in=VIRTUAL_IFACE_TYPES)\n elif value == 'lag':\n return queryset.filter(form_factor=IFACE_FF_LAG)\n return queryset\n\n def _mac_address(self, queryset, name, value):\n value = value.strip()\n if not value:\n return queryset\n try:\n return queryset.filter(mac_address=value)\n except AddrFormatError:\n return queryset.none()\n\n\nclass DeviceBayFilter(DeviceComponentFilterSet):\n\n class Meta:\n model = DeviceBay\n fields = ['name']\n\n\nclass InventoryItemFilter(DeviceComponentFilterSet):\n\n class Meta:\n model = InventoryItem\n fields = ['name']\n\n\nclass ConsoleConnectionFilter(django_filters.FilterSet):\n site = django_filters.CharFilter(\n method='filter_site',\n label='Site (slug)',\n )\n device = django_filters.CharFilter(\n method='filter_device',\n label='Device',\n )\n\n class Meta:\n model = ConsolePort\n fields = ['name', 'connection_status']\n\n def filter_site(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(cs_port__device__site__slug=value)\n\n def filter_device(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(device__name__icontains=value) |\n Q(cs_port__device__name__icontains=value)\n )\n\n\nclass PowerConnectionFilter(django_filters.FilterSet):\n site = django_filters.CharFilter(\n method='filter_site',\n label='Site (slug)',\n )\n device = django_filters.CharFilter(\n method='filter_device',\n label='Device',\n )\n\n class Meta:\n model = PowerPort\n fields = ['name', 'connection_status']\n\n def filter_site(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(power_outlet__device__site__slug=value)\n\n def filter_device(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(device__name__icontains=value) |\n Q(power_outlet__device__name__icontains=value)\n )\n\n\nclass InterfaceConnectionFilter(django_filters.FilterSet):\n site = django_filters.CharFilter(\n method='filter_site',\n label='Site (slug)',\n )\n device = django_filters.CharFilter(\n method='filter_device',\n label='Device',\n )\n\n def filter_site(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(interface_a__device__site__slug=value) |\n Q(interface_b__device__site__slug=value)\n )\n\n def filter_device(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(interface_a__device__name__icontains=value) |\n Q(interface_b__device__name__icontains=value)\n )\n", "path": "netbox/dcim/filters.py" } ]
diff --git a/netbox/dcim/filters.py b/netbox/dcim/filters.py index 7992f8e054e..f94b1728aec 100644 --- a/netbox/dcim/filters.py +++ b/netbox/dcim/filters.py @@ -401,7 +401,7 @@ class DeviceComponentFilterSet(django_filters.FilterSet): label='Device (ID)', ) device = django_filters.ModelMultipleChoiceFilter( - name='device', + name='device__name', queryset=Device.objects.all(), to_field_name='name', label='Device (name)',
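The hunk above changes the lookup target of the component-level `device` filter from `device` to `device__name`, matching the name-based input declared with `to_field_name='name'`. Assuming a reachable NetBox instance (the URL and token below are placeholders, not from this change), filtering a device-component endpoint by device name through that filter would look roughly like this sketch:

```python
import requests

# Placeholder URL and token -- substitute a real NetBox instance.
NETBOX_URL = "https://netbox.example.com"
API_TOKEN = "0123456789abcdef0123456789abcdef01234567"

# The ?device= parameter maps to DeviceComponentFilterSet.device above,
# which resolves the submitted value against Device names.
resp = requests.get(
    NETBOX_URL + "/api/dcim/interfaces/",
    params={"device": "edge-router-1"},
    headers={"Authorization": "Token " + API_TOKEN},
)
resp.raise_for_status()
for interface in resp.json()["results"]:
    print(interface["name"])
```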
ansible__ansible-40614
asa_config Python3 Compatibility Issue for "backup" <!--- Verify first that your issue/request is not already reported on GitHub. THIS FORM WILL BE READ BY A MACHINE, COMPLETE ALL SECTIONS AS DESCRIBED. Also test if the latest release, and devel branch are affected too. ALWAYS add information AFTER (OUTSIDE) these html comments. Otherwise it may end up being automatically closed by our bot. --> ##### SUMMARY "backup" in asa_config fails on Python 3.6.3 with Ansible 2.5.2. Same issue as [36717](https://github.com/ansible/ansible/issues/36717) but for asa_config. Changing line 58 of asa_config.py from:` for key in result.keys()` To either: `for key in result.copy().keys():` Or: `for key in list(result)` Should sort this out for py2 or py3. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME <!--- Insert, BELOW THIS COMMENT, the name of the module, plugin, task or feature. Do not include extra details here, e.g. "vyos_command" not "the network module vyos_command" or the full path--> asa_config ##### ANSIBLE VERSION <!--- Paste, BELOW THIS COMMENT, verbatim output from "ansible --version" between quotes below --> ``` ansible 2.5.2 config file = /home/ignw/my_network_as_code/ansible.cfg configured module search path = ['/usr/local/lib/python3.6/dist-packages/napalm_ansible/modules'] ansible python module location = /usr/local/lib/python3.6/dist-packages/ansible executable location = /usr/local/bin/ansible python version = 3.6.3 (default, Oct 3 2017, 21:45:48) [GCC 7.2.0] ``` ##### CONFIGURATION <!--- If using Ansible 2.4 or above, paste, BELOW THIS COMMENT, the results of "ansible-config dump --only-changed" Otherwise, mention any settings you have changed/added/removed in ansible.cfg (or using the ANSIBLE_* environment variables).--> DEFAULT_ACTION_PLUGIN_PATH(/home/ignw/my_network_as_code/ansible.cfg) = ['/usr/local/lib/python3.6/dist-packages/napalm_ansible/plug DEFAULT_HOST_LIST(/home/ignw/my_network_as_code/ansible.cfg) = ['/home/ignw/my_network_as_code/inventory'] DEFAULT_MODULE_PATH(/home/ignw/my_network_as_code/ansible.cfg) = ['/usr/local/lib/python3.6/dist-packages/napalm_ansible/modules'] HOST_KEY_CHECKING(/home/ignw/my_network_as_code/ansible.cfg) = False RETRY_FILES_ENABLED(/home/ignw/my_network_as_code/ansible.cfg) = False ##### OS / ENVIRONMENT <!--- Mention, BELOW THIS COMMENT, the OS you are running Ansible from, and the OS you are managing, or say "N/A" for anything that is not platform-specific. Also mention the specific version of what you are trying to control, e.g. if this is a network bug the version of firmware on the network device.--> Distributor ID: Ubuntu Description: Ubuntu 17.10 Release: 17.10 Codename: artful Network device (Cisco ASAv): Cisco Adaptive Security Appliance Software Version 9.9(2) Firepower Extensible Operating System Version 2.3(1.84) Device Manager Version 7.9(2) Compiled on Sun 25-Mar-18 17:34 PDT by builders System image file is "boot:/asa992-smp-k8.bin" Config file at boot was "startup-config" Hardware: ASAv, 1024 MB RAM, CPU Clarkdale 2300 MHz, Model Id: ASAv5 Internal ATA Compact Flash, 1024MB Slot 1: ATA Compact Flash, 8192MB BIOS Flash Firmware Hub @ 0x0, 0KB ##### STEPS TO REPRODUCE <!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case. For new features, show how the feature would be used. 
--> <!--- Paste example playbooks or commands between quotes below --> ```yaml - name: Backup Cisco ASA Configurations connection: local hosts: cisco-asa gather_facts: no vars: creds: host: "{{ ansible_host }}" username: "{{ username }}" password: "{{ username }}" authorize: yes auth_pass: "{{ enable_password }}" tags: asa tasks: - asa_config: provider: "{{ creds }}" backup: yes ``` <!--- You can also paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- What did you expect to happen when running the steps above? --> Backup of configuration to be placed in backup directory ##### ACTUAL RESULTS <!--- What actually happened? If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes below --> ``` <10.0.0.8> <10.0.0.8> ssh connection has completed successfully <10.0.0.8> connection to remote device started successfully <10.0.0.8> local domain socket listeners started successfully <10.0.0.8> <10.0.0.8> local domain socket path is /home/ignw/.ansible/pc/8617761c70 <10.0.0.8> socket_path: /home/ignw/.ansible/pc/8617761c70 Using module file /usr/local/lib/python3.6/dist-packages/ansible/modules/network/asa/asa_config.py <10.0.0.8> ESTABLISH LOCAL CONNECTION FOR USER: ignw <10.0.0.8> EXEC /bin/sh -c 'echo ~ && sleep 0' <10.0.0.8> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411 `" && echo ansible-tmp-1526941893.6014657-134187020317411="` echo /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411 `" ) && sleep 0' <10.0.0.8> PUT /home/ignw/.ansible/tmp/ansible-local-24856l3y7x_n7/tmpq9jw7ue_ TO /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/asa_config.py <10.0.0.8> EXEC /bin/sh -c 'chmod u+x /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/ /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/asa_config.py && sleep 0' <10.0.0.8> EXEC /bin/sh -c '/usr/bin/python3 /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/asa_config.py && sleep 0' <10.0.0.8> EXEC /bin/sh -c 'rm -f -r /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/ > /dev/null 2>&1 && sleep 0' The full traceback is: Traceback (most recent call last): File "/usr/local/lib/python3.6/dist-packages/ansible/executor/task_executor.py", line 138, in run res = self._execute() File "/usr/local/lib/python3.6/dist-packages/ansible/executor/task_executor.py", line 558, in _execute result = self._handler.run(task_vars=variables) File "/usr/local/lib/python3.6/dist-packages/ansible/plugins/action/asa_config.py", line 58, in run for key in result.keys().copy(): AttributeError: 'dict_keys' object has no attribute 'copy' fatal: [acme-sea-asa1]: FAILED! => { "msg": "Unexpected failure during module execution.", "stdout": "" } PLAY RECAP ************************************************************************************************************************* acme-sea-asa1 : ok=0 changed=0 unreachable=0 failed=1 ```
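A minimal standalone sketch of the failure mode and the suggested fix — the `result` dictionary below is invented for illustration, only the regex is taken from the plugin. On Python 3, `dict.keys()` returns a live view, so deleting entries while iterating over it raises `RuntimeError: dictionary changed size during iteration` (and, as the traceback shows, `dict_keys` has no `.copy()` method); materialising the keys first with `list(result)` behaves the same on Python 2 and Python 3:

```python
import re

PRIVATE_KEYS_RE = re.compile('__.+__')

# Invented module result, for illustration only.
result = {'__backup__': 'hostname asa ...', 'changed': False, 'backup_path': '/tmp/asa.cfg'}

# list(result) snapshots the keys up front, so deleting entries
# inside the loop is safe on both Python 2 and Python 3.
for key in list(result):
    if PRIVATE_KEYS_RE.match(key):
        del result[key]

print(result)  # -> {'changed': False, 'backup_path': '/tmp/asa.cfg'}
```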
[ { "content": "#\n# (c) 2017, Red Hat, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\nimport re\nimport time\nimport glob\n\nfrom ansible.plugins.action.asa import ActionModule as _ActionModule\nfrom ansible.module_utils._text import to_text\nfrom ansible.module_utils.six.moves.urllib.parse import urlsplit\nfrom ansible.utils.vars import merge_hash\n\nPRIVATE_KEYS_RE = re.compile('__.+__')\n\n\nclass ActionModule(_ActionModule):\n\n def run(self, tmp=None, task_vars=None):\n\n if self._task.args.get('src'):\n try:\n self._handle_template()\n except ValueError as exc:\n return dict(failed=True, msg=exc.message)\n\n result = super(ActionModule, self).run(tmp, task_vars)\n del tmp # tmp no longer has any effect\n\n if self._task.args.get('backup') and result.get('__backup__'):\n # User requested backup and no error occurred in module.\n # NOTE: If there is a parameter error, _backup key may not be in results.\n filepath = self._write_backup(task_vars['inventory_hostname'],\n result['__backup__'])\n\n result['backup_path'] = filepath\n\n # strip out any keys that have two leading and two trailing\n # underscore characters\n for key in result.keys():\n if PRIVATE_KEYS_RE.match(key):\n del result[key]\n\n return result\n\n def _get_working_path(self):\n cwd = self._loader.get_basedir()\n if self._task._role is not None:\n cwd = self._task._role._role_path\n return cwd\n\n def _write_backup(self, host, contents):\n backup_path = self._get_working_path() + '/backup'\n if not os.path.exists(backup_path):\n os.mkdir(backup_path)\n for fn in glob.glob('%s/%s*' % (backup_path, host)):\n os.remove(fn)\n tstamp = time.strftime(\"%Y-%m-%d@%H:%M:%S\", time.localtime(time.time()))\n filename = '%s/%s_config.%s' % (backup_path, host, tstamp)\n open(filename, 'w').write(contents)\n return filename\n\n def _handle_template(self):\n src = self._task.args.get('src')\n working_path = self._get_working_path()\n\n if os.path.isabs(src) or urlsplit('src').scheme:\n source = src\n else:\n source = self._loader.path_dwim_relative(working_path, 'templates', src)\n if not source:\n source = self._loader.path_dwim_relative(working_path, src)\n\n if not os.path.exists(source):\n raise ValueError('path specified in src not found')\n\n try:\n with open(source, 'r') as f:\n template_data = to_text(f.read())\n except IOError:\n return dict(failed=True, msg='unable to load src file')\n\n # Create a template search path in the following order:\n # [working_path, self_role_path, dependent_role_paths, dirname(source)]\n searchpath = [working_path]\n if self._task._role is not None:\n searchpath.append(self._task._role._role_path)\n if hasattr(self._task, \"_block:\"):\n dep_chain = self._task._block.get_dep_chain()\n if dep_chain is not None:\n for role in dep_chain:\n searchpath.append(role._role_path)\n 
searchpath.append(os.path.dirname(source))\n self._templar.environment.loader.searchpath = searchpath\n self._task.args['src'] = self._templar.template(template_data)\n", "path": "lib/ansible/plugins/action/asa_config.py" } ]
[ { "content": "#\n# (c) 2017, Red Hat, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\nimport re\nimport time\nimport glob\n\nfrom ansible.plugins.action.asa import ActionModule as _ActionModule\nfrom ansible.module_utils._text import to_text\nfrom ansible.module_utils.six.moves.urllib.parse import urlsplit\nfrom ansible.utils.vars import merge_hash\n\nPRIVATE_KEYS_RE = re.compile('__.+__')\n\n\nclass ActionModule(_ActionModule):\n\n def run(self, tmp=None, task_vars=None):\n\n if self._task.args.get('src'):\n try:\n self._handle_template()\n except ValueError as exc:\n return dict(failed=True, msg=exc.message)\n\n result = super(ActionModule, self).run(tmp, task_vars)\n del tmp # tmp no longer has any effect\n\n if self._task.args.get('backup') and result.get('__backup__'):\n # User requested backup and no error occurred in module.\n # NOTE: If there is a parameter error, _backup key may not be in results.\n filepath = self._write_backup(task_vars['inventory_hostname'],\n result['__backup__'])\n\n result['backup_path'] = filepath\n\n # strip out any keys that have two leading and two trailing\n # underscore characters\n for key in list(result):\n if PRIVATE_KEYS_RE.match(key):\n del result[key]\n\n return result\n\n def _get_working_path(self):\n cwd = self._loader.get_basedir()\n if self._task._role is not None:\n cwd = self._task._role._role_path\n return cwd\n\n def _write_backup(self, host, contents):\n backup_path = self._get_working_path() + '/backup'\n if not os.path.exists(backup_path):\n os.mkdir(backup_path)\n for fn in glob.glob('%s/%s*' % (backup_path, host)):\n os.remove(fn)\n tstamp = time.strftime(\"%Y-%m-%d@%H:%M:%S\", time.localtime(time.time()))\n filename = '%s/%s_config.%s' % (backup_path, host, tstamp)\n open(filename, 'w').write(contents)\n return filename\n\n def _handle_template(self):\n src = self._task.args.get('src')\n working_path = self._get_working_path()\n\n if os.path.isabs(src) or urlsplit('src').scheme:\n source = src\n else:\n source = self._loader.path_dwim_relative(working_path, 'templates', src)\n if not source:\n source = self._loader.path_dwim_relative(working_path, src)\n\n if not os.path.exists(source):\n raise ValueError('path specified in src not found')\n\n try:\n with open(source, 'r') as f:\n template_data = to_text(f.read())\n except IOError:\n return dict(failed=True, msg='unable to load src file')\n\n # Create a template search path in the following order:\n # [working_path, self_role_path, dependent_role_paths, dirname(source)]\n searchpath = [working_path]\n if self._task._role is not None:\n searchpath.append(self._task._role._role_path)\n if hasattr(self._task, \"_block:\"):\n dep_chain = self._task._block.get_dep_chain()\n if dep_chain is not None:\n for role in dep_chain:\n searchpath.append(role._role_path)\n 
searchpath.append(os.path.dirname(source))\n self._templar.environment.loader.searchpath = searchpath\n self._task.args['src'] = self._templar.template(template_data)\n", "path": "lib/ansible/plugins/action/asa_config.py" } ]
diff --git a/lib/ansible/plugins/action/asa_config.py b/lib/ansible/plugins/action/asa_config.py index bd8916b65a1fb1..8513d84616d109 100644 --- a/lib/ansible/plugins/action/asa_config.py +++ b/lib/ansible/plugins/action/asa_config.py @@ -55,7 +55,7 @@ def run(self, tmp=None, task_vars=None): # strip out any keys that have two leading and two trailing # underscore characters - for key in result.keys(): + for key in list(result): if PRIVATE_KEYS_RE.match(key): del result[key]
obspy__obspy-2187
Check if TauPy model is already loaded As a suggestion, I think it makes more sense to check whether `model` is already an instance of `TauPyModel` here: https://github.com/obspy/obspy/blob/6b43db0b6fec380e4d93d8d468eececcb74b4781/obspy/taup/tau.py#L942
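A minimal sketch of the suggested guard — the helper name below is invented; in the code as shown, `plot_travel_times` and `plot_ray_paths` call `TauPyModel(model)` unconditionally at the linked line, so passing an already-built model forces a reload:

```python
from obspy.taup import TauPyModel

def _as_taupy_model(model):
    # Only build a new TauPyModel when the caller passed a model name;
    # an already-instantiated model is reused as-is.
    if isinstance(model, TauPyModel):
        return model
    return TauPyModel(model)

iasp91 = _as_taupy_model("iasp91")   # built from the bundled model name
reused = _as_taupy_model(iasp91)     # an existing model passes straight through
assert reused is iasp91
```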
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nHigh-level interface to travel-time calculation routines.\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nimport copy\nimport warnings\n\nimport matplotlib as mpl\nimport matplotlib.cbook\nfrom matplotlib.cm import get_cmap\nimport matplotlib.text\nimport numpy as np\n\nfrom .helper_classes import Arrival\nfrom .tau_model import TauModel\nfrom .taup_create import TauPCreate\nfrom .taup_path import TauPPath\nfrom .taup_pierce import TauPPierce\nfrom .taup_time import TauPTime\nfrom .taup_geo import calc_dist, add_geo_to_arrivals\nfrom .utils import parse_phase_list\nimport obspy.geodetics.base as geodetics\n\n# Pretty paired colors. Reorder to have saturated colors first and remove\n# some colors at the end.\ncmap = get_cmap('Paired', lut=12)\nCOLORS = ['#%02x%02x%02x' % tuple(int(col * 255) for col in cmap(i)[:3])\n for i in range(12)]\nCOLORS = COLORS[1:][::2][:-1] + COLORS[::2][:-1]\n\n\nclass _SmartPolarText(matplotlib.text.Text):\n \"\"\"\n Automatically align text on polar plots to be away from axes.\n\n This class automatically sets the horizontal and vertical alignments\n based on which sides of the spherical axes the text is located.\n \"\"\"\n def draw(self, renderer, *args, **kwargs):\n fig = self.get_figure()\n midx = fig.get_figwidth() * fig.dpi / 2\n midy = fig.get_figheight() * fig.dpi / 2\n\n extent = self.get_window_extent(renderer, dpi=fig.dpi)\n points = extent.get_points()\n\n is_left = points[0, 0] < midx\n is_top = points[0, 1] > midy\n updated = False\n\n ha = 'right' if is_left else 'left'\n if self.get_horizontalalignment() != ha:\n self.set_horizontalalignment(ha)\n updated = True\n va = 'bottom' if is_top else 'top'\n if self.get_verticalalignment() != va:\n self.set_verticalalignment(va)\n updated = True\n\n if updated:\n self.update_bbox_position_size(renderer)\n\n matplotlib.text.Text.draw(self, renderer, *args, **kwargs)\n\n\nclass Arrivals(list):\n \"\"\"\n List like object of arrivals returned by :class:`TauPyModel` methods.\n\n :param arrivals: Initial arrivals to store.\n :type arrivals: :class:`list` of\n :class:`~obspy.taup.helper_classes.Arrival`\n :param model: The model used to calculate the arrivals.\n :type model: :class:`~TauPyModel`\n \"\"\"\n __slots__ = [\"model\"]\n\n def __init__(self, arrivals, model):\n super(Arrivals, self).__init__()\n self.model = model\n self.extend(arrivals)\n\n def __add__(self, other):\n if isinstance(other, Arrival):\n other = Arrivals([other], model=self.model)\n if not isinstance(other, Arrivals):\n raise TypeError\n return self.__class__(super(Arrivals, self).__add__(other),\n model=self.model)\n\n def __iadd__(self, other):\n if isinstance(other, Arrival):\n other = Arrivals([other], model=self.model)\n if not isinstance(other, Arrivals):\n raise TypeError\n self.extend(other)\n return self\n\n def __mul__(self, num):\n if not isinstance(num, int):\n raise TypeError(\"Integer expected\")\n arr = self.copy()\n for _i in range(num - 1):\n arr += self.copy()\n return arr\n\n def __imul__(self, num):\n if not isinstance(num, int):\n raise TypeError(\"Integer expected\")\n arr = self.copy()\n for _i in range(num - 1):\n self += arr\n return self\n\n def __setitem__(self, index, arrival):\n if (isinstance(index, slice) and\n all(isinstance(x, Arrival) for x in arrival)):\n super(Arrivals, self).__setitem__(index, arrival)\n elif isinstance(arrival, Arrival):\n 
super(Arrivals, self).__setitem__(index, arrival)\n else:\n msg = 'Only Arrival objects can be assigned.'\n raise TypeError(msg)\n\n def __setslice__(self, i, j, seq):\n if all(isinstance(x, Arrival) for x in seq):\n super(Arrivals, self).__setslice__(i, j, seq)\n else:\n msg = 'Only Arrival objects can be assigned.'\n raise TypeError(msg)\n\n def __getitem__(self, index):\n if isinstance(index, slice):\n return self.__class__(super(Arrivals, self).__getitem__(index),\n model=self.model)\n else:\n return super(Arrivals, self).__getitem__(index)\n\n def __getslice__(self, i, j):\n return self.__class__(super(Arrivals, self).__getslice__(i, j),\n model=self.model)\n\n def __str__(self):\n return (\n \"{count} arrivals\\n\\t{arrivals}\"\n ).format(\n count=len(self),\n arrivals=\"\\n\\t\".join([str(_i) for _i in self]))\n\n def __repr__(self):\n return \"[%s]\" % (\", \".join([repr(_i) for _i in self]))\n\n def append(self, arrival):\n if isinstance(arrival, Arrival):\n super(Arrivals, self).append(arrival)\n else:\n msg = 'Append only supports a single Arrival object as argument.'\n raise TypeError(msg)\n\n def copy(self):\n return self.__class__(super(Arrivals, self).copy(),\n model=self.model)\n\n def plot_times(self, phase_list=None, plot_all=True, legend=False,\n show=True, fig=None, ax=None):\n \"\"\"\n Plot arrival times if any have been calculated.\n\n :param phase_list: List of phases for which travel times are plotted,\n if they exist. See `Phase naming in taup`_ for details on\n phase naming and convenience keys like ``'ttbasic'``. Defaults to\n ``'ttall'``.\n :type phase_list: list of str\n :param plot_all: By default all rays, even those travelling in the\n other direction and thus arriving at a distance of *360 - x*\n degrees are shown. Set this to ``False`` to only show rays\n arriving at exactly *x* degrees.\n :type plot_all: bool\n :param legend: If boolean, specify whether or not to show the legend\n (at the default location.) If a str, specify the location of the\n legend.\n :type legend: bool or str\n :param show: Show the plot.\n :type show: bool\n :param fig: Figure instance to plot in. If not given, a new figure\n will be created.\n :type fig: :class:`matplotlib.figure.Figure`\n :param ax: Axes to plot in. 
If not given, a new figure with an axes\n will be created.\n :type ax: :class:`matplotlib.axes.Axes`\n :returns: Matplotlib axes with the plot\n :rtype: :class:`matplotlib.axes.Axes`\n \"\"\"\n import matplotlib.pyplot as plt\n\n if not self:\n raise ValueError(\"No travel times.\")\n\n if phase_list is None:\n phase_list = (\"ttall\",)\n\n phase_names = sorted(parse_phase_list(phase_list))\n\n # create an axis/figure, if there is none yet:\n if fig and ax:\n pass\n elif not fig and not ax:\n fig, ax = plt.subplots()\n elif not ax:\n ax = fig.add_subplot(1, 1, 1)\n elif not fig:\n fig = ax.figure\n\n # extract the time/distance for each phase, and for each distance:\n for arrival in self:\n if plot_all is False:\n dist = arrival.purist_distance % 360.0\n distance = arrival.distance\n if distance < 0:\n distance = (distance % 360)\n if abs(dist - distance) / dist > 1E-5:\n continue\n if arrival.name in phase_names:\n ax.plot(arrival.distance, arrival.time / 60, '.',\n label=arrival.name,\n color=COLORS[phase_names.index(arrival.name)\n % len(COLORS)])\n else:\n ax.plot(arrival.distance, arrival.time / 60, '.',\n label=arrival.name, color='k')\n if legend:\n if isinstance(legend, bool):\n if 0 <= arrival.distance <= 180.0:\n loc = \"upper left\"\n else:\n loc = \"upper right\"\n else:\n loc = legend\n ax.legend(loc=loc, prop=dict(size=\"small\"), numpoints=1)\n\n ax.grid()\n ax.set_xlabel('Distance (degrees)')\n ax.set_ylabel('Time (minutes)')\n if show:\n plt.show()\n return ax\n\n def plot_rays(self, phase_list=None, plot_type=\"spherical\",\n plot_all=True, legend=False, label_arrivals=False,\n show=True, fig=None, ax=None):\n \"\"\"\n Plot ray paths if any have been calculated.\n\n :param phase_list: List of phases for which ray paths are plotted,\n if they exist. See `Phase naming in taup`_ for details on\n phase naming and convenience keys like ``'ttbasic'``. Defaults to\n ``'ttall'``.\n :type phase_list: list of str\n :param plot_type: Either ``\"spherical\"`` or ``\"cartesian\"``.\n A spherical plot is always global whereas a Cartesian one can\n also be local.\n :type plot_type: str\n :param plot_all: By default all rays, even those travelling in the\n other direction and thus arriving at a distance of *360 - x*\n degrees are shown. Set this to ``False`` to only show rays\n arriving at exactly *x* degrees.\n :type plot_all: bool\n :param legend: If boolean, specify whether or not to show the legend\n (at the default location.) If a str, specify the location of the\n legend. If you are plotting a single phase, you may consider using\n the ``label_arrivals`` argument.\n :type legend: bool or str\n :param label_arrivals: Label the arrivals with their respective phase\n names. This setting is only useful if you are plotting a single\n phase as otherwise the names could be large and possibly overlap\n or clip. Consider using the ``legend`` parameter instead if you\n are plotting multiple phases.\n :type label_arrivals: bool\n :param show: Show the plot.\n :type show: bool\n :param fig: Figure to plot in. If not given, a new figure will be\n created.\n :type fig: :class:`matplotlib.figure.Figure`\n :param ax: Axes to plot in. If not given, a new figure with an axes\n will be created. 
Must be a polar axes for the spherical plot and\n a regular one for the Cartesian plot.\n :type ax: :class:`matplotlib.axes.Axes`\n :returns: Matplotlib axes with the plot\n :rtype: :class:`matplotlib.axes.Axes`\n \"\"\"\n import matplotlib.pyplot as plt\n\n # I don't get this, but without sorting, I get a different\n # order each call:\n\n if phase_list is None:\n phase_list = (\"ttall\",)\n\n phase_names = sorted(parse_phase_list(phase_list))\n arrivals = []\n for arrival in self:\n if arrival.path is None:\n continue\n dist = arrival.purist_distance % 360.0\n distance = arrival.distance\n if distance < 0:\n distance = (distance % 360)\n if abs(dist - distance) / dist > 1E-5:\n if plot_all is False:\n continue\n # Mirror on axis.\n arrival = copy.deepcopy(arrival)\n arrival.path[\"dist\"] *= -1.0\n arrivals.append(arrival)\n\n if not arrivals:\n raise ValueError(\"Can only plot arrivals with calculated ray \"\n \"paths.\")\n\n # get the velocity discontinuities in your model, for plotting:\n discons = self.model.s_mod.v_mod.get_discontinuity_depths()\n\n if plot_type == \"spherical\":\n if ax and not isinstance(ax, mpl.projections.polar.PolarAxes):\n msg = (\"Axes instance provided for plotting with \"\n \"`plot_type='spherical'` but it seems the axes is not \"\n \"a polar axes.\")\n warnings.warn(msg)\n if fig and ax:\n pass\n elif not fig and not ax:\n fig, ax = plt.subplots(subplot_kw=dict(projection='polar'))\n elif not ax:\n ax = fig.add_subplot(1, 1, 1, polar=True)\n elif not fig:\n fig = ax.figure\n\n ax.set_theta_zero_location('N')\n ax.set_theta_direction(-1)\n ax.set_xticks([])\n ax.set_yticks([])\n\n intp = matplotlib.cbook.simple_linear_interpolation\n radius = self.model.radius_of_planet\n for ray in arrivals:\n if ray.name in phase_names:\n # Requires interpolation,or diffracted phases look funny.\n ax.plot(intp(ray.path[\"dist\"], 100),\n radius - intp(ray.path[\"depth\"], 100),\n color=COLORS[phase_names.index(ray.name) %\n len(COLORS)], label=ray.name, lw=2.0)\n else:\n ax.plot(intp(ray.path[\"dist\"], 100),\n radius - intp(ray.path[\"depth\"], 100),\n color='k', label=ray.name, lw=2.0)\n ax.set_yticks(radius - discons)\n ax.xaxis.set_major_formatter(plt.NullFormatter())\n ax.yaxis.set_major_formatter(plt.NullFormatter())\n\n # Pretty earthquake marker.\n ax.plot([0], [radius - arrivals[0].source_depth],\n marker=\"*\", color=\"#FEF215\", markersize=20, zorder=10,\n markeredgewidth=1.5, markeredgecolor=\"0.3\",\n clip_on=False)\n\n # Pretty station marker.\n arrowprops = dict(arrowstyle='-|>,head_length=0.8,'\n 'head_width=0.5',\n color='#C95241', lw=1.5)\n station_radius = radius - arrivals[0].receiver_depth\n ax.annotate('',\n xy=(np.deg2rad(distance), station_radius),\n xycoords='data',\n xytext=(np.deg2rad(distance),\n station_radius + radius * 0.02),\n textcoords='data',\n arrowprops=arrowprops,\n clip_on=False)\n arrowprops = dict(arrowstyle='-|>,head_length=1.0,'\n 'head_width=0.6',\n color='0.3', lw=1.5, fill=False)\n ax.annotate('',\n xy=(np.deg2rad(distance), station_radius),\n xycoords='data',\n xytext=(np.deg2rad(distance),\n station_radius + radius * 0.01),\n textcoords='data',\n arrowprops=arrowprops,\n clip_on=False)\n if label_arrivals:\n name = ','.join(sorted(set(ray.name for ray in arrivals)))\n # We cannot just set the text of the annotations above because\n # it changes the arrow path.\n t = _SmartPolarText(np.deg2rad(distance),\n station_radius + radius * 0.1,\n name, clip_on=False)\n ax.add_artist(t)\n\n ax.set_rmax(radius)\n ax.set_rmin(0.0)\n\n 
if legend:\n if isinstance(legend, bool):\n if 0 <= distance <= 180.0:\n loc = \"upper left\"\n else:\n loc = \"upper right\"\n else:\n loc = legend\n ax.legend(loc=loc, prop=dict(size=\"small\"))\n\n elif plot_type == \"cartesian\":\n if ax and isinstance(ax, mpl.projections.polar.PolarAxes):\n msg = (\"Axes instance provided for plotting with \"\n \"`plot_type='cartesian'` but it seems the axes is \"\n \"a polar axes.\")\n warnings.warn(msg)\n if fig and ax:\n pass\n elif not fig and not ax:\n fig, ax = plt.subplots()\n ax.invert_yaxis()\n elif not ax:\n ax = fig.add_subplot(1, 1, 1)\n ax.invert_yaxis()\n elif not fig:\n fig = ax.figure\n\n # Plot the ray paths:\n for ray in arrivals:\n if ray.name in phase_names:\n ax.plot(np.rad2deg(ray.path[\"dist\"]), ray.path[\"depth\"],\n color=COLORS[phase_names.index(ray.name) %\n len(COLORS)],\n label=ray.name, lw=2.0)\n else:\n ax.plot(np.rad2deg(ray.path[\"dist\"]), ray.path[\"depth\"],\n color='k', label=ray.name, lw=2.0)\n\n # Pretty station marker:\n ms = 14\n station_marker_transform = matplotlib.transforms.offset_copy(\n ax.transData,\n fig=ax.get_figure(),\n y=ms / 2.0,\n units=\"points\")\n ax.plot([distance], [arrivals[0].receiver_depth],\n marker=\"v\", color=\"#C95241\",\n markersize=ms, zorder=10, markeredgewidth=1.5,\n markeredgecolor=\"0.3\", clip_on=False,\n transform=station_marker_transform)\n if label_arrivals:\n name = ','.join(sorted(set(ray.name for ray in arrivals)))\n ax.annotate(name,\n xy=(distance, arrivals[0].receiver_depth),\n xytext=(0, ms * 1.5),\n textcoords='offset points',\n ha='center', annotation_clip=False)\n\n # Pretty earthquake marker.\n ax.plot([0], [arrivals[0].source_depth], marker=\"*\",\n color=\"#FEF215\", markersize=20, zorder=10,\n markeredgewidth=1.5, markeredgecolor=\"0.3\",\n clip_on=False)\n\n # lines of major discontinuities:\n x = ax.get_xlim()\n y = ax.get_ylim()\n for depth in discons:\n if not (y[1] <= depth <= y[0]):\n continue\n ax.hlines(depth, x[0], x[1], color=\"0.5\", zorder=-1)\n\n # Plot some more station markers if necessary.\n possible_distances = [_i * (distance + 360.0)\n for _i in range(1, 10)]\n possible_distances += [-_i * (360.0 - distance) for _i in\n range(1, 10)]\n possible_distances = [_i for _i in possible_distances\n if x[0] <= _i <= x[1]]\n if possible_distances:\n ax.plot(possible_distances, [arrivals[0].receiver_depth]\n * len(possible_distances),\n marker=\"v\", color=\"#C95241\",\n markersize=ms, zorder=10, markeredgewidth=1.5,\n markeredgecolor=\"0.3\", clip_on=False, lw=0,\n transform=station_marker_transform)\n if legend:\n if isinstance(legend, bool):\n loc = \"lower left\"\n else:\n loc = legend\n ax.legend(loc=loc, prop=dict(size=\"small\"))\n ax.set_xlabel(\"Distance [deg]\")\n ax.set_ylabel(\"Depth [km]\")\n else:\n msg = \"Plot type '{}' is not a valid option.\".format(plot_type)\n raise ValueError(msg)\n if show:\n plt.show()\n return ax\n\n def plot(self, plot_type=\"spherical\", plot_all=True, legend=True,\n label_arrivals=False, ax=None, show=True):\n \"\"\"\n Plot ray paths if any have been calculated.\n\n :param plot_type: Either ``\"spherical\"`` or ``\"cartesian\"``.\n A spherical plot is always global whereas a Cartesian one can\n also be local.\n :type plot_type: str\n :param plot_all: By default all rays, even those travelling in the\n other direction and thus arriving at a distance of *360 - x*\n degrees are shown. 
Set this to ``False`` to only show rays\n arriving at exactly *x* degrees.\n :type plot_all: bool\n :param legend: If boolean, specify whether or not to show the legend\n (at the default location.) If a str, specify the location of the\n legend. If you are plotting a single phase, you may consider using\n the ``label_arrivals`` argument.\n :type legend: bool or str\n :param label_arrivals: Label the arrivals with their respective phase\n names. This setting is only useful if you are plotting a single\n phase as otherwise the names could be large and possibly overlap\n or clip. Consider using the ``legend`` parameter instead if you\n are plotting multiple phases.\n :type label_arrivals: bool\n :param show: Show the plot.\n :type show: bool\n :param fig: Figure to plot in. If not given, a new figure will be\n created.\n :type fig: :class:`matplotlib.figure.Figure`\n :param ax: Axes to plot in. If not given, a new figure with an axes\n will be created. Must be a polar axes for the spherical plot and\n a regular one for the Cartesian plot.\n :type ax: :class:`matplotlib.axes.Axes`\n :returns: Matplotlib axes with the plot\n :rtype: :class:`matplotlib.axes.Axes`\n\n .. versionchanged:: 1.1.0\n\n Deprecated.\n\n With the introduction of plot_times(), plot() has been renamed to\n plot_rays()\n \"\"\"\n\n # display warning\n from obspy.core.util.deprecation_helpers import ObsPyDeprecationWarning\n warnings.warn(\"The plot() function is deprecated. Please use \"\n \"arrivals.plot_rays()\",\n ObsPyDeprecationWarning, stacklevel=2)\n\n # call plot_rays, but with added fig and phase_list parameters:\n return self.plot_rays(plot_type=plot_type,\n plot_all=plot_all,\n legend=legend,\n label_arrivals=label_arrivals,\n ax=ax,\n fig=None,\n show=show,\n phase_list=(\"ttall\",))\n\n\nclass TauPyModel(object):\n \"\"\"\n Representation of a seismic model and methods for ray paths through it.\n \"\"\"\n\n def __init__(self, model=\"iasp91\", verbose=False, planet_flattening=0.0,\n cache=None):\n \"\"\"\n Loads an already created TauPy model.\n\n :param model: The model name. Either an internal TauPy model or a\n filename in the case of custom models.\n :param planet_flattening: Flattening parameter for the planet's\n ellipsoid (i.e. (a-b)/a, where a is the semimajor equatorial radius\n and b is the semiminor polar radius). A value of 0 (the default)\n gives a spherical planet. Note that this is only used to convert\n from geographical positions (source and receiver latitudes and\n longitudes) to epicentral distances - the actual traveltime and\n raypath calculations are performed on a spherical planet.\n :type planet_flattening: float\n :param cache: An object to use to cache models split at source depths.\n Generating results requires splitting a model at the source depth,\n which may be expensive. The cache allows faster calculation when\n multiple results are requested for the same source depth. The\n dictionary must be ordered, otherwise the LRU cache will not\n behave correctly. 
If ``False`` is specified, then no cache will be\n used.\n :type cache: :class:`collections.OrderedDict` or bool\n\n Usage:\n\n >>> from obspy.taup import tau\n >>> i91 = tau.TauPyModel()\n >>> print(i91.get_travel_times(10, 20)[0].name)\n P\n >>> i91.get_travel_times(10, 20)[0].time # doctest: +ELLIPSIS\n 272.675...\n >>> len(i91.get_travel_times(100, 50, phase_list = [\"P\", \"S\"]))\n 2\n \"\"\"\n self.verbose = verbose\n self.model = TauModel.from_file(model, cache=cache)\n self.planet_flattening = planet_flattening\n\n def get_travel_times(self, source_depth_in_km, distance_in_degree=None,\n phase_list=(\"ttall\",), receiver_depth_in_km=0.0):\n \"\"\"\n Return travel times of every given phase.\n\n :param source_depth_in_km: Source depth in km\n :type source_depth_in_km: float\n :param distance_in_degree: Epicentral distance in degrees.\n :type distance_in_degree: float\n :param phase_list: List of phases for which travel times should be\n calculated. If this is empty, all phases in arrivals object\n will be used.\n :type phase_list: list of str\n :param receiver_depth_in_km: Receiver depth in km\n :type receiver_depth_in_km: float\n\n :return: List of ``Arrival`` objects, each of which has the time,\n corresponding phase name, ray parameter, takeoff angle, etc. as\n attributes.\n :rtype: :class:`Arrivals`\n \"\"\"\n # Accessing the arrivals not just by list indices but by phase name\n # might be useful, but also difficult: several arrivals can have the\n # same phase.\n tt = TauPTime(self.model, phase_list, source_depth_in_km,\n distance_in_degree, receiver_depth_in_km)\n tt.run()\n return Arrivals(sorted(tt.arrivals, key=lambda x: x.time),\n model=self.model)\n\n def get_pierce_points(self, source_depth_in_km, distance_in_degree,\n phase_list=(\"ttall\",), receiver_depth_in_km=0.0):\n \"\"\"\n Return pierce points of every given phase.\n\n :param source_depth_in_km: Source depth in km\n :type source_depth_in_km: float\n :param distance_in_degree: Epicentral distance in degrees.\n :type distance_in_degree: float\n :param phase_list: List of phases for which travel times should be\n calculated. If this is empty, all phases in arrivals object\n will be used.\n :type phase_list: list of str\n :param receiver_depth_in_km: Receiver depth in km\n :type receiver_depth_in_km: float\n\n :return: List of ``Arrival`` objects, each of which has the time,\n corresponding phase name, ray parameter, takeoff angle, etc. as\n attributes.\n :rtype: :class:`Arrivals`\n \"\"\"\n pp = TauPPierce(self.model, phase_list, source_depth_in_km,\n distance_in_degree, receiver_depth_in_km)\n pp.run()\n return Arrivals(sorted(pp.arrivals, key=lambda x: x.time),\n model=self.model)\n\n def get_ray_paths(self, source_depth_in_km, distance_in_degree=None,\n phase_list=(\"ttall\",), receiver_depth_in_km=0.0):\n \"\"\"\n Return ray paths of every given phase.\n\n :param source_depth_in_km: Source depth in km\n :type source_depth_in_km: float\n :param distance_in_degree: Epicentral distance in degrees.\n :type distance_in_degree: float\n :param phase_list: List of phases for which travel times should be\n calculated. If this is empty, all phases in arrivals object\n will be used.\n :type phase_list: list of str\n :param receiver_depth_in_km: Receiver depth in km\n :type receiver_depth_in_km: float\n\n :return: List of ``Arrival`` objects, each of which has the time,\n corresponding phase name, ray parameter, takeoff angle, etc. 
as\n attributes.\n :rtype: :class:`Arrivals`\n \"\"\"\n rp = TauPPath(self.model, phase_list, source_depth_in_km,\n distance_in_degree, receiver_depth_in_km)\n rp.run()\n return Arrivals(sorted(rp.arrivals, key=lambda x: x.time),\n model=self.model)\n\n def get_travel_times_geo(self, source_depth_in_km, source_latitude_in_deg,\n source_longitude_in_deg, receiver_latitude_in_deg,\n receiver_longitude_in_deg, phase_list=(\"ttall\",)):\n \"\"\"\n Return travel times of every given phase given geographical data.\n\n .. note::\n\n Note that the conversion from source and receiver latitudes and\n longitudes to epicentral distances respects the model's flattening\n parameter, so this calculation can be performed for a ellipsoidal\n or spherical planet. However, the actual traveltime and raypath\n calculations are performed on a spherical planet. Ellipticity\n corrections of e.g. [Dziewonski1976]_ are not made.\n\n :param source_depth_in_km: Source depth in km\n :type source_depth_in_km: float\n :param source_latitude_in_deg: Source latitude in degrees\n :type source_latitude_in_deg: float\n :param source_longitude_in_deg: Source longitude in degrees\n :type source_longitude_in_deg: float\n :param receiver_latitude_in_deg: Receiver latitude in degrees\n :type receiver_latitude_in_deg: float\n :param receiver_longitude_in_deg: Receiver longitude in degrees\n :type receiver_longitude_in_deg: float\n :param phase_list: List of phases for which travel times should be\n calculated. If this is empty, all phases in arrivals object\n will be used.\n :type phase_list: list of str\n\n :return: List of ``Arrival`` objects, each of which has the time,\n corresponding phase name, ray parameter, takeoff angle, etc. as\n attributes.\n :rtype: :class:`Arrivals`\n \"\"\"\n distance_in_deg = calc_dist(source_latitude_in_deg,\n source_longitude_in_deg,\n receiver_latitude_in_deg,\n receiver_longitude_in_deg,\n self.model.radius_of_planet,\n self.planet_flattening)\n arrivals = self.get_travel_times(source_depth_in_km, distance_in_deg,\n phase_list)\n return arrivals\n\n def get_pierce_points_geo(self, source_depth_in_km, source_latitude_in_deg,\n source_longitude_in_deg,\n receiver_latitude_in_deg,\n receiver_longitude_in_deg,\n phase_list=(\"ttall\",),\n resample=False):\n \"\"\"\n Return ray paths of every given phase with geographical info.\n\n .. note::\n\n Note that the conversion from source and receiver latitudes and\n longitudes to epicentral distances respects the model's flattening\n parameter, so this calculation can be performed for a ellipsoidal\n or spherical planet. However, the actual traveltime and raypath\n calculations are performed on a spherical planet. Ellipticity\n corrections of e.g. [Dziewonski1976]_ are not made.\n\n :param source_depth_in_km: Source depth in km\n :type source_depth_in_km: float\n :param source_latitude_in_deg: Source latitude in degrees\n :type source_latitude_in_deg: float\n :param source_longitude_in_deg: Source longitue in degrees\n :type source_longitude_in_deg: float\n :param receiver_latitude_in_deg: Receiver latitude in degrees\n :type receiver_latitude_in_deg: float\n :param receiver_longitude_in_deg: Receiver longitude in degrees\n :type receiver_longitude_in_deg: float\n :param phase_list: List of phases for which travel times should be\n calculated. If this is empty, all phases in arrivals object\n will be used.\n :type phase_list: list of str\n :param resample: adds sample points to allow for easy cartesian\n interpolation. 
This is especially useful for phases\n like Pdiff.\n :type resample: boolean\n :return: List of ``Arrival`` objects, each of which has the time,\n corresponding phase name, ray parameter, takeoff angle, etc. as\n attributes.\n :rtype: :class:`Arrivals`\n \"\"\"\n distance_in_deg = calc_dist(source_latitude_in_deg,\n source_longitude_in_deg,\n receiver_latitude_in_deg,\n receiver_longitude_in_deg,\n self.model.radius_of_planet,\n self.planet_flattening)\n\n arrivals = self.get_pierce_points(source_depth_in_km, distance_in_deg,\n phase_list)\n\n if geodetics.HAS_GEOGRAPHICLIB:\n arrivals = add_geo_to_arrivals(arrivals, source_latitude_in_deg,\n source_longitude_in_deg,\n receiver_latitude_in_deg,\n receiver_longitude_in_deg,\n self.model.radius_of_planet,\n self.planet_flattening,\n resample=resample)\n else:\n msg = \"Not able to evaluate positions of pierce points. \" + \\\n \"Arrivals object will not be modified. \" + \\\n \"Install the Python module 'geographiclib' to solve \" + \\\n \"this issue.\"\n warnings.warn(msg)\n\n return arrivals\n\n def get_ray_paths_geo(self, source_depth_in_km, source_latitude_in_deg,\n source_longitude_in_deg, receiver_latitude_in_deg,\n receiver_longitude_in_deg, phase_list=(\"ttall\",),\n resample=False):\n \"\"\"\n Return ray paths of every given phase with geographical info.\n\n .. note::\n\n Note that the conversion from source and receiver latitudes and\n longitudes to epicentral distances respects the model's flattening\n parameter, so this calculation can be performed for a ellipsoidal\n or spherical planet. However, the actual traveltime and raypath\n calculations are performed on a spherical planet. Ellipticity\n corrections of e.g. [Dziewonski1976]_ are not made.\n\n :param source_depth_in_km: Source depth in km\n :type source_depth_in_km: float\n :param source_latitude_in_deg: Source latitude in degrees\n :type source_latitude_in_deg: float\n :param source_longitude_in_deg: Source longitue in degrees\n :type source_longitude_in_deg: float\n :param receiver_latitude_in_deg: Receiver latitude in degrees\n :type receiver_latitude_in_deg: float\n :param receiver_longitude_in_deg: Receiver longitude in degrees\n :type receiver_longitude_in_deg: float\n :param phase_list: List of phases for which travel times should be\n calculated. If this is empty, all phases in arrivals object\n will be used.\n :type phase_list: list of str\n :return: List of ``Arrival`` objects, each of which has the time,\n corresponding phase name, ray parameter, takeoff angle, etc. as\n attributes.\n :rtype: :class:`Arrivals`\n \"\"\"\n distance_in_deg = calc_dist(source_latitude_in_deg,\n source_longitude_in_deg,\n receiver_latitude_in_deg,\n receiver_longitude_in_deg,\n self.model.radius_of_planet,\n self.planet_flattening)\n\n arrivals = self.get_ray_paths(source_depth_in_km, distance_in_deg,\n phase_list)\n\n if geodetics.HAS_GEOGRAPHICLIB:\n arrivals = add_geo_to_arrivals(arrivals, source_latitude_in_deg,\n source_longitude_in_deg,\n receiver_latitude_in_deg,\n receiver_longitude_in_deg,\n self.model.radius_of_planet,\n self.planet_flattening,\n resample=resample)\n else:\n msg = \"Not able to evaluate positions of points on path. \" + \\\n \"Arrivals object will not be modified. 
\" + \\\n \"Install the Python module 'geographiclib' to solve \" + \\\n \"this issue.\"\n warnings.warn(msg)\n\n return arrivals\n\n\ndef create_taup_model(model_name, output_dir, input_dir):\n \"\"\"\n Create a .taup model from a .tvel file.\n\n :param model_name:\n :param output_dir:\n \"\"\"\n if \".\" in model_name:\n model_file_name = model_name\n else:\n model_file_name = model_name + \".tvel\"\n TauPCreate.main(model_file_name, output_dir, input_dir)\n\n\ndef plot_travel_times(source_depth, phase_list=(\"ttbasic\",), min_degrees=0,\n max_degrees=180, npoints=50, model='iasp91',\n plot_all=True, legend=True, verbose=False, fig=None,\n ax=None, show=True):\n \"\"\"\n Returns a travel time plot and any created axis instance of this\n plot.\n\n :param source_depth: Source depth in kilometers.\n :type source_depth: float\n :param min_degrees: minimum distance from the source (in degrees)\n :type min_degrees: float\n :param max_degrees: maximum distance from the source (in degrees)\n :type max_degrees: float\n :param npoints: Number of points to plot.\n :type npoints: int\n :param phase_list: List of phase names to plot.\n :type phase_list: list of str, optional\n :param model: string containing the model to use.\n :type model: str\n :param plot_all: By default all rays, even those travelling in the\n other direction and thus arriving at a distance of *360 - x*\n degrees are shown. Set this to ``False`` to only show rays\n arriving at exactly *x* degrees.\n :type plot_all: bool\n :param legend: Whether or not to show the legend\n :type legend: bool\n :param verbose: Whether to print information about epicentral distances\n that did not have an arrival.\n :type verbose: bool\n :param fig: Figure to plot in. If not given, a new figure instance\n will be created.\n :type fig: :class:`matplotlib.axes.Axes\n :param ax: Axes to plot in. If not given, a new figure with an axes\n will be created.\n param show: Show the plot.\n type show: bool\n :type ax: :class:`matplotlib.Figure.figure`\n :returns: ax\n :rtype: :class:`matplotlib.axes.Axes`\n\n .. rubric:: Example\n\n >>> from obspy.taup import plot_travel_times\n >>> ax = plot_travel_times(source_depth=10, phase_list=['P', 'S', 'PP'])\n\n .. plot::\n\n from obspy.taup import plot_travel_times\n ax = plot_travel_times(source_depth=10, phase_list=['P','S','PP'])\n \"\"\"\n import matplotlib.pyplot as plt\n\n # compute the requested arrivals:\n model = TauPyModel(model)\n\n # a list of epicentral distances without a travel time, and a flag:\n notimes = []\n plotted = False\n\n # calculate the arrival times and plot vs. 
epicentral distance:\n degrees = np.linspace(min_degrees, max_degrees, npoints)\n for degree in degrees:\n try:\n arrivals = model.get_ray_paths(source_depth, degree,\n phase_list=phase_list)\n ax = arrivals.plot_times(phase_list=phase_list, show=False,\n ax=ax, plot_all=plot_all)\n plotted = True\n except ValueError:\n notimes.append(degree)\n\n if plotted:\n if verbose:\n if len(notimes) == 1:\n tmpl = \"There was {} epicentral distance without an arrival\"\n else:\n tmpl = \"There were {} epicentral distances without an arrival\"\n print(tmpl.format(len(notimes)))\n else:\n raise ValueError(\"No arrival times to plot.\")\n\n if legend:\n # merge all arrival labels of a certain phase:\n handles, labels = ax.get_legend_handles_labels()\n labels, ids = np.unique(labels, return_index=True)\n handles = [handles[i] for i in ids]\n ax.legend(handles, labels, loc=2, numpoints=1)\n\n if show:\n plt.show()\n return ax\n\n\ndef plot_ray_paths(source_depth, min_degrees=0, max_degrees=360, npoints=10,\n plot_type='spherical', phase_list=['P', 'S', 'PP'],\n model='iasp91', plot_all=True, legend=False,\n label_arrivals=False, verbose=False, fig=None, show=True,\n ax=None):\n \"\"\"\n Plot ray paths for seismic phases.\n\n :param source_depth: Source depth in kilometers.\n :type source_depth: float\n :param min_degrees: minimum distance from the source (in degrees).\n :type min_degrees: float\n :param max_degrees: maximum distance from the source (in degrees).\n :type max_degrees: float\n :param npoints: Number of receivers to plot.\n :type npoints: int\n :param plot_type: type of plot to create.\n :type plot_type: str\n :param phase_list: List of phase names.\n :type phase_list: list of str\n :param model: Model name.\n :type model: str\n :param plot_all: By default all rays, even those travelling in the\n other direction and thus arriving at a distance of *360 - x*\n degrees are shown. Set this to ``False`` to only show rays\n arriving at exactly *x* degrees.\n :type plot_all: bool\n :param legend: Whether or not to show the legend\n :type legend: bool\n :param label_arrivals: Label the arrivals with their respective phase\n names. This setting is only useful if you are plotting a single\n phase as otherwise the names could be large and possibly overlap\n or clip. Consider using the ``legend`` parameter instead if you\n are plotting multiple phases.\n :type label_arrivals: bool\n :param verbose: Whether to print information about selected phases that\n were not encountered at individual epicentral distances.\n :type verbose: bool\n :param fig: Figure to plot into. If not given, a new figure instance\n will be created.\n :type fig: :class:`matplotlib.figure.Figure`\n :param show: Show the plot.\n :type show: bool\n :param ax: Axes to plot in. If not given, a new figure with an axes\n will be created.\n :type ax: :class:`matplotlib.axes.Axes`\n :returns: Matplotlib axes with the plot\n :rtype: :class:`matplotlib.axes.Axes`\n\n .. rubric:: Example\n\n >>> from obspy.taup.tau import plot_ray_paths\n >>> import matplotlib.pyplot as plt\n >>> fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(polar=True))\n >>> ax = plot_ray_paths(source_depth=10, plot_type=\"spherical\",\n ... ax=ax, fig=fig, legend=True,\n ... phase_list=['P', 'S', 'PP'], verbose=True)\n There were rays for all but the following epicentral distances:\n [0.0, 360.0]\n\n .. 
plot::\n\n from obspy.taup.tau import plot_ray_paths\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(polar=True))\n ax = plot_ray_paths(source_depth=10, plot_type=\"spherical\",\n ax=ax, fig=fig, legend=True,\n phase_list=['P','S','PP'])\n \"\"\"\n import matplotlib.pyplot as plt\n model = TauPyModel(model)\n\n # set up a list of epicentral distances without a ray, and a flag:\n norays = []\n plotted = False\n\n # calculate the arrival times and plot vs. epicentral distance:\n degrees = np.linspace(min_degrees, max_degrees, npoints)\n for degree in degrees:\n try:\n arrivals = model.get_ray_paths(source_depth, degree,\n phase_list=phase_list)\n ax = arrivals.plot_rays(phase_list=phase_list, show=False,\n ax=ax, plot_type=plot_type,\n plot_all=plot_all, legend=False)\n plotted = True\n except ValueError:\n norays.append(degree)\n\n if plotted:\n if verbose:\n print(\"There were rays for all but the following epicentral \"\n \"distances:\\n\", norays)\n else:\n raise ValueError(\"No ray paths to plot.\")\n\n if legend:\n # merge all arrival labels of a certain phase:\n handles, labels = ax.get_legend_handles_labels()\n labels, ids = np.unique(labels, return_index=True)\n handles = [handles[i] for i in ids]\n ax.legend(handles, labels, loc=2, numpoints=1)\n\n if show:\n plt.show()\n return ax\n", "path": "obspy/taup/tau.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nHigh-level interface to travel-time calculation routines.\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nimport copy\nimport warnings\n\nimport matplotlib as mpl\nimport matplotlib.cbook\nfrom matplotlib.cm import get_cmap\nimport matplotlib.text\nimport numpy as np\n\nfrom .helper_classes import Arrival\nfrom .tau_model import TauModel\nfrom .taup_create import TauPCreate\nfrom .taup_path import TauPPath\nfrom .taup_pierce import TauPPierce\nfrom .taup_time import TauPTime\nfrom .taup_geo import calc_dist, add_geo_to_arrivals\nfrom .utils import parse_phase_list\nimport obspy.geodetics.base as geodetics\n\n# Pretty paired colors. Reorder to have saturated colors first and remove\n# some colors at the end.\ncmap = get_cmap('Paired', lut=12)\nCOLORS = ['#%02x%02x%02x' % tuple(int(col * 255) for col in cmap(i)[:3])\n for i in range(12)]\nCOLORS = COLORS[1:][::2][:-1] + COLORS[::2][:-1]\n\n\nclass _SmartPolarText(matplotlib.text.Text):\n \"\"\"\n Automatically align text on polar plots to be away from axes.\n\n This class automatically sets the horizontal and vertical alignments\n based on which sides of the spherical axes the text is located.\n \"\"\"\n def draw(self, renderer, *args, **kwargs):\n fig = self.get_figure()\n midx = fig.get_figwidth() * fig.dpi / 2\n midy = fig.get_figheight() * fig.dpi / 2\n\n extent = self.get_window_extent(renderer, dpi=fig.dpi)\n points = extent.get_points()\n\n is_left = points[0, 0] < midx\n is_top = points[0, 1] > midy\n updated = False\n\n ha = 'right' if is_left else 'left'\n if self.get_horizontalalignment() != ha:\n self.set_horizontalalignment(ha)\n updated = True\n va = 'bottom' if is_top else 'top'\n if self.get_verticalalignment() != va:\n self.set_verticalalignment(va)\n updated = True\n\n if updated:\n self.update_bbox_position_size(renderer)\n\n matplotlib.text.Text.draw(self, renderer, *args, **kwargs)\n\n\nclass Arrivals(list):\n \"\"\"\n List like object of arrivals returned by :class:`TauPyModel` methods.\n\n :param arrivals: Initial arrivals to store.\n :type arrivals: :class:`list` of\n :class:`~obspy.taup.helper_classes.Arrival`\n :param model: The model used to calculate the arrivals.\n :type model: :class:`~TauPyModel`\n \"\"\"\n __slots__ = [\"model\"]\n\n def __init__(self, arrivals, model):\n super(Arrivals, self).__init__()\n self.model = model\n self.extend(arrivals)\n\n def __add__(self, other):\n if isinstance(other, Arrival):\n other = Arrivals([other], model=self.model)\n if not isinstance(other, Arrivals):\n raise TypeError\n return self.__class__(super(Arrivals, self).__add__(other),\n model=self.model)\n\n def __iadd__(self, other):\n if isinstance(other, Arrival):\n other = Arrivals([other], model=self.model)\n if not isinstance(other, Arrivals):\n raise TypeError\n self.extend(other)\n return self\n\n def __mul__(self, num):\n if not isinstance(num, int):\n raise TypeError(\"Integer expected\")\n arr = self.copy()\n for _i in range(num - 1):\n arr += self.copy()\n return arr\n\n def __imul__(self, num):\n if not isinstance(num, int):\n raise TypeError(\"Integer expected\")\n arr = self.copy()\n for _i in range(num - 1):\n self += arr\n return self\n\n def __setitem__(self, index, arrival):\n if (isinstance(index, slice) and\n all(isinstance(x, Arrival) for x in arrival)):\n super(Arrivals, self).__setitem__(index, arrival)\n elif isinstance(arrival, Arrival):\n 
super(Arrivals, self).__setitem__(index, arrival)\n else:\n msg = 'Only Arrival objects can be assigned.'\n raise TypeError(msg)\n\n def __setslice__(self, i, j, seq):\n if all(isinstance(x, Arrival) for x in seq):\n super(Arrivals, self).__setslice__(i, j, seq)\n else:\n msg = 'Only Arrival objects can be assigned.'\n raise TypeError(msg)\n\n def __getitem__(self, index):\n if isinstance(index, slice):\n return self.__class__(super(Arrivals, self).__getitem__(index),\n model=self.model)\n else:\n return super(Arrivals, self).__getitem__(index)\n\n def __getslice__(self, i, j):\n return self.__class__(super(Arrivals, self).__getslice__(i, j),\n model=self.model)\n\n def __str__(self):\n return (\n \"{count} arrivals\\n\\t{arrivals}\"\n ).format(\n count=len(self),\n arrivals=\"\\n\\t\".join([str(_i) for _i in self]))\n\n def __repr__(self):\n return \"[%s]\" % (\", \".join([repr(_i) for _i in self]))\n\n def append(self, arrival):\n if isinstance(arrival, Arrival):\n super(Arrivals, self).append(arrival)\n else:\n msg = 'Append only supports a single Arrival object as argument.'\n raise TypeError(msg)\n\n def copy(self):\n return self.__class__(super(Arrivals, self).copy(),\n model=self.model)\n\n def plot_times(self, phase_list=None, plot_all=True, legend=False,\n show=True, fig=None, ax=None):\n \"\"\"\n Plot arrival times if any have been calculated.\n\n :param phase_list: List of phases for which travel times are plotted,\n if they exist. See `Phase naming in taup`_ for details on\n phase naming and convenience keys like ``'ttbasic'``. Defaults to\n ``'ttall'``.\n :type phase_list: list of str\n :param plot_all: By default all rays, even those travelling in the\n other direction and thus arriving at a distance of *360 - x*\n degrees are shown. Set this to ``False`` to only show rays\n arriving at exactly *x* degrees.\n :type plot_all: bool\n :param legend: If boolean, specify whether or not to show the legend\n (at the default location.) If a str, specify the location of the\n legend.\n :type legend: bool or str\n :param show: Show the plot.\n :type show: bool\n :param fig: Figure instance to plot in. If not given, a new figure\n will be created.\n :type fig: :class:`matplotlib.figure.Figure`\n :param ax: Axes to plot in. 
If not given, a new figure with an axes\n will be created.\n :type ax: :class:`matplotlib.axes.Axes`\n :returns: Matplotlib axes with the plot\n :rtype: :class:`matplotlib.axes.Axes`\n \"\"\"\n import matplotlib.pyplot as plt\n\n if not self:\n raise ValueError(\"No travel times.\")\n\n if phase_list is None:\n phase_list = (\"ttall\",)\n\n phase_names = sorted(parse_phase_list(phase_list))\n\n # create an axis/figure, if there is none yet:\n if fig and ax:\n pass\n elif not fig and not ax:\n fig, ax = plt.subplots()\n elif not ax:\n ax = fig.add_subplot(1, 1, 1)\n elif not fig:\n fig = ax.figure\n\n # extract the time/distance for each phase, and for each distance:\n for arrival in self:\n if plot_all is False:\n dist = arrival.purist_distance % 360.0\n distance = arrival.distance\n if distance < 0:\n distance = (distance % 360)\n if abs(dist - distance) / dist > 1E-5:\n continue\n if arrival.name in phase_names:\n ax.plot(arrival.distance, arrival.time / 60, '.',\n label=arrival.name,\n color=COLORS[phase_names.index(arrival.name)\n % len(COLORS)])\n else:\n ax.plot(arrival.distance, arrival.time / 60, '.',\n label=arrival.name, color='k')\n if legend:\n if isinstance(legend, bool):\n if 0 <= arrival.distance <= 180.0:\n loc = \"upper left\"\n else:\n loc = \"upper right\"\n else:\n loc = legend\n ax.legend(loc=loc, prop=dict(size=\"small\"), numpoints=1)\n\n ax.grid()\n ax.set_xlabel('Distance (degrees)')\n ax.set_ylabel('Time (minutes)')\n if show:\n plt.show()\n return ax\n\n def plot_rays(self, phase_list=None, plot_type=\"spherical\",\n plot_all=True, legend=False, label_arrivals=False,\n show=True, fig=None, ax=None):\n \"\"\"\n Plot ray paths if any have been calculated.\n\n :param phase_list: List of phases for which ray paths are plotted,\n if they exist. See `Phase naming in taup`_ for details on\n phase naming and convenience keys like ``'ttbasic'``. Defaults to\n ``'ttall'``.\n :type phase_list: list of str\n :param plot_type: Either ``\"spherical\"`` or ``\"cartesian\"``.\n A spherical plot is always global whereas a Cartesian one can\n also be local.\n :type plot_type: str\n :param plot_all: By default all rays, even those travelling in the\n other direction and thus arriving at a distance of *360 - x*\n degrees are shown. Set this to ``False`` to only show rays\n arriving at exactly *x* degrees.\n :type plot_all: bool\n :param legend: If boolean, specify whether or not to show the legend\n (at the default location.) If a str, specify the location of the\n legend. If you are plotting a single phase, you may consider using\n the ``label_arrivals`` argument.\n :type legend: bool or str\n :param label_arrivals: Label the arrivals with their respective phase\n names. This setting is only useful if you are plotting a single\n phase as otherwise the names could be large and possibly overlap\n or clip. Consider using the ``legend`` parameter instead if you\n are plotting multiple phases.\n :type label_arrivals: bool\n :param show: Show the plot.\n :type show: bool\n :param fig: Figure to plot in. If not given, a new figure will be\n created.\n :type fig: :class:`matplotlib.figure.Figure`\n :param ax: Axes to plot in. If not given, a new figure with an axes\n will be created. 
Must be a polar axes for the spherical plot and\n a regular one for the Cartesian plot.\n :type ax: :class:`matplotlib.axes.Axes`\n :returns: Matplotlib axes with the plot\n :rtype: :class:`matplotlib.axes.Axes`\n \"\"\"\n import matplotlib.pyplot as plt\n\n # I don't get this, but without sorting, I get a different\n # order each call:\n\n if phase_list is None:\n phase_list = (\"ttall\",)\n\n phase_names = sorted(parse_phase_list(phase_list))\n arrivals = []\n for arrival in self:\n if arrival.path is None:\n continue\n dist = arrival.purist_distance % 360.0\n distance = arrival.distance\n if distance < 0:\n distance = (distance % 360)\n if abs(dist - distance) / dist > 1E-5:\n if plot_all is False:\n continue\n # Mirror on axis.\n arrival = copy.deepcopy(arrival)\n arrival.path[\"dist\"] *= -1.0\n arrivals.append(arrival)\n\n if not arrivals:\n raise ValueError(\"Can only plot arrivals with calculated ray \"\n \"paths.\")\n\n # get the velocity discontinuities in your model, for plotting:\n discons = self.model.s_mod.v_mod.get_discontinuity_depths()\n\n if plot_type == \"spherical\":\n if ax and not isinstance(ax, mpl.projections.polar.PolarAxes):\n msg = (\"Axes instance provided for plotting with \"\n \"`plot_type='spherical'` but it seems the axes is not \"\n \"a polar axes.\")\n warnings.warn(msg)\n if fig and ax:\n pass\n elif not fig and not ax:\n fig, ax = plt.subplots(subplot_kw=dict(projection='polar'))\n elif not ax:\n ax = fig.add_subplot(1, 1, 1, polar=True)\n elif not fig:\n fig = ax.figure\n\n ax.set_theta_zero_location('N')\n ax.set_theta_direction(-1)\n ax.set_xticks([])\n ax.set_yticks([])\n\n intp = matplotlib.cbook.simple_linear_interpolation\n radius = self.model.radius_of_planet\n for ray in arrivals:\n if ray.name in phase_names:\n # Requires interpolation,or diffracted phases look funny.\n ax.plot(intp(ray.path[\"dist\"], 100),\n radius - intp(ray.path[\"depth\"], 100),\n color=COLORS[phase_names.index(ray.name) %\n len(COLORS)], label=ray.name, lw=2.0)\n else:\n ax.plot(intp(ray.path[\"dist\"], 100),\n radius - intp(ray.path[\"depth\"], 100),\n color='k', label=ray.name, lw=2.0)\n ax.set_yticks(radius - discons)\n ax.xaxis.set_major_formatter(plt.NullFormatter())\n ax.yaxis.set_major_formatter(plt.NullFormatter())\n\n # Pretty earthquake marker.\n ax.plot([0], [radius - arrivals[0].source_depth],\n marker=\"*\", color=\"#FEF215\", markersize=20, zorder=10,\n markeredgewidth=1.5, markeredgecolor=\"0.3\",\n clip_on=False)\n\n # Pretty station marker.\n arrowprops = dict(arrowstyle='-|>,head_length=0.8,'\n 'head_width=0.5',\n color='#C95241', lw=1.5)\n station_radius = radius - arrivals[0].receiver_depth\n ax.annotate('',\n xy=(np.deg2rad(distance), station_radius),\n xycoords='data',\n xytext=(np.deg2rad(distance),\n station_radius + radius * 0.02),\n textcoords='data',\n arrowprops=arrowprops,\n clip_on=False)\n arrowprops = dict(arrowstyle='-|>,head_length=1.0,'\n 'head_width=0.6',\n color='0.3', lw=1.5, fill=False)\n ax.annotate('',\n xy=(np.deg2rad(distance), station_radius),\n xycoords='data',\n xytext=(np.deg2rad(distance),\n station_radius + radius * 0.01),\n textcoords='data',\n arrowprops=arrowprops,\n clip_on=False)\n if label_arrivals:\n name = ','.join(sorted(set(ray.name for ray in arrivals)))\n # We cannot just set the text of the annotations above because\n # it changes the arrow path.\n t = _SmartPolarText(np.deg2rad(distance),\n station_radius + radius * 0.1,\n name, clip_on=False)\n ax.add_artist(t)\n\n ax.set_rmax(radius)\n ax.set_rmin(0.0)\n\n 
if legend:\n if isinstance(legend, bool):\n if 0 <= distance <= 180.0:\n loc = \"upper left\"\n else:\n loc = \"upper right\"\n else:\n loc = legend\n ax.legend(loc=loc, prop=dict(size=\"small\"))\n\n elif plot_type == \"cartesian\":\n if ax and isinstance(ax, mpl.projections.polar.PolarAxes):\n msg = (\"Axes instance provided for plotting with \"\n \"`plot_type='cartesian'` but it seems the axes is \"\n \"a polar axes.\")\n warnings.warn(msg)\n if fig and ax:\n pass\n elif not fig and not ax:\n fig, ax = plt.subplots()\n ax.invert_yaxis()\n elif not ax:\n ax = fig.add_subplot(1, 1, 1)\n ax.invert_yaxis()\n elif not fig:\n fig = ax.figure\n\n # Plot the ray paths:\n for ray in arrivals:\n if ray.name in phase_names:\n ax.plot(np.rad2deg(ray.path[\"dist\"]), ray.path[\"depth\"],\n color=COLORS[phase_names.index(ray.name) %\n len(COLORS)],\n label=ray.name, lw=2.0)\n else:\n ax.plot(np.rad2deg(ray.path[\"dist\"]), ray.path[\"depth\"],\n color='k', label=ray.name, lw=2.0)\n\n # Pretty station marker:\n ms = 14\n station_marker_transform = matplotlib.transforms.offset_copy(\n ax.transData,\n fig=ax.get_figure(),\n y=ms / 2.0,\n units=\"points\")\n ax.plot([distance], [arrivals[0].receiver_depth],\n marker=\"v\", color=\"#C95241\",\n markersize=ms, zorder=10, markeredgewidth=1.5,\n markeredgecolor=\"0.3\", clip_on=False,\n transform=station_marker_transform)\n if label_arrivals:\n name = ','.join(sorted(set(ray.name for ray in arrivals)))\n ax.annotate(name,\n xy=(distance, arrivals[0].receiver_depth),\n xytext=(0, ms * 1.5),\n textcoords='offset points',\n ha='center', annotation_clip=False)\n\n # Pretty earthquake marker.\n ax.plot([0], [arrivals[0].source_depth], marker=\"*\",\n color=\"#FEF215\", markersize=20, zorder=10,\n markeredgewidth=1.5, markeredgecolor=\"0.3\",\n clip_on=False)\n\n # lines of major discontinuities:\n x = ax.get_xlim()\n y = ax.get_ylim()\n for depth in discons:\n if not (y[1] <= depth <= y[0]):\n continue\n ax.hlines(depth, x[0], x[1], color=\"0.5\", zorder=-1)\n\n # Plot some more station markers if necessary.\n possible_distances = [_i * (distance + 360.0)\n for _i in range(1, 10)]\n possible_distances += [-_i * (360.0 - distance) for _i in\n range(1, 10)]\n possible_distances = [_i for _i in possible_distances\n if x[0] <= _i <= x[1]]\n if possible_distances:\n ax.plot(possible_distances, [arrivals[0].receiver_depth]\n * len(possible_distances),\n marker=\"v\", color=\"#C95241\",\n markersize=ms, zorder=10, markeredgewidth=1.5,\n markeredgecolor=\"0.3\", clip_on=False, lw=0,\n transform=station_marker_transform)\n if legend:\n if isinstance(legend, bool):\n loc = \"lower left\"\n else:\n loc = legend\n ax.legend(loc=loc, prop=dict(size=\"small\"))\n ax.set_xlabel(\"Distance [deg]\")\n ax.set_ylabel(\"Depth [km]\")\n else:\n msg = \"Plot type '{}' is not a valid option.\".format(plot_type)\n raise ValueError(msg)\n if show:\n plt.show()\n return ax\n\n def plot(self, plot_type=\"spherical\", plot_all=True, legend=True,\n label_arrivals=False, ax=None, show=True):\n \"\"\"\n Plot ray paths if any have been calculated.\n\n :param plot_type: Either ``\"spherical\"`` or ``\"cartesian\"``.\n A spherical plot is always global whereas a Cartesian one can\n also be local.\n :type plot_type: str\n :param plot_all: By default all rays, even those travelling in the\n other direction and thus arriving at a distance of *360 - x*\n degrees are shown. 
Set this to ``False`` to only show rays\n arriving at exactly *x* degrees.\n :type plot_all: bool\n :param legend: If boolean, specify whether or not to show the legend\n (at the default location.) If a str, specify the location of the\n legend. If you are plotting a single phase, you may consider using\n the ``label_arrivals`` argument.\n :type legend: bool or str\n :param label_arrivals: Label the arrivals with their respective phase\n names. This setting is only useful if you are plotting a single\n phase as otherwise the names could be large and possibly overlap\n or clip. Consider using the ``legend`` parameter instead if you\n are plotting multiple phases.\n :type label_arrivals: bool\n :param show: Show the plot.\n :type show: bool\n :param fig: Figure to plot in. If not given, a new figure will be\n created.\n :type fig: :class:`matplotlib.figure.Figure`\n :param ax: Axes to plot in. If not given, a new figure with an axes\n will be created. Must be a polar axes for the spherical plot and\n a regular one for the Cartesian plot.\n :type ax: :class:`matplotlib.axes.Axes`\n :returns: Matplotlib axes with the plot\n :rtype: :class:`matplotlib.axes.Axes`\n\n .. versionchanged:: 1.1.0\n\n Deprecated.\n\n With the introduction of plot_times(), plot() has been renamed to\n plot_rays()\n \"\"\"\n\n # display warning\n from obspy.core.util.deprecation_helpers import ObsPyDeprecationWarning\n warnings.warn(\"The plot() function is deprecated. Please use \"\n \"arrivals.plot_rays()\",\n ObsPyDeprecationWarning, stacklevel=2)\n\n # call plot_rays, but with added fig and phase_list parameters:\n return self.plot_rays(plot_type=plot_type,\n plot_all=plot_all,\n legend=legend,\n label_arrivals=label_arrivals,\n ax=ax,\n fig=None,\n show=show,\n phase_list=(\"ttall\",))\n\n\nclass TauPyModel(object):\n \"\"\"\n Representation of a seismic model and methods for ray paths through it.\n \"\"\"\n\n def __init__(self, model=\"iasp91\", verbose=False, planet_flattening=0.0,\n cache=None):\n \"\"\"\n Loads an already created TauPy model.\n\n :param model: The model name. Either an internal TauPy model or a\n filename in the case of custom models.\n :param planet_flattening: Flattening parameter for the planet's\n ellipsoid (i.e. (a-b)/a, where a is the semimajor equatorial radius\n and b is the semiminor polar radius). A value of 0 (the default)\n gives a spherical planet. Note that this is only used to convert\n from geographical positions (source and receiver latitudes and\n longitudes) to epicentral distances - the actual traveltime and\n raypath calculations are performed on a spherical planet.\n :type planet_flattening: float\n :param cache: An object to use to cache models split at source depths.\n Generating results requires splitting a model at the source depth,\n which may be expensive. The cache allows faster calculation when\n multiple results are requested for the same source depth. The\n dictionary must be ordered, otherwise the LRU cache will not\n behave correctly. 
If ``False`` is specified, then no cache will be\n used.\n :type cache: :class:`collections.OrderedDict` or bool\n\n Usage:\n\n >>> from obspy.taup import tau\n >>> i91 = tau.TauPyModel()\n >>> print(i91.get_travel_times(10, 20)[0].name)\n P\n >>> i91.get_travel_times(10, 20)[0].time # doctest: +ELLIPSIS\n 272.675...\n >>> len(i91.get_travel_times(100, 50, phase_list = [\"P\", \"S\"]))\n 2\n \"\"\"\n self.verbose = verbose\n self.model = TauModel.from_file(model, cache=cache)\n self.planet_flattening = planet_flattening\n\n def get_travel_times(self, source_depth_in_km, distance_in_degree=None,\n phase_list=(\"ttall\",), receiver_depth_in_km=0.0):\n \"\"\"\n Return travel times of every given phase.\n\n :param source_depth_in_km: Source depth in km\n :type source_depth_in_km: float\n :param distance_in_degree: Epicentral distance in degrees.\n :type distance_in_degree: float\n :param phase_list: List of phases for which travel times should be\n calculated. If this is empty, all phases in arrivals object\n will be used.\n :type phase_list: list of str\n :param receiver_depth_in_km: Receiver depth in km\n :type receiver_depth_in_km: float\n\n :return: List of ``Arrival`` objects, each of which has the time,\n corresponding phase name, ray parameter, takeoff angle, etc. as\n attributes.\n :rtype: :class:`Arrivals`\n \"\"\"\n # Accessing the arrivals not just by list indices but by phase name\n # might be useful, but also difficult: several arrivals can have the\n # same phase.\n tt = TauPTime(self.model, phase_list, source_depth_in_km,\n distance_in_degree, receiver_depth_in_km)\n tt.run()\n return Arrivals(sorted(tt.arrivals, key=lambda x: x.time),\n model=self.model)\n\n def get_pierce_points(self, source_depth_in_km, distance_in_degree,\n phase_list=(\"ttall\",), receiver_depth_in_km=0.0):\n \"\"\"\n Return pierce points of every given phase.\n\n :param source_depth_in_km: Source depth in km\n :type source_depth_in_km: float\n :param distance_in_degree: Epicentral distance in degrees.\n :type distance_in_degree: float\n :param phase_list: List of phases for which travel times should be\n calculated. If this is empty, all phases in arrivals object\n will be used.\n :type phase_list: list of str\n :param receiver_depth_in_km: Receiver depth in km\n :type receiver_depth_in_km: float\n\n :return: List of ``Arrival`` objects, each of which has the time,\n corresponding phase name, ray parameter, takeoff angle, etc. as\n attributes.\n :rtype: :class:`Arrivals`\n \"\"\"\n pp = TauPPierce(self.model, phase_list, source_depth_in_km,\n distance_in_degree, receiver_depth_in_km)\n pp.run()\n return Arrivals(sorted(pp.arrivals, key=lambda x: x.time),\n model=self.model)\n\n def get_ray_paths(self, source_depth_in_km, distance_in_degree=None,\n phase_list=(\"ttall\",), receiver_depth_in_km=0.0):\n \"\"\"\n Return ray paths of every given phase.\n\n :param source_depth_in_km: Source depth in km\n :type source_depth_in_km: float\n :param distance_in_degree: Epicentral distance in degrees.\n :type distance_in_degree: float\n :param phase_list: List of phases for which travel times should be\n calculated. If this is empty, all phases in arrivals object\n will be used.\n :type phase_list: list of str\n :param receiver_depth_in_km: Receiver depth in km\n :type receiver_depth_in_km: float\n\n :return: List of ``Arrival`` objects, each of which has the time,\n corresponding phase name, ray parameter, takeoff angle, etc. 
as\n attributes.\n :rtype: :class:`Arrivals`\n \"\"\"\n rp = TauPPath(self.model, phase_list, source_depth_in_km,\n distance_in_degree, receiver_depth_in_km)\n rp.run()\n return Arrivals(sorted(rp.arrivals, key=lambda x: x.time),\n model=self.model)\n\n def get_travel_times_geo(self, source_depth_in_km, source_latitude_in_deg,\n source_longitude_in_deg, receiver_latitude_in_deg,\n receiver_longitude_in_deg, phase_list=(\"ttall\",)):\n \"\"\"\n Return travel times of every given phase given geographical data.\n\n .. note::\n\n Note that the conversion from source and receiver latitudes and\n longitudes to epicentral distances respects the model's flattening\n parameter, so this calculation can be performed for a ellipsoidal\n or spherical planet. However, the actual traveltime and raypath\n calculations are performed on a spherical planet. Ellipticity\n corrections of e.g. [Dziewonski1976]_ are not made.\n\n :param source_depth_in_km: Source depth in km\n :type source_depth_in_km: float\n :param source_latitude_in_deg: Source latitude in degrees\n :type source_latitude_in_deg: float\n :param source_longitude_in_deg: Source longitude in degrees\n :type source_longitude_in_deg: float\n :param receiver_latitude_in_deg: Receiver latitude in degrees\n :type receiver_latitude_in_deg: float\n :param receiver_longitude_in_deg: Receiver longitude in degrees\n :type receiver_longitude_in_deg: float\n :param phase_list: List of phases for which travel times should be\n calculated. If this is empty, all phases in arrivals object\n will be used.\n :type phase_list: list of str\n\n :return: List of ``Arrival`` objects, each of which has the time,\n corresponding phase name, ray parameter, takeoff angle, etc. as\n attributes.\n :rtype: :class:`Arrivals`\n \"\"\"\n distance_in_deg = calc_dist(source_latitude_in_deg,\n source_longitude_in_deg,\n receiver_latitude_in_deg,\n receiver_longitude_in_deg,\n self.model.radius_of_planet,\n self.planet_flattening)\n arrivals = self.get_travel_times(source_depth_in_km, distance_in_deg,\n phase_list)\n return arrivals\n\n def get_pierce_points_geo(self, source_depth_in_km, source_latitude_in_deg,\n source_longitude_in_deg,\n receiver_latitude_in_deg,\n receiver_longitude_in_deg,\n phase_list=(\"ttall\",),\n resample=False):\n \"\"\"\n Return ray paths of every given phase with geographical info.\n\n .. note::\n\n Note that the conversion from source and receiver latitudes and\n longitudes to epicentral distances respects the model's flattening\n parameter, so this calculation can be performed for a ellipsoidal\n or spherical planet. However, the actual traveltime and raypath\n calculations are performed on a spherical planet. Ellipticity\n corrections of e.g. [Dziewonski1976]_ are not made.\n\n :param source_depth_in_km: Source depth in km\n :type source_depth_in_km: float\n :param source_latitude_in_deg: Source latitude in degrees\n :type source_latitude_in_deg: float\n :param source_longitude_in_deg: Source longitue in degrees\n :type source_longitude_in_deg: float\n :param receiver_latitude_in_deg: Receiver latitude in degrees\n :type receiver_latitude_in_deg: float\n :param receiver_longitude_in_deg: Receiver longitude in degrees\n :type receiver_longitude_in_deg: float\n :param phase_list: List of phases for which travel times should be\n calculated. If this is empty, all phases in arrivals object\n will be used.\n :type phase_list: list of str\n :param resample: adds sample points to allow for easy cartesian\n interpolation. 
This is especially useful for phases\n like Pdiff.\n :type resample: boolean\n :return: List of ``Arrival`` objects, each of which has the time,\n corresponding phase name, ray parameter, takeoff angle, etc. as\n attributes.\n :rtype: :class:`Arrivals`\n \"\"\"\n distance_in_deg = calc_dist(source_latitude_in_deg,\n source_longitude_in_deg,\n receiver_latitude_in_deg,\n receiver_longitude_in_deg,\n self.model.radius_of_planet,\n self.planet_flattening)\n\n arrivals = self.get_pierce_points(source_depth_in_km, distance_in_deg,\n phase_list)\n\n if geodetics.HAS_GEOGRAPHICLIB:\n arrivals = add_geo_to_arrivals(arrivals, source_latitude_in_deg,\n source_longitude_in_deg,\n receiver_latitude_in_deg,\n receiver_longitude_in_deg,\n self.model.radius_of_planet,\n self.planet_flattening,\n resample=resample)\n else:\n msg = \"Not able to evaluate positions of pierce points. \" + \\\n \"Arrivals object will not be modified. \" + \\\n \"Install the Python module 'geographiclib' to solve \" + \\\n \"this issue.\"\n warnings.warn(msg)\n\n return arrivals\n\n def get_ray_paths_geo(self, source_depth_in_km, source_latitude_in_deg,\n source_longitude_in_deg, receiver_latitude_in_deg,\n receiver_longitude_in_deg, phase_list=(\"ttall\",),\n resample=False):\n \"\"\"\n Return ray paths of every given phase with geographical info.\n\n .. note::\n\n Note that the conversion from source and receiver latitudes and\n longitudes to epicentral distances respects the model's flattening\n parameter, so this calculation can be performed for a ellipsoidal\n or spherical planet. However, the actual traveltime and raypath\n calculations are performed on a spherical planet. Ellipticity\n corrections of e.g. [Dziewonski1976]_ are not made.\n\n :param source_depth_in_km: Source depth in km\n :type source_depth_in_km: float\n :param source_latitude_in_deg: Source latitude in degrees\n :type source_latitude_in_deg: float\n :param source_longitude_in_deg: Source longitue in degrees\n :type source_longitude_in_deg: float\n :param receiver_latitude_in_deg: Receiver latitude in degrees\n :type receiver_latitude_in_deg: float\n :param receiver_longitude_in_deg: Receiver longitude in degrees\n :type receiver_longitude_in_deg: float\n :param phase_list: List of phases for which travel times should be\n calculated. If this is empty, all phases in arrivals object\n will be used.\n :type phase_list: list of str\n :return: List of ``Arrival`` objects, each of which has the time,\n corresponding phase name, ray parameter, takeoff angle, etc. as\n attributes.\n :rtype: :class:`Arrivals`\n \"\"\"\n distance_in_deg = calc_dist(source_latitude_in_deg,\n source_longitude_in_deg,\n receiver_latitude_in_deg,\n receiver_longitude_in_deg,\n self.model.radius_of_planet,\n self.planet_flattening)\n\n arrivals = self.get_ray_paths(source_depth_in_km, distance_in_deg,\n phase_list)\n\n if geodetics.HAS_GEOGRAPHICLIB:\n arrivals = add_geo_to_arrivals(arrivals, source_latitude_in_deg,\n source_longitude_in_deg,\n receiver_latitude_in_deg,\n receiver_longitude_in_deg,\n self.model.radius_of_planet,\n self.planet_flattening,\n resample=resample)\n else:\n msg = \"Not able to evaluate positions of points on path. \" + \\\n \"Arrivals object will not be modified. 
\" + \\\n \"Install the Python module 'geographiclib' to solve \" + \\\n \"this issue.\"\n warnings.warn(msg)\n\n return arrivals\n\n\ndef create_taup_model(model_name, output_dir, input_dir):\n \"\"\"\n Create a .taup model from a .tvel file.\n\n :param model_name:\n :param output_dir:\n \"\"\"\n if \".\" in model_name:\n model_file_name = model_name\n else:\n model_file_name = model_name + \".tvel\"\n TauPCreate.main(model_file_name, output_dir, input_dir)\n\n\ndef plot_travel_times(source_depth, phase_list=(\"ttbasic\",), min_degrees=0,\n max_degrees=180, npoints=50, model='iasp91',\n plot_all=True, legend=True, verbose=False, fig=None,\n ax=None, show=True):\n \"\"\"\n Returns a travel time plot and any created axis instance of this\n plot.\n\n :param source_depth: Source depth in kilometers.\n :type source_depth: float\n :param min_degrees: minimum distance from the source (in degrees)\n :type min_degrees: float\n :param max_degrees: maximum distance from the source (in degrees)\n :type max_degrees: float\n :param npoints: Number of points to plot.\n :type npoints: int\n :param phase_list: List of phase names to plot.\n :type phase_list: list of str, optional\n :param model: string containing the model to use.\n :type model: str\n :param plot_all: By default all rays, even those travelling in the\n other direction and thus arriving at a distance of *360 - x*\n degrees are shown. Set this to ``False`` to only show rays\n arriving at exactly *x* degrees.\n :type plot_all: bool\n :param legend: Whether or not to show the legend\n :type legend: bool\n :param verbose: Whether to print information about epicentral distances\n that did not have an arrival.\n :type verbose: bool\n :param fig: Figure to plot in. If not given, a new figure instance\n will be created.\n :type fig: :class:`matplotlib.axes.Axes\n :param ax: Axes to plot in. If not given, a new figure with an axes\n will be created.\n param show: Show the plot.\n type show: bool\n :type ax: :class:`matplotlib.Figure.figure`\n :returns: ax\n :rtype: :class:`matplotlib.axes.Axes`\n\n .. rubric:: Example\n\n >>> from obspy.taup import plot_travel_times\n >>> ax = plot_travel_times(source_depth=10, phase_list=['P', 'S', 'PP'])\n\n .. plot::\n\n from obspy.taup import plot_travel_times\n ax = plot_travel_times(source_depth=10, phase_list=['P','S','PP'])\n \"\"\"\n import matplotlib.pyplot as plt\n\n # compute the requested arrivals:\n if not isinstance(model, TauPyModel):\n model = TauPyModel(model)\n\n # a list of epicentral distances without a travel time, and a flag:\n notimes = []\n plotted = False\n\n # calculate the arrival times and plot vs. 
epicentral distance:\n degrees = np.linspace(min_degrees, max_degrees, npoints)\n for degree in degrees:\n try:\n arrivals = model.get_ray_paths(source_depth, degree,\n phase_list=phase_list)\n ax = arrivals.plot_times(phase_list=phase_list, show=False,\n ax=ax, plot_all=plot_all)\n plotted = True\n except ValueError:\n notimes.append(degree)\n\n if plotted:\n if verbose:\n if len(notimes) == 1:\n tmpl = \"There was {} epicentral distance without an arrival\"\n else:\n tmpl = \"There were {} epicentral distances without an arrival\"\n print(tmpl.format(len(notimes)))\n else:\n raise ValueError(\"No arrival times to plot.\")\n\n if legend:\n # merge all arrival labels of a certain phase:\n handles, labels = ax.get_legend_handles_labels()\n labels, ids = np.unique(labels, return_index=True)\n handles = [handles[i] for i in ids]\n ax.legend(handles, labels, loc=2, numpoints=1)\n\n if show:\n plt.show()\n return ax\n\n\ndef plot_ray_paths(source_depth, min_degrees=0, max_degrees=360, npoints=10,\n plot_type='spherical', phase_list=['P', 'S', 'PP'],\n model='iasp91', plot_all=True, legend=False,\n label_arrivals=False, verbose=False, fig=None, show=True,\n ax=None):\n \"\"\"\n Plot ray paths for seismic phases.\n\n :param source_depth: Source depth in kilometers.\n :type source_depth: float\n :param min_degrees: minimum distance from the source (in degrees).\n :type min_degrees: float\n :param max_degrees: maximum distance from the source (in degrees).\n :type max_degrees: float\n :param npoints: Number of receivers to plot.\n :type npoints: int\n :param plot_type: type of plot to create.\n :type plot_type: str\n :param phase_list: List of phase names.\n :type phase_list: list of str\n :param model: Model name.\n :type model: str\n :param plot_all: By default all rays, even those travelling in the\n other direction and thus arriving at a distance of *360 - x*\n degrees are shown. Set this to ``False`` to only show rays\n arriving at exactly *x* degrees.\n :type plot_all: bool\n :param legend: Whether or not to show the legend\n :type legend: bool\n :param label_arrivals: Label the arrivals with their respective phase\n names. This setting is only useful if you are plotting a single\n phase as otherwise the names could be large and possibly overlap\n or clip. Consider using the ``legend`` parameter instead if you\n are plotting multiple phases.\n :type label_arrivals: bool\n :param verbose: Whether to print information about selected phases that\n were not encountered at individual epicentral distances.\n :type verbose: bool\n :param fig: Figure to plot into. If not given, a new figure instance\n will be created.\n :type fig: :class:`matplotlib.figure.Figure`\n :param show: Show the plot.\n :type show: bool\n :param ax: Axes to plot in. If not given, a new figure with an axes\n will be created.\n :type ax: :class:`matplotlib.axes.Axes`\n :returns: Matplotlib axes with the plot\n :rtype: :class:`matplotlib.axes.Axes`\n\n .. rubric:: Example\n\n >>> from obspy.taup.tau import plot_ray_paths\n >>> import matplotlib.pyplot as plt\n >>> fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(polar=True))\n >>> ax = plot_ray_paths(source_depth=10, plot_type=\"spherical\",\n ... ax=ax, fig=fig, legend=True,\n ... phase_list=['P', 'S', 'PP'], verbose=True)\n There were rays for all but the following epicentral distances:\n [0.0, 360.0]\n\n .. 
plot::\n\n from obspy.taup.tau import plot_ray_paths\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(polar=True))\n ax = plot_ray_paths(source_depth=10, plot_type=\"spherical\",\n ax=ax, fig=fig, legend=True,\n phase_list=['P','S','PP'])\n \"\"\"\n import matplotlib.pyplot as plt\n model = TauPyModel(model)\n\n # set up a list of epicentral distances without a ray, and a flag:\n norays = []\n plotted = False\n\n # calculate the arrival times and plot vs. epicentral distance:\n degrees = np.linspace(min_degrees, max_degrees, npoints)\n for degree in degrees:\n try:\n arrivals = model.get_ray_paths(source_depth, degree,\n phase_list=phase_list)\n ax = arrivals.plot_rays(phase_list=phase_list, show=False,\n ax=ax, plot_type=plot_type,\n plot_all=plot_all, legend=False)\n plotted = True\n except ValueError:\n norays.append(degree)\n\n if plotted:\n if verbose:\n print(\"There were rays for all but the following epicentral \"\n \"distances:\\n\", norays)\n else:\n raise ValueError(\"No ray paths to plot.\")\n\n if legend:\n # merge all arrival labels of a certain phase:\n handles, labels = ax.get_legend_handles_labels()\n labels, ids = np.unique(labels, return_index=True)\n handles = [handles[i] for i in ids]\n ax.legend(handles, labels, loc=2, numpoints=1)\n\n if show:\n plt.show()\n return ax\n", "path": "obspy/taup/tau.py" } ]
diff --git a/obspy/taup/tau.py b/obspy/taup/tau.py index b4def9ddf5e..7ab4f103175 100644 --- a/obspy/taup/tau.py +++ b/obspy/taup/tau.py @@ -939,7 +939,8 @@ def plot_travel_times(source_depth, phase_list=("ttbasic",), min_degrees=0, import matplotlib.pyplot as plt # compute the requested arrivals: - model = TauPyModel(model) + if not isinstance(model, TauPyModel): + model = TauPyModel(model) # a list of epicentral distances without a travel time, and a flag: notimes = []
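A minimal usage sketch of what the one-line change in the pr_diff above enables, assuming the patched `plot_travel_times` signature quoted in the file content and the standard `iasp91` model bundled with ObsPy; the depths and phase names are illustrative only. With the patch, callers may pass an already-built `TauPyModel` instead of a model name string, so the model file is parsed once and reused across plots.

```python
from obspy.taup import TauPyModel, plot_travel_times

# Build the (potentially expensive) model once.
model = TauPyModel("iasp91")

# Pass the instance directly; before the patch only a model-name string worked here.
ax = plot_travel_times(source_depth=10, model=model,
                       phase_list=["P", "S", "PP"], show=False)

# Reuse the same model (and axes) for another source depth.
ax = plot_travel_times(source_depth=100, model=model,
                       phase_list=["P", "S", "PP"], ax=ax, show=False)
```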
mitmproxy__mitmproxy-6587
mitmproxy: 10.2: WireGuard Mode Does Not Receive Connections #### Problem Description It looks like commit 6e38a56f4 Thu Jan 4 10:53:13 2024 +0100 implement UDP streams (and all future commits) has changed/broken something in the wireguard proxy mode. Prior commits work as expected. #### Steps to reproduce the behavior: 1. Build 6e38a56f4 or tag 10.2.0 2. ./mitmdump --set block_global=false --mode 'wireguard:/home/admin/.mitmproxy/wireguard.conf' 3. Then curl or visit web page from client running wireguard client , mitmproxy server does not accept the request When mitmdump (version 6e38a56f4 or tag 10.2.0) starts the following is printed ``` ./mitmdump --set block_global=false --mode 'wireguard:/home/admin/.mitmproxy/wireguard.conf' [23:47:56.530] ------------------------------------------------------------ [Interface] PrivateKey = ... Address = 10.0.0.1/32 DNS = 10.0.0.53 [Peer] PublicKey = ... AllowedIPs = 0.0.0.0/0 Endpoint = a.b.c.d:60000 ------------------------------------------------------------ [23:47:56.530] WireGuard server listening at 127.0.0.1:60000. ``` #### System Information ./mitmdump --version Mitmproxy: 11.0.0.dev (+9, commit 6e38a56) Python: 3.11.2 OpenSSL: OpenSSL 3.1.2 1 Aug 2023 Platform: Linux-6.1.0-15-cloud-arm64-aarch64-with-glibc2.36 When commit ed532e927 is run the following is logged - this is the expected output and in this case mitmproxy works as expected in wg mode. ``` ./mitmdump --set block_global=false --mode 'wireguard:/home/admin/.mitmproxy/wireguard.conf' [23:45:25.620] Initializing WireGuard server ... [23:45:25.621] WireGuard server listening for UDP connections on 0.0.0.0:60000 and [::]:60000 ... [23:45:25.621] WireGuard server successfully initialized. [23:45:25.622] ------------------------------------------------------------ [Interface] PrivateKey = ... Address = 10.0.0.1/32 DNS = 10.0.0.53 [Peer] PublicKey = ... AllowedIPs = 0.0.0.0/0 Endpoint = a.b.c.d:60000 ------------------------------------------------------------ [23:45:25.622] WireGuard server listening at *:60000. [23:45:26.335][10.0.0.2:52756] client connect ...usual logs... ``` #### System Information Mitmproxy: 11.0.0.dev (+8, commit ed532e9) Python: 3.11.2 OpenSSL: OpenSSL 3.1.2 1 Aug 2023 Platform: Linux-6.1.0-15-cloud-arm64-aarch64-with-glibc2.36
[ { "content": "\"\"\"\nThis module defines \"server instances\", which manage\nthe TCP/UDP servers spawned by mitmproxy as specified by the proxy mode.\n\nExample:\n\n mode = ProxyMode.parse(\"reverse:https://example.com\")\n inst = ServerInstance.make(mode, manager_that_handles_callbacks)\n await inst.start()\n # TCP server is running now.\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport errno\nimport json\nimport logging\nimport os\nimport socket\nimport sys\nimport textwrap\nimport typing\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import cast\nfrom typing import ClassVar\nfrom typing import Generic\nfrom typing import get_args\nfrom typing import TYPE_CHECKING\nfrom typing import TypeVar\n\nimport mitmproxy_rs\n\nfrom mitmproxy import ctx\nfrom mitmproxy import flow\nfrom mitmproxy import platform\nfrom mitmproxy.connection import Address\nfrom mitmproxy.net import local_ip\nfrom mitmproxy.proxy import commands\nfrom mitmproxy.proxy import layers\nfrom mitmproxy.proxy import mode_specs\nfrom mitmproxy.proxy import server\nfrom mitmproxy.proxy.context import Context\nfrom mitmproxy.proxy.layer import Layer\nfrom mitmproxy.utils import human\n\nif sys.version_info < (3, 11):\n from typing_extensions import Self # pragma: no cover\nelse:\n from typing import Self\n\nif TYPE_CHECKING:\n from mitmproxy.master import Master\n\nlogger = logging.getLogger(__name__)\n\n\nclass ProxyConnectionHandler(server.LiveConnectionHandler):\n master: Master\n\n def __init__(self, master, r, w, options, mode):\n self.master = master\n super().__init__(r, w, options, mode)\n self.log_prefix = f\"{human.format_address(self.client.peername)}: \"\n\n async def handle_hook(self, hook: commands.StartHook) -> None:\n with self.timeout_watchdog.disarm():\n # We currently only support single-argument hooks.\n (data,) = hook.args()\n await self.master.addons.handle_lifecycle(hook)\n if isinstance(data, flow.Flow):\n await data.wait_for_resume() # pragma: no cover\n\n\nM = TypeVar(\"M\", bound=mode_specs.ProxyMode)\n\n\nclass ServerManager(typing.Protocol):\n # temporary workaround: for UDP, we use the 4-tuple because we don't have a uuid.\n connections: dict[tuple | str, ProxyConnectionHandler]\n\n @contextmanager\n def register_connection(\n self, connection_id: tuple | str, handler: ProxyConnectionHandler\n ):\n ... 
# pragma: no cover\n\n\nclass ServerInstance(Generic[M], metaclass=ABCMeta):\n __modes: ClassVar[dict[str, type[ServerInstance]]] = {}\n\n last_exception: Exception | None = None\n\n def __init__(self, mode: M, manager: ServerManager):\n self.mode: M = mode\n self.manager: ServerManager = manager\n\n def __init_subclass__(cls, **kwargs):\n \"\"\"Register all subclasses so that make() finds them.\"\"\"\n # extract mode from Generic[Mode].\n mode = get_args(cls.__orig_bases__[0])[0] # type: ignore\n if not isinstance(mode, TypeVar):\n assert issubclass(mode, mode_specs.ProxyMode)\n assert mode.type_name not in ServerInstance.__modes\n ServerInstance.__modes[mode.type_name] = cls\n\n @classmethod\n def make(\n cls,\n mode: mode_specs.ProxyMode | str,\n manager: ServerManager,\n ) -> Self:\n if isinstance(mode, str):\n mode = mode_specs.ProxyMode.parse(mode)\n inst = ServerInstance.__modes[mode.type_name](mode, manager)\n\n if not isinstance(inst, cls):\n raise ValueError(f\"{mode!r} is not a spec for a {cls.__name__} server.\")\n\n return inst\n\n @property\n @abstractmethod\n def is_running(self) -> bool:\n pass\n\n async def start(self) -> None:\n try:\n await self._start()\n except Exception as e:\n self.last_exception = e\n raise\n else:\n self.last_exception = None\n if self.listen_addrs:\n addrs = \" and \".join({human.format_address(a) for a in self.listen_addrs})\n logger.info(f\"{self.mode.description} listening at {addrs}.\")\n else:\n logger.info(f\"{self.mode.description} started.\")\n\n async def stop(self) -> None:\n listen_addrs = self.listen_addrs\n try:\n await self._stop()\n except Exception as e:\n self.last_exception = e\n raise\n else:\n self.last_exception = None\n if listen_addrs:\n addrs = \" and \".join({human.format_address(a) for a in listen_addrs})\n logger.info(f\"{self.mode.description} at {addrs} stopped.\")\n else:\n logger.info(f\"{self.mode.description} stopped.\")\n\n @abstractmethod\n async def _start(self) -> None:\n pass\n\n @abstractmethod\n async def _stop(self) -> None:\n pass\n\n @property\n @abstractmethod\n def listen_addrs(self) -> tuple[Address, ...]:\n pass\n\n @abstractmethod\n def make_top_layer(self, context: Context) -> Layer:\n pass\n\n def to_json(self) -> dict:\n return {\n \"type\": self.mode.type_name,\n \"description\": self.mode.description,\n \"full_spec\": self.mode.full_spec,\n \"is_running\": self.is_running,\n \"last_exception\": str(self.last_exception) if self.last_exception else None,\n \"listen_addrs\": self.listen_addrs,\n }\n\n async def handle_stream(\n self,\n reader: asyncio.StreamReader | mitmproxy_rs.Stream,\n writer: asyncio.StreamWriter | mitmproxy_rs.Stream,\n ) -> None:\n handler = ProxyConnectionHandler(\n ctx.master, reader, writer, ctx.options, self.mode\n )\n handler.layer = self.make_top_layer(handler.layer.context)\n if isinstance(self.mode, mode_specs.TransparentMode):\n assert isinstance(writer, asyncio.StreamWriter)\n s = cast(socket.socket, writer.get_extra_info(\"socket\"))\n try:\n assert platform.original_addr\n original_dst = platform.original_addr(s)\n except Exception as e:\n logger.error(f\"Transparent mode failure: {e!r}\")\n writer.close()\n return\n else:\n handler.layer.context.client.sockname = original_dst\n handler.layer.context.server.address = original_dst\n elif isinstance(self.mode, (mode_specs.WireGuardMode, mode_specs.LocalMode)):\n handler.layer.context.server.address = writer.get_extra_info(\n \"remote_endpoint\", handler.layer.context.client.sockname\n )\n\n with 
self.manager.register_connection(handler.layer.context.client.id, handler):\n await handler.handle_client()\n\n async def handle_udp_stream(self, stream: mitmproxy_rs.Stream) -> None:\n await self.handle_stream(stream, stream)\n\n\nclass AsyncioServerInstance(ServerInstance[M], metaclass=ABCMeta):\n _servers: list[asyncio.Server | mitmproxy_rs.UdpServer]\n\n def __init__(self, *args, **kwargs) -> None:\n self._servers = []\n super().__init__(*args, **kwargs)\n\n @property\n def is_running(self) -> bool:\n return bool(self._servers)\n\n @property\n def listen_addrs(self) -> tuple[Address, ...]:\n addrs = []\n for s in self._servers:\n if isinstance(s, mitmproxy_rs.UdpServer):\n addrs.append(s.getsockname())\n else:\n try:\n addrs.extend(sock.getsockname() for sock in s.sockets)\n except OSError: # pragma: no cover\n pass # this can fail during shutdown, see https://github.com/mitmproxy/mitmproxy/issues/6529\n return tuple(addrs)\n\n async def _start(self) -> None:\n assert not self._servers\n host = self.mode.listen_host(ctx.options.listen_host)\n port = self.mode.listen_port(ctx.options.listen_port)\n try:\n self._servers = await self.listen(host, port)\n except OSError as e:\n message = f\"{self.mode.description} failed to listen on {host or '*'}:{port} with {e}\"\n if e.errno == errno.EADDRINUSE and self.mode.custom_listen_port is None:\n assert (\n self.mode.custom_listen_host is None\n ) # since [@ [listen_addr:]listen_port]\n message += f\"\\nTry specifying a different port by using `--mode {self.mode.full_spec}@{port + 2}`.\"\n raise OSError(e.errno, message, e.filename) from e\n\n async def _stop(self) -> None:\n assert self._servers\n try:\n for s in self._servers:\n s.close()\n # https://github.com/python/cpython/issues/104344\n # await asyncio.gather(*[s.wait_closed() for s in self._servers])\n finally:\n # we always reset _server and ignore failures\n self._servers = []\n\n async def listen(\n self, host: str, port: int\n ) -> list[asyncio.Server | mitmproxy_rs.UdpServer]:\n if self.mode.transport_protocol == \"tcp\":\n # workaround for https://github.com/python/cpython/issues/89856:\n # We want both IPv4 and IPv6 sockets to bind to the same port.\n # This may fail (https://github.com/mitmproxy/mitmproxy/pull/5542#issuecomment-1222803291),\n # so we try to cover the 99% case and then give up and fall back to what asyncio does.\n if port == 0:\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((\"\", 0))\n fixed_port = s.getsockname()[1]\n s.close()\n return [\n await asyncio.start_server(self.handle_stream, host, fixed_port)\n ]\n except Exception as e:\n logger.debug(\n f\"Failed to listen on a single port ({e!r}), falling back to default behavior.\"\n )\n return [await asyncio.start_server(self.handle_stream, host, port)]\n elif self.mode.transport_protocol == \"udp\":\n # we start two servers for dual-stack support.\n # On Linux, this would also be achievable by toggling IPV6_V6ONLY off, but this here works cross-platform.\n if host == \"\":\n ipv4 = await mitmproxy_rs.start_udp_server(\n \"0.0.0.0\",\n port,\n self.handle_udp_stream,\n )\n try:\n ipv6 = await mitmproxy_rs.start_udp_server(\n \"::\",\n ipv4.getsockname()[1],\n self.handle_udp_stream,\n )\n except Exception: # pragma: no cover\n logger.debug(\"Failed to listen on '::', listening on IPv4 only.\")\n return [ipv4]\n else: # pragma: no cover\n return [ipv4, ipv6]\n return [\n await mitmproxy_rs.start_udp_server(\n host,\n port,\n self.handle_udp_stream,\n )\n ]\n else:\n raise 
AssertionError(self.mode.transport_protocol)\n\n\nclass WireGuardServerInstance(ServerInstance[mode_specs.WireGuardMode]):\n _server: mitmproxy_rs.WireGuardServer | None = None\n\n server_key: str\n client_key: str\n\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.TransparentProxy(context)\n\n @property\n def is_running(self) -> bool:\n return self._server is not None\n\n @property\n def listen_addrs(self) -> tuple[Address, ...]:\n if self._server:\n return (self._server.getsockname(),)\n else:\n return tuple()\n\n async def _start(self) -> None:\n assert self._server is None\n host = self.mode.listen_host(ctx.options.listen_host)\n port = self.mode.listen_port(ctx.options.listen_port)\n\n if self.mode.data:\n conf_path = Path(self.mode.data).expanduser()\n else:\n conf_path = Path(ctx.options.confdir).expanduser() / \"wireguard.conf\"\n\n if not conf_path.exists():\n conf_path.parent.mkdir(parents=True, exist_ok=True)\n conf_path.write_text(\n json.dumps(\n {\n \"server_key\": mitmproxy_rs.genkey(),\n \"client_key\": mitmproxy_rs.genkey(),\n },\n indent=4,\n )\n )\n\n try:\n c = json.loads(conf_path.read_text())\n self.server_key = c[\"server_key\"]\n self.client_key = c[\"client_key\"]\n except Exception as e:\n raise ValueError(f\"Invalid configuration file ({conf_path}): {e}\") from e\n # error early on invalid keys\n p = mitmproxy_rs.pubkey(self.client_key)\n _ = mitmproxy_rs.pubkey(self.server_key)\n\n self._server = await mitmproxy_rs.start_wireguard_server(\n host or \"127.0.0.1\",\n port,\n self.server_key,\n [p],\n self.wg_handle_stream,\n self.wg_handle_stream,\n )\n\n conf = self.client_conf()\n assert conf\n logger.info(\"-\" * 60 + \"\\n\" + conf + \"\\n\" + \"-\" * 60)\n\n def client_conf(self) -> str | None:\n if not self._server:\n return None\n host = local_ip.get_local_ip() or local_ip.get_local_ip6()\n port = self.mode.listen_port(ctx.options.listen_port)\n return textwrap.dedent(\n f\"\"\"\n [Interface]\n PrivateKey = {self.client_key}\n Address = 10.0.0.1/32\n DNS = 10.0.0.53\n\n [Peer]\n PublicKey = {mitmproxy_rs.pubkey(self.server_key)}\n AllowedIPs = 0.0.0.0/0\n Endpoint = {host}:{port}\n \"\"\"\n ).strip()\n\n def to_json(self) -> dict:\n return {\"wireguard_conf\": self.client_conf(), **super().to_json()}\n\n async def _stop(self) -> None:\n assert self._server is not None\n try:\n self._server.close()\n await self._server.wait_closed()\n finally:\n self._server = None\n\n async def wg_handle_stream(self, stream: mitmproxy_rs.Stream) -> None:\n await self.handle_stream(stream, stream)\n\n\nclass LocalRedirectorInstance(ServerInstance[mode_specs.LocalMode]):\n _server: ClassVar[mitmproxy_rs.LocalRedirector | None] = None\n \"\"\"The local redirector daemon. Will be started once and then reused for all future instances.\"\"\"\n _instance: ClassVar[LocalRedirectorInstance | None] = None\n \"\"\"The current LocalRedirectorInstance. 
Will be unset again if an instance is stopped.\"\"\"\n listen_addrs = ()\n\n @property\n def is_running(self) -> bool:\n return self._instance is not None\n\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.TransparentProxy(context)\n\n @classmethod\n async def redirector_handle_stream(\n cls,\n stream: mitmproxy_rs.Stream,\n ) -> None:\n if cls._instance is not None:\n await cls._instance.handle_stream(stream, stream)\n\n async def _start(self) -> None:\n if self._instance:\n raise RuntimeError(\"Cannot spawn more than one local redirector.\")\n\n if self.mode.data.startswith(\"!\"):\n spec = f\"{self.mode.data},{os.getpid()}\"\n elif self.mode.data:\n spec = self.mode.data\n else:\n spec = f\"!{os.getpid()}\"\n\n cls = self.__class__\n cls._instance = self # assign before awaiting to avoid races\n if cls._server is None:\n try:\n cls._server = await mitmproxy_rs.start_local_redirector(\n cls.redirector_handle_stream,\n cls.redirector_handle_stream,\n )\n except Exception:\n cls._instance = None\n raise\n\n cls._server.set_intercept(spec)\n\n async def _stop(self) -> None:\n assert self._instance\n assert self._server\n self.__class__._instance = None\n # We're not shutting down the server because we want to avoid additional UAC prompts.\n self._server.set_intercept(\"\")\n\n\nclass RegularInstance(AsyncioServerInstance[mode_specs.RegularMode]):\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.HttpProxy(context)\n\n\nclass UpstreamInstance(AsyncioServerInstance[mode_specs.UpstreamMode]):\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.HttpUpstreamProxy(context)\n\n\nclass TransparentInstance(AsyncioServerInstance[mode_specs.TransparentMode]):\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.TransparentProxy(context)\n\n\nclass ReverseInstance(AsyncioServerInstance[mode_specs.ReverseMode]):\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.ReverseProxy(context)\n\n\nclass Socks5Instance(AsyncioServerInstance[mode_specs.Socks5Mode]):\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.Socks5Proxy(context)\n\n\nclass DnsInstance(AsyncioServerInstance[mode_specs.DnsMode]):\n def make_top_layer(self, context: Context) -> Layer:\n return layers.DNSLayer(context)\n\n\n# class Http3Instance(AsyncioServerInstance[mode_specs.Http3Mode]):\n# def make_top_layer(self, context: Context) -> Layer:\n# return layers.modes.HttpProxy(context)\n", "path": "mitmproxy/proxy/mode_servers.py" } ]
[ { "content": "\"\"\"\nThis module defines \"server instances\", which manage\nthe TCP/UDP servers spawned by mitmproxy as specified by the proxy mode.\n\nExample:\n\n mode = ProxyMode.parse(\"reverse:https://example.com\")\n inst = ServerInstance.make(mode, manager_that_handles_callbacks)\n await inst.start()\n # TCP server is running now.\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport errno\nimport json\nimport logging\nimport os\nimport socket\nimport sys\nimport textwrap\nimport typing\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import cast\nfrom typing import ClassVar\nfrom typing import Generic\nfrom typing import get_args\nfrom typing import TYPE_CHECKING\nfrom typing import TypeVar\n\nimport mitmproxy_rs\n\nfrom mitmproxy import ctx\nfrom mitmproxy import flow\nfrom mitmproxy import platform\nfrom mitmproxy.connection import Address\nfrom mitmproxy.net import local_ip\nfrom mitmproxy.proxy import commands\nfrom mitmproxy.proxy import layers\nfrom mitmproxy.proxy import mode_specs\nfrom mitmproxy.proxy import server\nfrom mitmproxy.proxy.context import Context\nfrom mitmproxy.proxy.layer import Layer\nfrom mitmproxy.utils import human\n\nif sys.version_info < (3, 11):\n from typing_extensions import Self # pragma: no cover\nelse:\n from typing import Self\n\nif TYPE_CHECKING:\n from mitmproxy.master import Master\n\nlogger = logging.getLogger(__name__)\n\n\nclass ProxyConnectionHandler(server.LiveConnectionHandler):\n master: Master\n\n def __init__(self, master, r, w, options, mode):\n self.master = master\n super().__init__(r, w, options, mode)\n self.log_prefix = f\"{human.format_address(self.client.peername)}: \"\n\n async def handle_hook(self, hook: commands.StartHook) -> None:\n with self.timeout_watchdog.disarm():\n # We currently only support single-argument hooks.\n (data,) = hook.args()\n await self.master.addons.handle_lifecycle(hook)\n if isinstance(data, flow.Flow):\n await data.wait_for_resume() # pragma: no cover\n\n\nM = TypeVar(\"M\", bound=mode_specs.ProxyMode)\n\n\nclass ServerManager(typing.Protocol):\n # temporary workaround: for UDP, we use the 4-tuple because we don't have a uuid.\n connections: dict[tuple | str, ProxyConnectionHandler]\n\n @contextmanager\n def register_connection(\n self, connection_id: tuple | str, handler: ProxyConnectionHandler\n ):\n ... 
# pragma: no cover\n\n\nclass ServerInstance(Generic[M], metaclass=ABCMeta):\n __modes: ClassVar[dict[str, type[ServerInstance]]] = {}\n\n last_exception: Exception | None = None\n\n def __init__(self, mode: M, manager: ServerManager):\n self.mode: M = mode\n self.manager: ServerManager = manager\n\n def __init_subclass__(cls, **kwargs):\n \"\"\"Register all subclasses so that make() finds them.\"\"\"\n # extract mode from Generic[Mode].\n mode = get_args(cls.__orig_bases__[0])[0] # type: ignore\n if not isinstance(mode, TypeVar):\n assert issubclass(mode, mode_specs.ProxyMode)\n assert mode.type_name not in ServerInstance.__modes\n ServerInstance.__modes[mode.type_name] = cls\n\n @classmethod\n def make(\n cls,\n mode: mode_specs.ProxyMode | str,\n manager: ServerManager,\n ) -> Self:\n if isinstance(mode, str):\n mode = mode_specs.ProxyMode.parse(mode)\n inst = ServerInstance.__modes[mode.type_name](mode, manager)\n\n if not isinstance(inst, cls):\n raise ValueError(f\"{mode!r} is not a spec for a {cls.__name__} server.\")\n\n return inst\n\n @property\n @abstractmethod\n def is_running(self) -> bool:\n pass\n\n async def start(self) -> None:\n try:\n await self._start()\n except Exception as e:\n self.last_exception = e\n raise\n else:\n self.last_exception = None\n if self.listen_addrs:\n addrs = \" and \".join({human.format_address(a) for a in self.listen_addrs})\n logger.info(f\"{self.mode.description} listening at {addrs}.\")\n else:\n logger.info(f\"{self.mode.description} started.\")\n\n async def stop(self) -> None:\n listen_addrs = self.listen_addrs\n try:\n await self._stop()\n except Exception as e:\n self.last_exception = e\n raise\n else:\n self.last_exception = None\n if listen_addrs:\n addrs = \" and \".join({human.format_address(a) for a in listen_addrs})\n logger.info(f\"{self.mode.description} at {addrs} stopped.\")\n else:\n logger.info(f\"{self.mode.description} stopped.\")\n\n @abstractmethod\n async def _start(self) -> None:\n pass\n\n @abstractmethod\n async def _stop(self) -> None:\n pass\n\n @property\n @abstractmethod\n def listen_addrs(self) -> tuple[Address, ...]:\n pass\n\n @abstractmethod\n def make_top_layer(self, context: Context) -> Layer:\n pass\n\n def to_json(self) -> dict:\n return {\n \"type\": self.mode.type_name,\n \"description\": self.mode.description,\n \"full_spec\": self.mode.full_spec,\n \"is_running\": self.is_running,\n \"last_exception\": str(self.last_exception) if self.last_exception else None,\n \"listen_addrs\": self.listen_addrs,\n }\n\n async def handle_stream(\n self,\n reader: asyncio.StreamReader | mitmproxy_rs.Stream,\n writer: asyncio.StreamWriter | mitmproxy_rs.Stream,\n ) -> None:\n handler = ProxyConnectionHandler(\n ctx.master, reader, writer, ctx.options, self.mode\n )\n handler.layer = self.make_top_layer(handler.layer.context)\n if isinstance(self.mode, mode_specs.TransparentMode):\n assert isinstance(writer, asyncio.StreamWriter)\n s = cast(socket.socket, writer.get_extra_info(\"socket\"))\n try:\n assert platform.original_addr\n original_dst = platform.original_addr(s)\n except Exception as e:\n logger.error(f\"Transparent mode failure: {e!r}\")\n writer.close()\n return\n else:\n handler.layer.context.client.sockname = original_dst\n handler.layer.context.server.address = original_dst\n elif isinstance(self.mode, (mode_specs.WireGuardMode, mode_specs.LocalMode)):\n handler.layer.context.server.address = writer.get_extra_info(\n \"remote_endpoint\", handler.layer.context.client.sockname\n )\n\n with 
self.manager.register_connection(handler.layer.context.client.id, handler):\n await handler.handle_client()\n\n async def handle_udp_stream(self, stream: mitmproxy_rs.Stream) -> None:\n await self.handle_stream(stream, stream)\n\n\nclass AsyncioServerInstance(ServerInstance[M], metaclass=ABCMeta):\n _servers: list[asyncio.Server | mitmproxy_rs.UdpServer]\n\n def __init__(self, *args, **kwargs) -> None:\n self._servers = []\n super().__init__(*args, **kwargs)\n\n @property\n def is_running(self) -> bool:\n return bool(self._servers)\n\n @property\n def listen_addrs(self) -> tuple[Address, ...]:\n addrs = []\n for s in self._servers:\n if isinstance(s, mitmproxy_rs.UdpServer):\n addrs.append(s.getsockname())\n else:\n try:\n addrs.extend(sock.getsockname() for sock in s.sockets)\n except OSError: # pragma: no cover\n pass # this can fail during shutdown, see https://github.com/mitmproxy/mitmproxy/issues/6529\n return tuple(addrs)\n\n async def _start(self) -> None:\n assert not self._servers\n host = self.mode.listen_host(ctx.options.listen_host)\n port = self.mode.listen_port(ctx.options.listen_port)\n try:\n self._servers = await self.listen(host, port)\n except OSError as e:\n message = f\"{self.mode.description} failed to listen on {host or '*'}:{port} with {e}\"\n if e.errno == errno.EADDRINUSE and self.mode.custom_listen_port is None:\n assert (\n self.mode.custom_listen_host is None\n ) # since [@ [listen_addr:]listen_port]\n message += f\"\\nTry specifying a different port by using `--mode {self.mode.full_spec}@{port + 2}`.\"\n raise OSError(e.errno, message, e.filename) from e\n\n async def _stop(self) -> None:\n assert self._servers\n try:\n for s in self._servers:\n s.close()\n # https://github.com/python/cpython/issues/104344\n # await asyncio.gather(*[s.wait_closed() for s in self._servers])\n finally:\n # we always reset _server and ignore failures\n self._servers = []\n\n async def listen(\n self, host: str, port: int\n ) -> list[asyncio.Server | mitmproxy_rs.UdpServer]:\n if self.mode.transport_protocol == \"tcp\":\n # workaround for https://github.com/python/cpython/issues/89856:\n # We want both IPv4 and IPv6 sockets to bind to the same port.\n # This may fail (https://github.com/mitmproxy/mitmproxy/pull/5542#issuecomment-1222803291),\n # so we try to cover the 99% case and then give up and fall back to what asyncio does.\n if port == 0:\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((\"\", 0))\n fixed_port = s.getsockname()[1]\n s.close()\n return [\n await asyncio.start_server(self.handle_stream, host, fixed_port)\n ]\n except Exception as e:\n logger.debug(\n f\"Failed to listen on a single port ({e!r}), falling back to default behavior.\"\n )\n return [await asyncio.start_server(self.handle_stream, host, port)]\n elif self.mode.transport_protocol == \"udp\":\n # we start two servers for dual-stack support.\n # On Linux, this would also be achievable by toggling IPV6_V6ONLY off, but this here works cross-platform.\n if host == \"\":\n ipv4 = await mitmproxy_rs.start_udp_server(\n \"0.0.0.0\",\n port,\n self.handle_udp_stream,\n )\n try:\n ipv6 = await mitmproxy_rs.start_udp_server(\n \"::\",\n ipv4.getsockname()[1],\n self.handle_udp_stream,\n )\n except Exception: # pragma: no cover\n logger.debug(\"Failed to listen on '::', listening on IPv4 only.\")\n return [ipv4]\n else: # pragma: no cover\n return [ipv4, ipv6]\n return [\n await mitmproxy_rs.start_udp_server(\n host,\n port,\n self.handle_udp_stream,\n )\n ]\n else:\n raise 
AssertionError(self.mode.transport_protocol)\n\n\nclass WireGuardServerInstance(ServerInstance[mode_specs.WireGuardMode]):\n _server: mitmproxy_rs.WireGuardServer | None = None\n\n server_key: str\n client_key: str\n\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.TransparentProxy(context)\n\n @property\n def is_running(self) -> bool:\n return self._server is not None\n\n @property\n def listen_addrs(self) -> tuple[Address, ...]:\n if self._server:\n return (self._server.getsockname(),)\n else:\n return tuple()\n\n async def _start(self) -> None:\n assert self._server is None\n host = self.mode.listen_host(ctx.options.listen_host)\n port = self.mode.listen_port(ctx.options.listen_port)\n\n if self.mode.data:\n conf_path = Path(self.mode.data).expanduser()\n else:\n conf_path = Path(ctx.options.confdir).expanduser() / \"wireguard.conf\"\n\n if not conf_path.exists():\n conf_path.parent.mkdir(parents=True, exist_ok=True)\n conf_path.write_text(\n json.dumps(\n {\n \"server_key\": mitmproxy_rs.genkey(),\n \"client_key\": mitmproxy_rs.genkey(),\n },\n indent=4,\n )\n )\n\n try:\n c = json.loads(conf_path.read_text())\n self.server_key = c[\"server_key\"]\n self.client_key = c[\"client_key\"]\n except Exception as e:\n raise ValueError(f\"Invalid configuration file ({conf_path}): {e}\") from e\n # error early on invalid keys\n p = mitmproxy_rs.pubkey(self.client_key)\n _ = mitmproxy_rs.pubkey(self.server_key)\n\n self._server = await mitmproxy_rs.start_wireguard_server(\n host or \"0.0.0.0\",\n port,\n self.server_key,\n [p],\n self.wg_handle_stream,\n self.wg_handle_stream,\n )\n\n conf = self.client_conf()\n assert conf\n logger.info(\"-\" * 60 + \"\\n\" + conf + \"\\n\" + \"-\" * 60)\n\n def client_conf(self) -> str | None:\n if not self._server:\n return None\n host = local_ip.get_local_ip() or local_ip.get_local_ip6()\n port = self.mode.listen_port(ctx.options.listen_port)\n return textwrap.dedent(\n f\"\"\"\n [Interface]\n PrivateKey = {self.client_key}\n Address = 10.0.0.1/32\n DNS = 10.0.0.53\n\n [Peer]\n PublicKey = {mitmproxy_rs.pubkey(self.server_key)}\n AllowedIPs = 0.0.0.0/0\n Endpoint = {host}:{port}\n \"\"\"\n ).strip()\n\n def to_json(self) -> dict:\n return {\"wireguard_conf\": self.client_conf(), **super().to_json()}\n\n async def _stop(self) -> None:\n assert self._server is not None\n try:\n self._server.close()\n await self._server.wait_closed()\n finally:\n self._server = None\n\n async def wg_handle_stream(self, stream: mitmproxy_rs.Stream) -> None:\n await self.handle_stream(stream, stream)\n\n\nclass LocalRedirectorInstance(ServerInstance[mode_specs.LocalMode]):\n _server: ClassVar[mitmproxy_rs.LocalRedirector | None] = None\n \"\"\"The local redirector daemon. Will be started once and then reused for all future instances.\"\"\"\n _instance: ClassVar[LocalRedirectorInstance | None] = None\n \"\"\"The current LocalRedirectorInstance. 
Will be unset again if an instance is stopped.\"\"\"\n listen_addrs = ()\n\n @property\n def is_running(self) -> bool:\n return self._instance is not None\n\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.TransparentProxy(context)\n\n @classmethod\n async def redirector_handle_stream(\n cls,\n stream: mitmproxy_rs.Stream,\n ) -> None:\n if cls._instance is not None:\n await cls._instance.handle_stream(stream, stream)\n\n async def _start(self) -> None:\n if self._instance:\n raise RuntimeError(\"Cannot spawn more than one local redirector.\")\n\n if self.mode.data.startswith(\"!\"):\n spec = f\"{self.mode.data},{os.getpid()}\"\n elif self.mode.data:\n spec = self.mode.data\n else:\n spec = f\"!{os.getpid()}\"\n\n cls = self.__class__\n cls._instance = self # assign before awaiting to avoid races\n if cls._server is None:\n try:\n cls._server = await mitmproxy_rs.start_local_redirector(\n cls.redirector_handle_stream,\n cls.redirector_handle_stream,\n )\n except Exception:\n cls._instance = None\n raise\n\n cls._server.set_intercept(spec)\n\n async def _stop(self) -> None:\n assert self._instance\n assert self._server\n self.__class__._instance = None\n # We're not shutting down the server because we want to avoid additional UAC prompts.\n self._server.set_intercept(\"\")\n\n\nclass RegularInstance(AsyncioServerInstance[mode_specs.RegularMode]):\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.HttpProxy(context)\n\n\nclass UpstreamInstance(AsyncioServerInstance[mode_specs.UpstreamMode]):\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.HttpUpstreamProxy(context)\n\n\nclass TransparentInstance(AsyncioServerInstance[mode_specs.TransparentMode]):\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.TransparentProxy(context)\n\n\nclass ReverseInstance(AsyncioServerInstance[mode_specs.ReverseMode]):\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.ReverseProxy(context)\n\n\nclass Socks5Instance(AsyncioServerInstance[mode_specs.Socks5Mode]):\n def make_top_layer(self, context: Context) -> Layer:\n return layers.modes.Socks5Proxy(context)\n\n\nclass DnsInstance(AsyncioServerInstance[mode_specs.DnsMode]):\n def make_top_layer(self, context: Context) -> Layer:\n return layers.DNSLayer(context)\n\n\n# class Http3Instance(AsyncioServerInstance[mode_specs.Http3Mode]):\n# def make_top_layer(self, context: Context) -> Layer:\n# return layers.modes.HttpProxy(context)\n", "path": "mitmproxy/proxy/mode_servers.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index e40679ec8d..494dc2535f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,9 @@ ## Unreleased: mitmproxy next +* Fix a regression introduced in mitmproxy 10.2.0: WireGuard servers + now bind to all interfaces again. + ([#6587](https://github.com/mitmproxy/mitmproxy/pull/6587), @mhils) * Remove stale reference to `ctx.log` in addon documentation. ([#6552](https://github.com/mitmproxy/mitmproxy/pull/6552), @brojonat) * Fix a bug where a traceback is shown during shutdown. diff --git a/mitmproxy/proxy/mode_servers.py b/mitmproxy/proxy/mode_servers.py index c66b61811c..04fda09c5f 100644 --- a/mitmproxy/proxy/mode_servers.py +++ b/mitmproxy/proxy/mode_servers.py @@ -372,7 +372,7 @@ async def _start(self) -> None: _ = mitmproxy_rs.pubkey(self.server_key) self._server = await mitmproxy_rs.start_wireguard_server( - host or "127.0.0.1", + host or "0.0.0.0", port, self.server_key, [p],
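For context on the one-line change above: the WireGuard server's listen host falls back through `host or <default>`, so the default value decides which interfaces are reachable. A hypothetical illustration of that fallback (not code from the project):

```
# Sketch of the bind-address fallback restored by the diff above.
host = ""                      # listen_host left unset by the user
bound = host or "0.0.0.0"      # after the fix: bind all IPv4 interfaces
# bound = host or "127.0.0.1"  # regression: loopback only, so external
#                              # WireGuard peers could not reach the server
print(bound)                   # -> 0.0.0.0
```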
chainer__chainer-524
Error type mismatch when broadcast fails When broadcast fails, numpy raises `ValueError`, but cupy raises `RuntimeError`.
[ { "content": "import six\n\nimport cupy\nfrom cupy import internal\n\n\nzip_longest = six.moves.zip_longest\nsix_zip = six.moves.zip\n\n\ndef atleast_1d(*arys):\n \"\"\"Converts arrays to arrays with dimensions >= 1.\n\n Args:\n arys (tuple of arrays): Arrays to be converted. All arguments must be\n cupy.ndarray objects. Only zero-dimensional array is affected.\n\n Returns:\n If there are only one input, then it returns its converted version.\n Otherwise, it returns a list of converted arrays.\n\n .. seealso:: :func:`numpy.atleast_1d`\n\n \"\"\"\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_1d')\n if a.ndim == 0:\n a = a.reshape(1)\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res\n\n\ndef atleast_2d(*arys):\n \"\"\"Converts arrays to arrays with dimensions >= 2.\n\n If an input array has dimensions less than two, then this function inserts\n new axes at the head of dimensions to make it have two dimensions.\n\n Args:\n arys (tuple of arrays): Arrays to be converted. All arguments must be\n cupy.ndarray objects.\n\n Returns:\n If there are only one input, then it returns its converted version.\n Otherwise, it returns a list of converted arrays.\n\n .. seealso:: :func:`numpy.atleast_2d`\n\n \"\"\"\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_2d')\n if a.ndim == 0:\n a = a.reshape(1, 1)\n elif a.ndim == 1:\n a = a[cupy.newaxis, :]\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res\n\n\ndef atleast_3d(*arys):\n \"\"\"Converts arrays to arrays with dimensions >= 3.\n\n If an input array has dimensions less than three, then this function\n inserts new axes to make it have three dimensions. The place of the new\n axes are following:\n\n - If its shape is ``()``, then the shape of output is ``(1, 1, 1)``.\n - If its shape is ``(N,)``, then the shape of output is ``(1, N, 1)``.\n - If its shape is ``(M, N)``, then the shape of output is ``(M, N, 1)``.\n - Otherwise, the output is the input array itself.\n\n Args:\n arys (tuple of arrays): Arrays to be converted. All arguments must be\n cupy.ndarray objects.\n\n Returns:\n If there are only one input, then it returns its converted version.\n Otherwise, it returns a list of converted arrays.\n\n .. seealso:: :func:`numpy.atleast_3d`\n\n \"\"\"\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_3d')\n if a.ndim == 0:\n a = a.reshape(1, 1, 1)\n elif a.ndim == 1:\n a = a[cupy.newaxis, :, cupy.newaxis]\n elif a.ndim == 2:\n a = a[:, :, cupy.newaxis]\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res\n\n\nclass broadcast(object):\n \"\"\"Object that performs broadcasting.\n\n CuPy actually uses this class to support broadcasting in various\n operations. Note that this class does not provide an iterator.\n\n Args:\n arrays (tuple of arrays): Arrays to be broadcasted.\n\n Attributes:\n shape (tuple of ints): The broadcasted shape.\n nd (int): Number of dimensions of the broadcasted shape.\n size (int): Total size of the broadcasted shape.\n values (list of arrays): The broadcasted arrays.\n\n .. 
seealso:: :class:`numpy.broadcast`\n\n \"\"\"\n\n def __init__(self, *arrays):\n ndarray = cupy.ndarray\n rev = slice(None, None, -1)\n shape_arr = [a._shape[rev] for a in arrays\n if isinstance(a, ndarray)]\n r_shape = [max(ss) for ss in zip_longest(*shape_arr, fillvalue=0)]\n\n self.shape = shape = tuple(r_shape[rev])\n self.size = size = internal.prod(shape)\n self.nd = ndim = len(shape)\n\n broadcasted = list(arrays)\n for i, a in enumerate(broadcasted):\n if not isinstance(a, ndarray):\n continue\n\n a_shape = a.shape\n if a_shape == shape:\n continue\n\n r_strides = [\n a_st if sh == a_sh else (0 if a_sh == 1 else None)\n for sh, a_sh, a_st\n in six_zip(r_shape, a._shape[rev], a._strides[rev])]\n\n if None in r_strides:\n raise RuntimeError('Broadcasting failed')\n\n offset = (0,) * (ndim - len(r_strides))\n\n broadcasted[i] = view = a.view()\n view._shape = shape\n view._strides = offset + tuple(r_strides[rev])\n view._size = size\n view._c_contiguous = -1\n view._f_contiguous = -1\n\n self.values = tuple(broadcasted)\n\n\ndef broadcast_arrays(*args):\n \"\"\"Broadcasts given arrays.\n\n Args:\n args (tuple of arrays): Arrays to broadcast for each other.\n\n Returns:\n list: A list of broadcasted arrays.\n\n .. seealso:: :func:`numpy.broadcast_arrays`\n\n \"\"\"\n return broadcast(*args).values\n\n\ndef expand_dims(a, axis):\n \"\"\"Expands given arrays.\n\n Args:\n a (cupy.ndarray): Array to be expanded.\n axis (int): Position where new axis is to be inserted.\n\n Returns:\n cupy.ndarray: The number of dimensions is one greater than that of\n the input array.\n\n .. seealso:: :func:`numpy.expand_dims`\n\n \"\"\"\n shape = a.shape\n if axis < 0:\n axis = axis + len(shape) + 1\n return a.reshape(shape[:axis] + (1,) + shape[axis:])\n\n\ndef squeeze(a, axis=None):\n \"\"\"Removes size-one axes from the shape of an array.\n\n Args:\n a (cupy.ndarray): Array to be reshaped.\n axis (int or tuple of ints): Axes to be removed. This function removes\n all size-one axes by default. If one of the specified axes is not\n of size one, an exception is raised.\n\n Returns:\n cupy.ndarray: An array without (specified) size-one axes.\n\n .. seealso:: :func:`numpy.squeeze`\n\n \"\"\"\n if axis is None:\n axis = tuple(i for i, n in enumerate(a._shape) if n == 1)\n elif isinstance(axis, int):\n axis = axis,\n\n new_shape = []\n new_strides = []\n j = 0\n for i, n in enumerate(a._shape):\n if j < len(axis) and i == axis[j]:\n if n != 1:\n raise RuntimeError('Cannot squeeze dimension of size > 1')\n j += 1\n else:\n new_shape.append(n)\n new_strides.append(a._strides[i])\n\n v = a.view()\n v._shape = tuple(new_shape)\n v._strides = tuple(new_strides)\n v._c_contiguous = -1\n v._f_contiguous = -1\n return v\n", "path": "cupy/manipulation/dims.py" } ]
[ { "content": "import six\n\nimport cupy\nfrom cupy import internal\n\n\nzip_longest = six.moves.zip_longest\nsix_zip = six.moves.zip\n\n\ndef atleast_1d(*arys):\n \"\"\"Converts arrays to arrays with dimensions >= 1.\n\n Args:\n arys (tuple of arrays): Arrays to be converted. All arguments must be\n cupy.ndarray objects. Only zero-dimensional array is affected.\n\n Returns:\n If there are only one input, then it returns its converted version.\n Otherwise, it returns a list of converted arrays.\n\n .. seealso:: :func:`numpy.atleast_1d`\n\n \"\"\"\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_1d')\n if a.ndim == 0:\n a = a.reshape(1)\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res\n\n\ndef atleast_2d(*arys):\n \"\"\"Converts arrays to arrays with dimensions >= 2.\n\n If an input array has dimensions less than two, then this function inserts\n new axes at the head of dimensions to make it have two dimensions.\n\n Args:\n arys (tuple of arrays): Arrays to be converted. All arguments must be\n cupy.ndarray objects.\n\n Returns:\n If there are only one input, then it returns its converted version.\n Otherwise, it returns a list of converted arrays.\n\n .. seealso:: :func:`numpy.atleast_2d`\n\n \"\"\"\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_2d')\n if a.ndim == 0:\n a = a.reshape(1, 1)\n elif a.ndim == 1:\n a = a[cupy.newaxis, :]\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res\n\n\ndef atleast_3d(*arys):\n \"\"\"Converts arrays to arrays with dimensions >= 3.\n\n If an input array has dimensions less than three, then this function\n inserts new axes to make it have three dimensions. The place of the new\n axes are following:\n\n - If its shape is ``()``, then the shape of output is ``(1, 1, 1)``.\n - If its shape is ``(N,)``, then the shape of output is ``(1, N, 1)``.\n - If its shape is ``(M, N)``, then the shape of output is ``(M, N, 1)``.\n - Otherwise, the output is the input array itself.\n\n Args:\n arys (tuple of arrays): Arrays to be converted. All arguments must be\n cupy.ndarray objects.\n\n Returns:\n If there are only one input, then it returns its converted version.\n Otherwise, it returns a list of converted arrays.\n\n .. seealso:: :func:`numpy.atleast_3d`\n\n \"\"\"\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_3d')\n if a.ndim == 0:\n a = a.reshape(1, 1, 1)\n elif a.ndim == 1:\n a = a[cupy.newaxis, :, cupy.newaxis]\n elif a.ndim == 2:\n a = a[:, :, cupy.newaxis]\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res\n\n\nclass broadcast(object):\n \"\"\"Object that performs broadcasting.\n\n CuPy actually uses this class to support broadcasting in various\n operations. Note that this class does not provide an iterator.\n\n Args:\n arrays (tuple of arrays): Arrays to be broadcasted.\n\n Attributes:\n shape (tuple of ints): The broadcasted shape.\n nd (int): Number of dimensions of the broadcasted shape.\n size (int): Total size of the broadcasted shape.\n values (list of arrays): The broadcasted arrays.\n\n .. 
seealso:: :class:`numpy.broadcast`\n\n \"\"\"\n\n def __init__(self, *arrays):\n ndarray = cupy.ndarray\n rev = slice(None, None, -1)\n shape_arr = [a._shape[rev] for a in arrays\n if isinstance(a, ndarray)]\n r_shape = [max(ss) for ss in zip_longest(*shape_arr, fillvalue=0)]\n\n self.shape = shape = tuple(r_shape[rev])\n self.size = size = internal.prod(shape)\n self.nd = ndim = len(shape)\n\n broadcasted = list(arrays)\n for i, a in enumerate(broadcasted):\n if not isinstance(a, ndarray):\n continue\n\n a_shape = a.shape\n if a_shape == shape:\n continue\n\n r_strides = [\n a_st if sh == a_sh else (0 if a_sh == 1 else None)\n for sh, a_sh, a_st\n in six_zip(r_shape, a._shape[rev], a._strides[rev])]\n\n if None in r_strides:\n raise ValueError('Broadcasting failed')\n\n offset = (0,) * (ndim - len(r_strides))\n\n broadcasted[i] = view = a.view()\n view._shape = shape\n view._strides = offset + tuple(r_strides[rev])\n view._size = size\n view._c_contiguous = -1\n view._f_contiguous = -1\n\n self.values = tuple(broadcasted)\n\n\ndef broadcast_arrays(*args):\n \"\"\"Broadcasts given arrays.\n\n Args:\n args (tuple of arrays): Arrays to broadcast for each other.\n\n Returns:\n list: A list of broadcasted arrays.\n\n .. seealso:: :func:`numpy.broadcast_arrays`\n\n \"\"\"\n return broadcast(*args).values\n\n\ndef expand_dims(a, axis):\n \"\"\"Expands given arrays.\n\n Args:\n a (cupy.ndarray): Array to be expanded.\n axis (int): Position where new axis is to be inserted.\n\n Returns:\n cupy.ndarray: The number of dimensions is one greater than that of\n the input array.\n\n .. seealso:: :func:`numpy.expand_dims`\n\n \"\"\"\n shape = a.shape\n if axis < 0:\n axis = axis + len(shape) + 1\n return a.reshape(shape[:axis] + (1,) + shape[axis:])\n\n\ndef squeeze(a, axis=None):\n \"\"\"Removes size-one axes from the shape of an array.\n\n Args:\n a (cupy.ndarray): Array to be reshaped.\n axis (int or tuple of ints): Axes to be removed. This function removes\n all size-one axes by default. If one of the specified axes is not\n of size one, an exception is raised.\n\n Returns:\n cupy.ndarray: An array without (specified) size-one axes.\n\n .. seealso:: :func:`numpy.squeeze`\n\n \"\"\"\n if axis is None:\n axis = tuple(i for i, n in enumerate(a._shape) if n == 1)\n elif isinstance(axis, int):\n axis = axis,\n\n new_shape = []\n new_strides = []\n j = 0\n for i, n in enumerate(a._shape):\n if j < len(axis) and i == axis[j]:\n if n != 1:\n raise RuntimeError('Cannot squeeze dimension of size > 1')\n j += 1\n else:\n new_shape.append(n)\n new_strides.append(a._strides[i])\n\n v = a.view()\n v._shape = tuple(new_shape)\n v._strides = tuple(new_strides)\n v._c_contiguous = -1\n v._f_contiguous = -1\n return v\n", "path": "cupy/manipulation/dims.py" } ]
diff --git a/cupy/manipulation/dims.py b/cupy/manipulation/dims.py index dfb0cc92a39e..5f6158fd34c1 100644 --- a/cupy/manipulation/dims.py +++ b/cupy/manipulation/dims.py @@ -149,7 +149,7 @@ def __init__(self, *arrays): in six_zip(r_shape, a._shape[rev], a._strides[rev])] if None in r_strides: - raise RuntimeError('Broadcasting failed') + raise ValueError('Broadcasting failed') offset = (0,) * (ndim - len(r_strides)) diff --git a/cupy/testing/__init__.py b/cupy/testing/__init__.py index 1858ed467d89..5433c66a4ff1 100644 --- a/cupy/testing/__init__.py +++ b/cupy/testing/__init__.py @@ -18,6 +18,7 @@ numpy_cupy_array_equal = helper.numpy_cupy_array_equal numpy_cupy_array_list_equal = helper.numpy_cupy_array_list_equal numpy_cupy_array_less = helper.numpy_cupy_array_less +numpy_cupy_raises = helper.numpy_cupy_raises for_dtypes = helper.for_dtypes for_all_dtypes = helper.for_all_dtypes for_float_dtypes = helper.for_float_dtypes diff --git a/cupy/testing/helper.py b/cupy/testing/helper.py index 495180e0718a..a9e6367a16ea 100644 --- a/cupy/testing/helper.py +++ b/cupy/testing/helper.py @@ -131,6 +131,30 @@ def test_func(self, *args, **kw): return decorator +def numpy_cupy_raises(name='xp'): + def decorator(impl): + @functools.wraps(impl) + def test_func(self, *args, **kw): + kw[name] = cupy + try: + impl(self, *args, **kw) + cupy_error = None + except Exception as e: + cupy_error = e + + kw[name] = numpy + try: + impl(self, *args, **kw) + numpy_error = None + except Exception as e: + numpy_error = e + + self.assertIs(type(cupy_error), type(numpy_error)) + self.assertIsNotNone(cupy_error) + return test_func + return decorator + + def for_dtypes(dtypes, name='dtype'): def decorator(impl): @functools.wraps(impl) diff --git a/tests/cupy_tests/manipulation_tests/test_dims.py b/tests/cupy_tests/manipulation_tests/test_dims.py index d24710e05e67..0dcb761e0a64 100644 --- a/tests/cupy_tests/manipulation_tests/test_dims.py +++ b/tests/cupy_tests/manipulation_tests/test_dims.py @@ -59,6 +59,12 @@ def test_broadcast(self): self.assertEqual(2 * 3 * 3 * 4, bc.size) self.assertEqual(4, bc.nd) + @testing.numpy_cupy_raises() + def test_broadcast_fail(self, xp): + a = xp.zeros((2, 3)) + b = xp.zeros((3, 2)) + xp.broadcast(a, b) + @testing.numpy_cupy_array_equal() def test_expand_dims0(self, xp): a = testing.shaped_arange((2, 3), xp)
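A minimal sketch (not taken from the PR) of the behaviour the change above aligns on: NumPy reports an impossible broadcast as a `ValueError`, and `cupy.broadcast` now raises the same exception type, which is exactly what the new `numpy_cupy_raises` test decorator asserts.

```
import numpy as np  # after the fix, cupy.broadcast behaves the same way

try:
    np.broadcast(np.zeros((2, 3)), np.zeros((3, 2)))  # incompatible shapes
except ValueError as exc:
    # NumPy raises ValueError for the shape mismatch; the patched
    # cupy.broadcast raises ValueError as well instead of RuntimeError.
    print(type(exc).__name__, exc)
```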
archlinux__archinstall-1954
[master] awesome (desktops in general?) don't install packages? It appears when choosing awesome, install is called: https://github.com/archlinux/archinstall/blob/7326d51161bf6fd7f1c683cf1d7ce09338efe4b7/archinstall/default_profiles/desktops/awesome.py#L23-L24 And super being `XorgProfile`: https://github.com/archlinux/archinstall/blob/7326d51161bf6fd7f1c683cf1d7ce09338efe4b7/archinstall/default_profiles/xorg.py#L1-L21 That class does not have an `install` method, so it calls `Profile.install()` which contains: https://github.com/archlinux/archinstall/blob/7326d51161bf6fd7f1c683cf1d7ce09338efe4b7/archinstall/default_profiles/profile.py#L101-L104 Which is just a placeholder? ![screenshot](https://github.com/archlinux/archinstall/assets/861439/cb60a03a-1307-491a-851c-b419544a8b2b) I haven't run through all the profiles yet, but have we overlooked something here? What happened to all the packages per profile when we moved them to the dataclass structure? :) I obviously missed something in a PR somewhere hehe
[ { "content": "from typing import List, Optional, Any, TYPE_CHECKING\n\nfrom archinstall.default_profiles.profile import ProfileType\nfrom archinstall.default_profiles.xorg import XorgProfile\n\nif TYPE_CHECKING:\n\tfrom archinstall.lib.installer import Installer\n\t_: Any\n\n\nclass AwesomeProfile(XorgProfile):\n\tdef __init__(self):\n\t\tsuper().__init__('Awesome', ProfileType.WindowMgr, description='')\n\n\t@property\n\tdef packages(self) -> List[str]:\n\t\treturn ['alacritty']\n\n\tdef preview_text(self) -> Optional[str]:\n\t\ttext = str(_('Environment type: {}')).format(self.profile_type.value)\n\t\treturn text + '\\n' + self.packages_text()\n\n\tdef install(self, install_session: 'Installer'):\n\t\tsuper().install(install_session)\n\n\t\t# TODO: Copy a full configuration to ~/.config/awesome/rc.lua instead.\n\t\twith open(f\"{install_session.target}/etc/xdg/awesome/rc.lua\", 'r') as fh:\n\t\t\tawesome_lua = fh.read()\n\n\t\t# Replace xterm with alacritty for a smoother experience.\n\t\tawesome_lua = awesome_lua.replace('\"xterm\"', '\"alacritty\"')\n\n\t\twith open(f\"{install_session.target}/etc/xdg/awesome/rc.lua\", 'w') as fh:\n\t\t\tfh.write(awesome_lua)\n\n\t\t# TODO: Configure the right-click-menu to contain the above packages that were installed. (as a user config)\n", "path": "archinstall/default_profiles/desktops/awesome.py" } ]
[ { "content": "from typing import List, Optional, Any, TYPE_CHECKING\n\nfrom archinstall.default_profiles.profile import ProfileType\nfrom archinstall.default_profiles.xorg import XorgProfile\n\nif TYPE_CHECKING:\n\tfrom archinstall.lib.installer import Installer\n\t_: Any\n\n\nclass AwesomeProfile(XorgProfile):\n\tdef __init__(self):\n\t\tsuper().__init__('Awesome', ProfileType.WindowMgr, description='')\n\n\t@property\n\tdef packages(self) -> List[str]:\n\t\treturn [\n\t\t\t'awesome',\n\t\t\t'alacritty'\n\t\t]\n\n\tdef preview_text(self) -> Optional[str]:\n\t\ttext = str(_('Environment type: {}')).format(self.profile_type.value)\n\t\treturn text + '\\n' + self.packages_text()\n\n\tdef install(self, install_session: 'Installer'):\n\t\tsuper().install(install_session)\n\n\t\t# TODO: Copy a full configuration to ~/.config/awesome/rc.lua instead.\n\t\twith open(f\"{install_session.target}/etc/xdg/awesome/rc.lua\", 'r') as fh:\n\t\t\tawesome_lua = fh.read()\n\n\t\t# Replace xterm with alacritty for a smoother experience.\n\t\tawesome_lua = awesome_lua.replace('\"xterm\"', '\"alacritty\"')\n\n\t\twith open(f\"{install_session.target}/etc/xdg/awesome/rc.lua\", 'w') as fh:\n\t\t\tfh.write(awesome_lua)\n\n\t\t# TODO: Configure the right-click-menu to contain the above packages that were installed. (as a user config)\n", "path": "archinstall/default_profiles/desktops/awesome.py" } ]
diff --git a/archinstall/default_profiles/desktops/awesome.py b/archinstall/default_profiles/desktops/awesome.py index bb4819146e..371e51dbef 100644 --- a/archinstall/default_profiles/desktops/awesome.py +++ b/archinstall/default_profiles/desktops/awesome.py @@ -14,7 +14,10 @@ def __init__(self): @property def packages(self) -> List[str]: - return ['alacritty'] + return [ + 'awesome', + 'alacritty' + ] def preview_text(self) -> Optional[str]: text = str(_('Environment type: {}')).format(self.profile_type.value)
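The fix above is simply to make the profile's `packages` property include the window manager itself, since the generic install step only installs whatever that property returns. A hypothetical profile following the same pattern (class and package names here are illustrative, not part of archinstall):

```
from typing import List


class ExampleWmProfile:  # illustrative stand-in for an XorgProfile subclass
    @property
    def packages(self) -> List[str]:
        # List the window manager itself, not only the extras,
        # because nothing else adds it to the package set.
        return [
            'examplewm',   # hypothetical window manager package
            'alacritty',   # terminal emulator shipped alongside it
        ]
```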
dynaconf__dynaconf-769
[RFC] Resolve deprecation warning for deprecated property kv **Is your feature request related to a problem? Please describe.** Yes. Currently we are hitting a deprecation warning in hvac 0.11, since the kv property is deprecated and we are advised to use it from `Client.secrets`. The warning: DeprecationWarning: Call to deprecated property 'kv'. This property will be removed in version '0.9.0' Please use the 'kv' property on the 'Client.secrets' attribute moving forward **Describe the solution you'd like** Remove the usage of the kv property directly in dynaconf and use it from `Client.secrets`. **Describe alternatives you've considered** No alternative is required.
[ { "content": "# docker run -e 'VAULT_DEV_ROOT_TOKEN_ID=myroot' -p 8200:8200 vault\n# pip install hvac\nfrom __future__ import annotations\n\nfrom dynaconf.utils import build_env_list\nfrom dynaconf.utils.parse_conf import parse_conf_data\n\ntry:\n import boto3\nexcept ImportError:\n boto3 = None\n\ntry:\n from hvac import Client\n from hvac.exceptions import InvalidPath\nexcept ImportError:\n raise ImportError(\n \"vault package is not installed in your environment. \"\n \"`pip install dynaconf[vault]` or disable the vault loader with \"\n \"export VAULT_ENABLED_FOR_DYNACONF=false\"\n )\n\n\nIDENTIFIER = \"vault\"\n\n\n# backwards compatibility\n_get_env_list = build_env_list\n\n\ndef get_client(obj):\n client = Client(\n **{k: v for k, v in obj.VAULT_FOR_DYNACONF.items() if v is not None}\n )\n if obj.VAULT_ROLE_ID_FOR_DYNACONF is not None:\n client.auth_approle(\n role_id=obj.VAULT_ROLE_ID_FOR_DYNACONF,\n secret_id=obj.get(\"VAULT_SECRET_ID_FOR_DYNACONF\"),\n )\n elif obj.VAULT_ROOT_TOKEN_FOR_DYNACONF is not None:\n client.token = obj.VAULT_ROOT_TOKEN_FOR_DYNACONF\n elif obj.VAULT_AUTH_WITH_IAM_FOR_DYNACONF:\n if boto3 is None:\n raise ImportError(\n \"boto3 package is not installed in your environment. \"\n \"`pip install boto3` or disable the VAULT_AUTH_WITH_IAM\"\n )\n\n session = boto3.Session()\n credentials = session.get_credentials()\n client.auth.aws.iam_login(\n credentials.access_key,\n credentials.secret_key,\n credentials.token,\n role=obj.VAULT_AUTH_ROLE_FOR_DYNACONF,\n )\n assert client.is_authenticated(), (\n \"Vault authentication error: is VAULT_TOKEN_FOR_DYNACONF or \"\n \"VAULT_ROLE_ID_FOR_DYNACONF defined?\"\n )\n client.kv.default_kv_version = obj.VAULT_KV_VERSION_FOR_DYNACONF\n return client\n\n\ndef load(obj, env=None, silent=None, key=None):\n \"\"\"Reads and loads in to \"settings\" a single key or all keys from vault\n\n :param obj: the settings instance\n :param env: settings env default='DYNACONF'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :return: None\n \"\"\"\n client = get_client(obj)\n try:\n if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:\n dirs = client.secrets.kv.v2.list_secrets(\n path=obj.VAULT_PATH_FOR_DYNACONF,\n mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\n )[\"data\"][\"keys\"]\n else:\n dirs = client.secrets.kv.v1.list_secrets(\n path=obj.VAULT_PATH_FOR_DYNACONF,\n mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\n )[\"data\"][\"keys\"]\n except InvalidPath:\n # The given path is not a directory\n dirs = []\n # First look for secrets into environments less store\n if not obj.ENVIRONMENTS_FOR_DYNACONF:\n # By adding '', dynaconf will now read secrets from environments-less\n # store which are not written by `dynaconf write` to Vault store\n env_list = [obj.MAIN_ENV_FOR_DYNACONF.lower(), \"\"]\n # Finally, look for secret into all the environments\n else:\n env_list = dirs + build_env_list(obj, env)\n for env in env_list:\n path = \"/\".join([obj.VAULT_PATH_FOR_DYNACONF, env])\n try:\n if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:\n data = client.secrets.kv.v2.read_secret_version(\n path, mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF\n )\n else:\n data = client.secrets.kv.read_secret(\n \"data/\" + path,\n mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\n )\n except InvalidPath:\n # If the path doesn't exist, ignore it and set data to None\n data = None\n if data:\n # There seems to be a data dict within a data dict,\n # extract the inner data\n data = data.get(\"data\", 
{}).get(\"data\", {})\n try:\n if (\n obj.VAULT_KV_VERSION_FOR_DYNACONF == 2\n and obj.ENVIRONMENTS_FOR_DYNACONF\n and data\n ):\n data = data.get(\"data\", {})\n if data and key:\n value = parse_conf_data(\n data.get(key), tomlfy=True, box_settings=obj\n )\n if value:\n obj.set(key, value)\n elif data:\n obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)\n except Exception:\n if silent:\n return False\n raise\n\n\ndef write(obj, data=None, **kwargs):\n \"\"\"Write a value in to loader source\n\n :param obj: settings object\n :param data: vars to be stored\n :param kwargs: vars to be stored\n :return:\n \"\"\"\n if obj.VAULT_ENABLED_FOR_DYNACONF is False:\n raise RuntimeError(\n \"Vault is not configured \\n\"\n \"export VAULT_ENABLED_FOR_DYNACONF=true\\n\"\n \"and configure the VAULT_FOR_DYNACONF_* variables\"\n )\n data = data or {}\n data.update(kwargs)\n if not data:\n raise AttributeError(\"Data must be provided\")\n data = {\"data\": data}\n client = get_client(obj)\n if obj.VAULT_KV_VERSION_FOR_DYNACONF == 1:\n mount_point = obj.VAULT_MOUNT_POINT_FOR_DYNACONF + \"/data\"\n else:\n mount_point = obj.VAULT_MOUNT_POINT_FOR_DYNACONF\n path = \"/\".join([obj.VAULT_PATH_FOR_DYNACONF, obj.current_env.lower()])\n client.secrets.kv.create_or_update_secret(\n path, secret=data, mount_point=mount_point\n )\n load(obj)\n\n\ndef list_envs(obj, path=\"\"):\n \"\"\"\n This function is a helper to get a list of all the existing envs in\n the source of data, the use case is:\n existing_envs = vault_loader.list_envs(settings)\n for env in exiting_envs:\n with settings.using_env(env): # switch to the env\n # do something with a key of that env\n\n :param obj: settings object\n :param path: path to the vault secrets\n :return: list containing all the keys at the given path\n \"\"\"\n client = get_client(obj)\n path = path or obj.get(\"VAULT_PATH_FOR_DYNACONF\")\n try:\n return client.list(f\"/secret/metadata/{path}\")[\"data\"][\"keys\"]\n except TypeError:\n return []\n", "path": "dynaconf/loaders/vault_loader.py" } ]
[ { "content": "# docker run -e 'VAULT_DEV_ROOT_TOKEN_ID=myroot' -p 8200:8200 vault\n# pip install hvac\nfrom __future__ import annotations\n\nfrom dynaconf.utils import build_env_list\nfrom dynaconf.utils.parse_conf import parse_conf_data\n\ntry:\n import boto3\nexcept ImportError:\n boto3 = None\n\ntry:\n from hvac import Client\n from hvac.exceptions import InvalidPath\nexcept ImportError:\n raise ImportError(\n \"vault package is not installed in your environment. \"\n \"`pip install dynaconf[vault]` or disable the vault loader with \"\n \"export VAULT_ENABLED_FOR_DYNACONF=false\"\n )\n\n\nIDENTIFIER = \"vault\"\n\n\n# backwards compatibility\n_get_env_list = build_env_list\n\n\ndef get_client(obj):\n client = Client(\n **{k: v for k, v in obj.VAULT_FOR_DYNACONF.items() if v is not None}\n )\n if obj.VAULT_ROLE_ID_FOR_DYNACONF is not None:\n client.auth_approle(\n role_id=obj.VAULT_ROLE_ID_FOR_DYNACONF,\n secret_id=obj.get(\"VAULT_SECRET_ID_FOR_DYNACONF\"),\n )\n elif obj.VAULT_ROOT_TOKEN_FOR_DYNACONF is not None:\n client.token = obj.VAULT_ROOT_TOKEN_FOR_DYNACONF\n elif obj.VAULT_AUTH_WITH_IAM_FOR_DYNACONF:\n if boto3 is None:\n raise ImportError(\n \"boto3 package is not installed in your environment. \"\n \"`pip install boto3` or disable the VAULT_AUTH_WITH_IAM\"\n )\n\n session = boto3.Session()\n credentials = session.get_credentials()\n client.auth.aws.iam_login(\n credentials.access_key,\n credentials.secret_key,\n credentials.token,\n role=obj.VAULT_AUTH_ROLE_FOR_DYNACONF,\n )\n assert client.is_authenticated(), (\n \"Vault authentication error: is VAULT_TOKEN_FOR_DYNACONF or \"\n \"VAULT_ROLE_ID_FOR_DYNACONF defined?\"\n )\n client.secrets.kv.default_kv_version = obj.VAULT_KV_VERSION_FOR_DYNACONF\n return client\n\n\ndef load(obj, env=None, silent=None, key=None):\n \"\"\"Reads and loads in to \"settings\" a single key or all keys from vault\n\n :param obj: the settings instance\n :param env: settings env default='DYNACONF'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :return: None\n \"\"\"\n client = get_client(obj)\n try:\n if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:\n dirs = client.secrets.kv.v2.list_secrets(\n path=obj.VAULT_PATH_FOR_DYNACONF,\n mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\n )[\"data\"][\"keys\"]\n else:\n dirs = client.secrets.kv.v1.list_secrets(\n path=obj.VAULT_PATH_FOR_DYNACONF,\n mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\n )[\"data\"][\"keys\"]\n except InvalidPath:\n # The given path is not a directory\n dirs = []\n # First look for secrets into environments less store\n if not obj.ENVIRONMENTS_FOR_DYNACONF:\n # By adding '', dynaconf will now read secrets from environments-less\n # store which are not written by `dynaconf write` to Vault store\n env_list = [obj.MAIN_ENV_FOR_DYNACONF.lower(), \"\"]\n # Finally, look for secret into all the environments\n else:\n env_list = dirs + build_env_list(obj, env)\n for env in env_list:\n path = \"/\".join([obj.VAULT_PATH_FOR_DYNACONF, env])\n try:\n if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:\n data = client.secrets.kv.v2.read_secret_version(\n path, mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF\n )\n else:\n data = client.secrets.kv.read_secret(\n \"data/\" + path,\n mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\n )\n except InvalidPath:\n # If the path doesn't exist, ignore it and set data to None\n data = None\n if data:\n # There seems to be a data dict within a data dict,\n # extract the inner data\n data = data.get(\"data\", 
{}).get(\"data\", {})\n try:\n if (\n obj.VAULT_KV_VERSION_FOR_DYNACONF == 2\n and obj.ENVIRONMENTS_FOR_DYNACONF\n and data\n ):\n data = data.get(\"data\", {})\n if data and key:\n value = parse_conf_data(\n data.get(key), tomlfy=True, box_settings=obj\n )\n if value:\n obj.set(key, value)\n elif data:\n obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)\n except Exception:\n if silent:\n return False\n raise\n\n\ndef write(obj, data=None, **kwargs):\n \"\"\"Write a value in to loader source\n\n :param obj: settings object\n :param data: vars to be stored\n :param kwargs: vars to be stored\n :return:\n \"\"\"\n if obj.VAULT_ENABLED_FOR_DYNACONF is False:\n raise RuntimeError(\n \"Vault is not configured \\n\"\n \"export VAULT_ENABLED_FOR_DYNACONF=true\\n\"\n \"and configure the VAULT_FOR_DYNACONF_* variables\"\n )\n data = data or {}\n data.update(kwargs)\n if not data:\n raise AttributeError(\"Data must be provided\")\n data = {\"data\": data}\n client = get_client(obj)\n if obj.VAULT_KV_VERSION_FOR_DYNACONF == 1:\n mount_point = obj.VAULT_MOUNT_POINT_FOR_DYNACONF + \"/data\"\n else:\n mount_point = obj.VAULT_MOUNT_POINT_FOR_DYNACONF\n path = \"/\".join([obj.VAULT_PATH_FOR_DYNACONF, obj.current_env.lower()])\n client.secrets.kv.create_or_update_secret(\n path, secret=data, mount_point=mount_point\n )\n load(obj)\n\n\ndef list_envs(obj, path=\"\"):\n \"\"\"\n This function is a helper to get a list of all the existing envs in\n the source of data, the use case is:\n existing_envs = vault_loader.list_envs(settings)\n for env in exiting_envs:\n with settings.using_env(env): # switch to the env\n # do something with a key of that env\n\n :param obj: settings object\n :param path: path to the vault secrets\n :return: list containing all the keys at the given path\n \"\"\"\n client = get_client(obj)\n path = path or obj.get(\"VAULT_PATH_FOR_DYNACONF\")\n try:\n return client.list(f\"/secret/metadata/{path}\")[\"data\"][\"keys\"]\n except TypeError:\n return []\n", "path": "dynaconf/loaders/vault_loader.py" } ]
diff --git a/dynaconf/loaders/vault_loader.py b/dynaconf/loaders/vault_loader.py index 9408a5aa0..c6b168316 100644 --- a/dynaconf/loaders/vault_loader.py +++ b/dynaconf/loaders/vault_loader.py @@ -58,7 +58,7 @@ def get_client(obj): "Vault authentication error: is VAULT_TOKEN_FOR_DYNACONF or " "VAULT_ROLE_ID_FOR_DYNACONF defined?" ) - client.kv.default_kv_version = obj.VAULT_KV_VERSION_FOR_DYNACONF + client.secrets.kv.default_kv_version = obj.VAULT_KV_VERSION_FOR_DYNACONF return client
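A short sketch of the hvac API difference behind the patch above; the Vault URL and token are placeholders. The deprecated top-level `client.kv` accessor triggers the `DeprecationWarning` quoted in the issue, while the `client.secrets.kv` path used by the patched loader does not:

```
import hvac

client = hvac.Client(url="http://127.0.0.1:8200", token="myroot")  # placeholder dev credentials

# Deprecated accessor (emits a DeprecationWarning in hvac 0.11):
# client.kv.default_kv_version = 2

# Supported accessor, as now used by the vault loader:
client.secrets.kv.default_kv_version = 2
```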
spack__spack-25373
Installation issue: parsec 3.0.2012: Unknown CMake command "FLEX_TARGET" ### Steps to reproduce the issue `parsec` fails at CMake stage using: * `spack@develop` (81be31aee091a6d108a40e0f3dc7ff3bdfa93f26 from `Mon Aug 9 16:07:39 2021 +0200`) * Ubuntu 20.04, GCC 9.3.0 Concrete spec: [parsec.spec.yaml.txt](https://github.com/spack/spack/files/6963572/parsec.spec.yaml.txt) Using DockerHub container image `ecpe4s/ubuntu20.04-runner-x86_64:2021-07-01` for reproducer below: ``` $> git clone https://github.com/spack/spack $> (cd spack && git checkout 81be31aee091) $> . spack/share/spack/setup-env.sh $> spack mirror add E4S https://cache.e4s.io $> spack buildcache keys -it $> spack install -f ./parsec.spec.yaml ... ==> Installing parsec-3.0.2012-27wbe2dfa5kf66pjltlewvhujxhplok4 ==> No binary for parsec-3.0.2012-27wbe2dfa5kf66pjltlewvhujxhplok4 found: installing from source ==> Using cached archive: /spack/var/spack/cache/_source-cache/archive/f5/f565bcfffe106be8237b6aea3e83a5770607b7236606414b6f270244fa6ec3bc.tar.bz2 ==> No patches needed for parsec ==> parsec: Executing phase: 'cmake' ==> Error: ProcessError: Command exited with status 1: 'cmake' '-G' 'Unix Makefiles' '-DCMAKE_INSTALL_PREFIX:STRING=/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/parsec-3.0.2012-27wbe2dfa5kf66pjltlewvhujxhplok4' '-DCMAKE_BUILD_TYPE:STRING=RelWithDebInfo' '-DCMAKE_INTERPROCEDURAL_OPTIMIZATION:BOOL=OFF' '-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON' '-DCMAKE_INSTALL_RPATH_USE_LINK_PATH:BOOL=OFF' '-DCMAKE_INSTALL_RPATH:STRING=/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/parsec-3.0.2012-27wbe2dfa5kf66pjltlewvhujxhplok4/lib;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/parsec-3.0.2012-27wbe2dfa5kf66pjltlewvhujxhplok4/lib64;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/libxml2-2.9.10-qlt5ajqqmtin3atqcycnv67oex63cvny/lib;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/libiconv-1.16-w6zptbclncs3jsnl3i4qtg72fgheiwdf/lib;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/xz-5.2.5-ynkyqliadxgrqpk4mavevv3onehjrdco/lib;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/zlib-1.2.11-p7dmb5pqi6svirhovljrkp3uyfiytz3m/lib;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/hwloc-2.4.1-eeigvjc57oehl37canx4h5lfganpvdq2/lib;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/libpciaccess-0.16-ux5xkhgfhhmitpkxfuf5h6ooudsiwhd4/lib;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/ncurses-6.2-n5vhymfigwg6e45k7thdfyeswuhxojtx/lib;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/mpich-3.4.2-z3qvcplqpvri2tliohtqp7ei2lnvqsf3/lib;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/libfabric-1.12.1-lgug4cxazbl24jdtmttovd5qpnr6os7z/lib;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/cuda-11.4.0-vklb4yvqfk6r4n4ezstwd65ytmigvfed/lib64' 
'-DCMAKE_PREFIX_PATH:STRING=/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/python-3.8.10-4hkgzkkd2svggorn2u2yvtiwrb6ndwfl;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/mpich-3.4.2-z3qvcplqpvri2tliohtqp7ei2lnvqsf3;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/libfabric-1.12.1-lgug4cxazbl24jdtmttovd5qpnr6os7z;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/hwloc-2.4.1-eeigvjc57oehl37canx4h5lfganpvdq2;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/libpciaccess-0.16-ux5xkhgfhhmitpkxfuf5h6ooudsiwhd4;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/cuda-11.4.0-vklb4yvqfk6r4n4ezstwd65ytmigvfed;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/libxml2-2.9.10-qlt5ajqqmtin3atqcycnv67oex63cvny;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/xz-5.2.5-ynkyqliadxgrqpk4mavevv3onehjrdco;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/cmake-3.20.5-zyz2eldbrcih3f6l37dyvdxmyoiyvhv5;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/zlib-1.2.11-p7dmb5pqi6svirhovljrkp3uyfiytz3m;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/libiconv-1.16-w6zptbclncs3jsnl3i4qtg72fgheiwdf;/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/ncurses-6.2-n5vhymfigwg6e45k7thdfyeswuhxojtx' '-DBUILD_SHARED_LIBS:BOOL=ON' '-DPARSEC_GPU_WITH_CUDA:BOOL=ON' '-DPARSEC_PROF_TRACE:BOOL=OFF' '-DPARSEC_DEBUG_HISTORY:BOOL=OFF' '-DPARSEC_DEBUG_PARANOID:BOOL=OFF' '/tmp/root/spack-stage/spack-stage-parsec-3.0.2012-27wbe2dfa5kf66pjltlewvhujxhplok4/spack-src' 1 error found in build log: 181 -- Looking for cudaDeviceCanAccessPeer 182 -- Looking for cudaDeviceCanAccessPeer - found 183 -- Looking for shm_open 184 -- Looking for shm_open - not found 185 -- Looking for shm_open in rt 186 -- Looking for shm_open in rt - found >> 187 CMake Error at parsec/CMakeLists.txt:39 (FLEX_TARGET): 188 Unknown CMake command "FLEX_TARGET". 189 190 191 -- Configuring incomplete, errors occurred! 192 See also "/tmp/root/spack-stage/spack-stage-parsec-3.0.2012-27wbe2dfa5kf66pjltlewvhujxhplok4/spack-build-27wbe2d/CMakeFiles/CMakeOutput.log". 193 See also "/tmp/root/spack-stage/spack-stage-parsec-3.0.2012-27wbe2dfa5kf66pjltlewvhujxhplok4/spack-build-27wbe2d/CMakeFiles/CMakeError.log". See build log for details: /tmp/root/spack-stage/spack-stage-parsec-3.0.2012-27wbe2dfa5kf66pjltlewvhujxhplok4/spack-build-out.txt ``` ### Information on your system * **Spack:** 0.13.3-9086-c5229a3913 * **Python:** 3.8.5 * **Platform:** linux-ubuntu20.04-cascadelake * **Concretizer:** original ### Additional information Build log: [parsec-build-out.txt](https://github.com/spack/spack/files/6963587/parsec-build-out.txt) CMake log: [parsec-01-cmake-out.txt](https://github.com/spack/spack/files/6963585/parsec-01-cmake-out.txt) @abouteiller @bosilca @herault ### General information - [X] I have run `spack debug report` and reported the version of Spack/Python/Platform - [X] I have run `spack maintainers <name-of-the-package>` and **@mentioned** any maintainers - [X] I have uploaded the build log and environment files - [X] I have searched the issues of this repo and believe this is not a duplicate
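The `Unknown CMake command "FLEX_TARGET"` error in the log above typically means CMake's `FindFLEX` module never defined that command because no flex executable was found at configure time. The files below fix this by declaring the generators as build dependencies of the package; a hypothetical excerpt showing just that pattern in a Spack recipe (homepage, url and checksum are placeholders, and this is not the real parsec recipe):

```
from spack import *


class Example(CMakePackage):
    """Illustrative recipe only: declare flex and bison as build-time
    dependencies so the cmake phase can find them and CMake's FindFLEX /
    FindBISON modules define FLEX_TARGET and BISON_TARGET."""

    homepage = "https://example.org"
    url = "https://example.org/example-1.0.tar.gz"

    version('1.0', sha256='0000000000000000000000000000000000000000000000000000000000000000')

    depends_on('flex', type='build')
    depends_on('bison', type='build')
```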
[ { "content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n#\nimport llnl.util.tty as tty\n\nfrom spack import *\n\n\nclass Parsec(CMakePackage, CudaPackage):\n \"\"\"PaRSEC: the Parallel Runtime Scheduler and Execution Controller\n\n PaRSEC is a runtime and a programming toolbox that support the design and\n parallel execution of micro-tasks on distributed, heterogeneous systems.\n \"\"\"\n\n homepage = \"https://icl.utk.edu/dte\"\n git = \"https://bitbucket.org/icldistcomp/parsec.git\"\n url = \"https://bitbucket.org/icldistcomp/parsec/get/parsec-3.0.2012.tar.bz2\"\n list_url = \"https://bitbucket.org/icldistcomp/parsec/downloads/?tab=tags\"\n maintainers = ['abouteiller', 'bosilca', 'herault']\n\n test_requires_compiler = True\n\n version('master', branch='master')\n version('3.0.2012', sha256='f565bcfffe106be8237b6aea3e83a5770607b7236606414b6f270244fa6ec3bc')\n version('1.1.0', sha256='d2928033c121000ae0a554f1e7f757c1f22274a8b74457ecd52744ae1f70b95a', url='https://bitbucket.org/icldistcomp/parsec/get/v1.1.0.tar.bz2')\n\n variant('build_type', default='RelWithDebInfo', description='CMake build type', values=('Debug', 'Release', 'RelWithDebInfo'))\n variant('shared', default=True, description='Build a shared library')\n variant('cuda', default=True, description='Build with CUDA')\n variant('profile', default=False, description='Generate profiling data')\n variant('debug_verbose', default=False, description='Debug version with verbose and paranoid (incurs performance overhead!)')\n conflicts('+debug_verbose build_type=Release', msg='You need to set build_type=Debug for +debug_verbose')\n conflicts('+debug_verbose build_type=RelWithDebInfo', msg='You need to set build_type=Debug for +debug_verbose')\n # TODO: Spack does not handle cross-compilation atm\n # variant('xcompile', default=False, description='Cross compile')\n\n depends_on('[email protected]:', type='build')\n depends_on('python', type='build')\n depends_on('hwloc')\n depends_on('mpi')\n depends_on('papi', when='+profile')\n depends_on('python', type=('build', 'run'), when='+profile')\n depends_on('py-cython', type=('build', 'run'), when='+profile')\n depends_on('py-pandas', type=('build', 'run'), when='+profile')\n depends_on('py-matplotlib', type=('build', 'run'), when='+profile')\n depends_on('py-tables', type=('build', 'run'), when='+profile')\n\n def cmake_args(self):\n args = [\n self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),\n self.define_from_variant('PARSEC_GPU_WITH_CUDA', 'cuda'),\n self.define_from_variant('PARSEC_PROF_TRACE', 'profile'),\n self.define_from_variant('PARSEC_DEBUG_HISTORY', 'debug_verbose'),\n self.define_from_variant('PARSEC_DEBUG_PARANOID', 'debug_verbose'),\n ]\n return args\n\n @run_after('install')\n @on_package_attributes(run_tests=True)\n def check(self):\n \"\"\"Run ctest after building binary.\"\"\"\n with working_dir(self.build_directory):\n try:\n ctest('--output-on-failure', '-j1')\n except ProcessError:\n warn = 'ctest tests failed.\\n'\n warn += 'Please report this failure to:\\n'\n warn += 'https://bitbucket.org/icldistcomp/parsec/issues'\n tty.msg(warn)\n\n def test(self):\n \"\"\"Compile and run a user program with the installed library\"\"\"\n with working_dir(join_path(self.install_test_root,\n 'contrib/build_with_parsec')):\n self.run_test('cmake',\n options=['.'],\n purpose='Check if CMake can find PaRSEC and its 
targets')\n self.run_test('make',\n purpose='Check if tests can compile')\n self.run_test('./dtd_test_allreduce')\n self.run_test('./write_check')\n\n @run_after('install')\n def cache_test_sources(self):\n srcs = ['contrib/build_with_parsec']\n self.cache_extra_test_sources(srcs)\n", "path": "var/spack/repos/builtin/packages/parsec/package.py" } ]
[ { "content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n#\nimport llnl.util.tty as tty\n\nfrom spack import *\n\n\nclass Parsec(CMakePackage, CudaPackage):\n \"\"\"PaRSEC: the Parallel Runtime Scheduler and Execution Controller\n\n PaRSEC is a runtime and a programming toolbox that support the design and\n parallel execution of micro-tasks on distributed, heterogeneous systems.\n \"\"\"\n\n homepage = \"https://icl.utk.edu/dte\"\n git = \"https://bitbucket.org/icldistcomp/parsec.git\"\n url = \"https://bitbucket.org/icldistcomp/parsec/get/parsec-3.0.2012.tar.bz2\"\n list_url = \"https://bitbucket.org/icldistcomp/parsec/downloads/?tab=tags\"\n maintainers = ['abouteiller', 'bosilca', 'herault']\n\n test_requires_compiler = True\n\n version('master', branch='master')\n version('3.0.2012', sha256='f565bcfffe106be8237b6aea3e83a5770607b7236606414b6f270244fa6ec3bc')\n version('1.1.0', sha256='d2928033c121000ae0a554f1e7f757c1f22274a8b74457ecd52744ae1f70b95a', url='https://bitbucket.org/icldistcomp/parsec/get/v1.1.0.tar.bz2')\n\n variant('build_type', default='RelWithDebInfo', description='CMake build type', values=('Debug', 'Release', 'RelWithDebInfo'))\n variant('shared', default=True, description='Build a shared library')\n variant('cuda', default=True, description='Build with CUDA')\n variant('profile', default=False, description='Generate profiling data')\n variant('debug_verbose', default=False, description='Debug version with verbose and paranoid (incurs performance overhead!)')\n conflicts('+debug_verbose build_type=Release', msg='You need to set build_type=Debug for +debug_verbose')\n conflicts('+debug_verbose build_type=RelWithDebInfo', msg='You need to set build_type=Debug for +debug_verbose')\n # TODO: Spack does not handle cross-compilation atm\n # variant('xcompile', default=False, description='Cross compile')\n\n depends_on('[email protected]:', type='build')\n depends_on('python', type='build')\n depends_on('flex', type='build')\n depends_on('bison', type='build')\n depends_on('hwloc')\n depends_on('mpi')\n depends_on('papi', when='+profile')\n depends_on('python', type=('build', 'run'), when='+profile')\n depends_on('py-cython', type=('build', 'run'), when='+profile')\n depends_on('py-pandas', type=('build', 'run'), when='+profile')\n depends_on('py-matplotlib', type=('build', 'run'), when='+profile')\n depends_on('py-tables', type=('build', 'run'), when='+profile')\n\n def cmake_args(self):\n args = [\n self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),\n self.define_from_variant('PARSEC_GPU_WITH_CUDA', 'cuda'),\n self.define_from_variant('PARSEC_PROF_TRACE', 'profile'),\n self.define_from_variant('PARSEC_DEBUG_HISTORY', 'debug_verbose'),\n self.define_from_variant('PARSEC_DEBUG_PARANOID', 'debug_verbose'),\n ]\n return args\n\n @run_after('install')\n @on_package_attributes(run_tests=True)\n def check(self):\n \"\"\"Run ctest after building binary.\"\"\"\n with working_dir(self.build_directory):\n try:\n ctest('--output-on-failure', '-j1')\n except ProcessError:\n warn = 'ctest tests failed.\\n'\n warn += 'Please report this failure to:\\n'\n warn += 'https://bitbucket.org/icldistcomp/parsec/issues'\n tty.msg(warn)\n\n def test(self):\n \"\"\"Compile and run a user program with the installed library\"\"\"\n with working_dir(join_path(self.install_test_root,\n 'contrib/build_with_parsec')):\n 
self.run_test('cmake',\n options=['.'],\n purpose='Check if CMake can find PaRSEC and its targets')\n self.run_test('make',\n purpose='Check if tests can compile')\n self.run_test('./dtd_test_allreduce')\n self.run_test('./write_check')\n\n @run_after('install')\n def cache_test_sources(self):\n srcs = ['contrib/build_with_parsec']\n self.cache_extra_test_sources(srcs)\n", "path": "var/spack/repos/builtin/packages/parsec/package.py" } ]
diff --git a/var/spack/repos/builtin/packages/parsec/package.py b/var/spack/repos/builtin/packages/parsec/package.py index ab154e71b6be6c..014b8e025619e4 100644 --- a/var/spack/repos/builtin/packages/parsec/package.py +++ b/var/spack/repos/builtin/packages/parsec/package.py @@ -39,6 +39,8 @@ class Parsec(CMakePackage, CudaPackage): depends_on('[email protected]:', type='build') depends_on('python', type='build') + depends_on('flex', type='build') + depends_on('bison', type='build') depends_on('hwloc') depends_on('mpi') depends_on('papi', when='+profile')
mozmeao__snippets-service-1221
FieldError on /admin/base/job/add/ https://sentry.prod.mozaws.net/operations/snippets/issues/6453618/?query=is:unresolved "'completed_on' cannot be specified for Job model form as it is a non-editable field. Check fields/fieldsets/exclude attributes of class JobAdmin."
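Background on the traceback: Django will not build a `ModelForm` field for a non-editable model field (which the error message says `completed_on` is), so naming it in a `ModelAdmin`'s `fields`/`fieldsets` raises this `FieldError` unless the field is also listed in `readonly_fields`. The corrected `adminmodels.py` further down takes the simpler of the two usual remedies and drops `completed_on` from the ID fieldset while leaving `readonly_fields` untouched. A minimal sketch of both options follows, using simplified placeholder models rather than the real snippets-service ones.

```python
# Simplified placeholder model and admin (not the real snippets-service code)
# illustrating why the FieldError is raised and the two usual ways to fix it.
from django.contrib import admin
from django.db import models


class Job(models.Model):
    status = models.IntegerField(default=0)
    # Non-editable field: Django refuses to build a form field for it.
    completed_on = models.DateTimeField(null=True, blank=True, editable=False)

    class Meta:
        app_label = 'demo'  # placeholder app label so the sketch stands alone


@admin.register(Job)
class JobAdmin(admin.ModelAdmin):
    # Option 1 (what the fix in this record does): leave the non-editable
    # field out of fieldsets entirely.
    fieldsets = [('ID', {'fields': ('status',)})]

    # Option 2: keep it visible on the change page by declaring it read-only,
    # so the admin renders its value instead of a form widget.
    # readonly_fields = ['completed_on']
    # fieldsets = [('ID', {'fields': ('status', 'completed_on')})]
```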
[ { "content": "import copy\nimport re\n\nfrom django.conf import settings\nfrom django.contrib import admin, messages\nfrom django.contrib.humanize.templatetags.humanize import intcomma\nfrom django.db.models import TextField, Q\nfrom django.http import HttpResponseRedirect\nfrom django.template.loader import get_template\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom django_ace import AceWidget\nfrom django_admin_listfilter_dropdown.filters import (RelatedDropdownFilter,\n RelatedOnlyDropdownFilter)\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom reversion.admin import VersionAdmin\nfrom taggit_helpers.admin import TaggitListFilter\n\nfrom snippets.base import forms, models\nfrom snippets.base.admin import actions, filters\n\n\nMATCH_LOCALE_REGEX = re.compile(r'(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass RelatedJobsMixin():\n def related_published_jobs(self, obj):\n return obj.jobs.filter(status=models.Job.PUBLISHED).count()\n\n def related_total_jobs(self, obj):\n return obj.jobs.count()\n\n def jobs_list(self, obj):\n \"\"\"List Related Jobs.\"\"\"\n template = get_template('base/jobs_related_with_obj.jinja')\n return mark_safe(\n template.render({\n 'jobs': obj.jobs.all().order_by('-id')\n })\n )\n\n\nclass RelatedSnippetsMixin():\n def related_published_jobs(self, obj):\n return models.Job.objects.filter(\n status=models.Job.PUBLISHED, snippet__in=obj.snippets.all()).count()\n\n def related_total_snippets(self, obj):\n return obj.snippets.count()\n\n def snippet_list(self, obj):\n \"\"\"List Related Snippets.\"\"\"\n template = get_template('base/snippets_related_with_obj.jinja')\n return mark_safe(\n template.render({\n 'snippets': obj.snippets.all().order_by('-id')\n })\n )\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any 
undefined variables.\n ast = models.JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n\nclass IconAdmin(RelatedSnippetsMixin, admin.ModelAdmin):\n search_fields = [\n 'name',\n 'image',\n ]\n readonly_fields = [\n 'height',\n 'width',\n 'size',\n 'preview',\n 'creator',\n 'created',\n 'snippet_list',\n 'related_total_snippets',\n 'related_published_jobs',\n ]\n list_display_links = [\n 'id',\n 'name',\n ]\n list_display = [\n 'id',\n 'name',\n 'width',\n 'height',\n 'related_total_snippets',\n 'related_published_jobs',\n 'preview',\n ]\n list_filter = [\n filters.IconRelatedPublishedASRSnippetFilter,\n ]\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippetsJobs.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def size(self, obj):\n return '{:.0f} KiB'.format(obj.image.size / 1024)\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n super().save_model(request, obj, form, change)\n\n def preview(self, obj):\n template = get_template('base/preview_image.jinja')\n return mark_safe(template.render({'image': obj.image}))\n\n\nclass SimpleTemplateInline(admin.StackedInline):\n model = models.SimpleTemplate\n form = forms.SimpleTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'simple_snippet',\n ]\n raw_id_fields = [\n 'section_title_icon',\n 'title_icon',\n 'icon',\n ]\n\n fieldsets = (\n ('Title', {\n 'fields': ('title_icon', 'title'),\n }),\n ('Section', {\n 'fields': ('section_title_icon', 'section_title_text', 'section_title_url',),\n }),\n ('Main', {\n 'fields': ('icon', 'text', 'button_label',\n 'button_url', 'button_color', 'button_background_color'),\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'tall', 'do_not_autoblock'),\n })\n\n )\n\n\nclass FundraisingTemplateInline(admin.StackedInline):\n model = models.FundraisingTemplate\n form = forms.FundraisingTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'eoy_snippet',\n ]\n raw_id_fields = [\n 'title_icon',\n 'icon',\n ]\n\n fieldsets = (\n ('Title', {\n 'fields': (\n 'title_icon',\n 'title'\n ),\n }),\n ('Main', {\n 'fields': (\n 'icon',\n 'text',\n 'text_color',\n 'background_color',\n 'highlight_color',\n )\n }),\n ('Form Configuration', {\n 'fields': (\n 'donation_form_url',\n 'currency_code',\n 'locale',\n 'selected_button',\n 'button_label',\n 'button_color',\n 'button_background_color',\n 'monthly_checkbox_label_text',\n )\n }),\n ('Donation', {\n 'fields': (\n ('donation_amount_first', 'donation_amount_second',\n 'donation_amount_third', 'donation_amount_fourth',),\n )\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'test', 'do_not_autoblock'),\n })\n\n )\n\n\nclass FxASignupTemplateInline(admin.StackedInline):\n model = models.FxASignupTemplate\n form 
= forms.FxASignupTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'fxa_signup_snippet',\n ]\n raw_id_fields = [\n 'scene1_section_title_icon',\n 'scene1_title_icon',\n 'scene1_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Section', {\n 'fields': (\n 'scene1_section_title_icon',\n 'scene1_section_title_text',\n 'scene1_section_title_url',\n )\n }),\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title',\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_text',\n 'scene2_button_label',\n 'scene2_email_placeholder_text',\n 'scene2_dismiss_button_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'utm_term',\n 'utm_campaign',\n 'block_button_text',\n 'do_not_autoblock',\n ),\n })\n )\n\n\nclass NewsletterTemplateInline(admin.StackedInline):\n model = models.NewsletterTemplate\n form = forms.NewsletterTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'newsletter_snippet',\n ]\n raw_id_fields = [\n 'scene1_section_title_icon',\n 'scene1_title_icon',\n 'scene1_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Section', {\n 'fields': (\n 'scene1_section_title_icon',\n 'scene1_section_title_text',\n 'scene1_section_title_url',\n )\n }),\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_text',\n 'scene2_button_label',\n 'scene2_email_placeholder_text',\n 'scene2_privacy_html',\n 'scene2_newsletter',\n 'scene2_dismiss_button_text',\n 'locale',\n 'success_text',\n 'error_text',\n 'retry_button_label',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'block_button_text',\n 'do_not_autoblock',\n ),\n })\n )\n\n\nclass SendToDeviceTemplateInline(admin.StackedInline):\n model = models.SendToDeviceTemplate\n form = forms.SendToDeviceTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'send_to_device_snippet',\n ]\n raw_id_fields = [\n 'scene1_section_title_icon',\n 'scene1_title_icon',\n 'scene1_icon',\n 'scene2_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Section', {\n 'fields': (\n 'scene1_section_title_icon',\n 'scene1_section_title_text',\n 'scene1_section_title_url',\n )\n }),\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_icon',\n 'scene2_text',\n\n 'scene2_button_label',\n 'scene2_input_placeholder',\n 'scene2_disclaimer_html',\n 'scene2_dismiss_button_text',\n\n 'locale',\n ('include_sms', 'message_id_sms',),\n 'country',\n 'message_id_email',\n 'success_title',\n 'success_text',\n 'error_text',\n 'retry_button_label',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'block_button_text',\n 'do_not_autoblock',\n ),\n })\n )\n\n\nclass SimpleBelowSearchTemplateInline(admin.StackedInline):\n model = models.SimpleBelowSearchTemplate\n form = forms.SimpleBelowSearchTemplateForm\n can_delete = False\n classes = [\n 
'inline-template',\n 'simple_below_search_snippet',\n ]\n raw_id_fields = [\n 'icon',\n ]\n\n fieldsets = (\n ('Main', {\n 'fields': ('icon', 'title', 'text', 'button_label',\n 'button_url', 'button_color', 'button_background_color'),\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'do_not_autoblock'),\n })\n\n )\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n inlines = [\n SimpleTemplateInline,\n FundraisingTemplateInline,\n FxASignupTemplateInline,\n NewsletterTemplateInline,\n SendToDeviceTemplateInline,\n SimpleBelowSearchTemplateInline,\n ]\n list_display_links = [\n 'id',\n ]\n list_display = [\n 'id',\n 'custom_name_with_tags',\n 'snippet_status',\n 'locale',\n 'modified',\n ]\n list_filter = [\n filters.TemplateFilter,\n ['locale', RelatedDropdownFilter],\n ['jobs__targets', RelatedOnlyDropdownFilter],\n 'jobs__status',\n ['jobs__campaign', RelatedDropdownFilter],\n TaggitListFilter,\n ['category', RelatedDropdownFilter],\n filters.ModifiedFilter,\n ]\n search_fields = [\n 'name',\n 'id',\n 'jobs__campaign__name',\n 'jobs__targets__name',\n 'category__name',\n ]\n autocomplete_fields = [\n 'category',\n ]\n preserve_filters = True\n readonly_fields = [\n 'id',\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url_light_theme',\n 'preview_url_dark_theme',\n 'job_status',\n 'snippet_status',\n ]\n actions = [\n actions.duplicate_snippets_action,\n ]\n save_on_top = True\n save_as = True\n view_on_site = False\n\n fieldsets = (\n ('ID', {\n 'fields': (\n 'id',\n 'name',\n 'tags',\n 'creator',\n 'category',\n 'preview_url_light_theme',\n 'preview_url_dark_theme',\n )\n }),\n ('Status', {\n 'fields': (\n 'snippet_status',\n 'job_status',\n )\n }),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> opens Firefox Accounts</li>\n <li><code>special:monitor</code> links User to Firefox Monitor and directly authenticates them. Works only in buttons. Works only after Firefox 69.</li>\n <li><code>special:about:ABOUT_PAGE</code> links to an About page. Get a list of About pages by typing <code>about:about</code> in your awesome bar. Example: <code>special:about:protections</code>.\n <li><code>special:preferences</code> opens the Firefox Preferences tab. Example: <code>special:preferences</code>.\n <li><code>special:highlight:HIGHLIGHT</code> highlights a button in the browser chrome. Get a list of <a href=\"https://bedrock.readthedocs.io/en/latest/uitour.html#showhighlight-target-effect\">available highlights</a>. Example: <code>special:highlight:logins</code>. Works only after Firefox 71.\n <li><code>special:menu:MENU</code> opens a targeted menu in the browser chrome. Get a list of <a href=\"https://bedrock.readthedocs.io/en/latest/uitour.html#showmenu-target-callback\">available menus</a>. 
Example: <code>special:menu:appMenu</code>.\n </ol><br/>\n <strong>Content Variables:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n Similarly <code>[[campaign_slug]]</code> gets replaced by Campaign Slug,\n <code>[[channels]]</code> by targeted channels, <code>[[job_id]]</code>\n by Job ID.\n <br/>\n Example: This is a <code>&lt;a href=&quot;https://example.com?utm_term=[[snippet_id]]&quot;&gt;link&lt;/a&gt;</code>\n <br/>\n ''' # noqa\n ),\n 'fields': (\n 'status',\n 'locale',\n 'template_chooser',\n ),\n 'classes': ('template-fieldset',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified')),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ASRSnippetAdmin.css',\n 'css/admin/descriptionColorize.css',\n 'css/admin/IDFieldHighlight.css',\n 'css/admin/InlineTemplates.css',\n 'css/admin/CustomNameWithTags.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n 'js/clipboard.min.js',\n 'js/copy_preview.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url_light_theme(self, obj):\n text = f'''\n <span id=\"previewLinkUrlLight\">{obj.get_preview_url()}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrlLight\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n preview_url_light_theme.short_description = 'Light Themed Preview URL'\n\n def preview_url_dark_theme(self, obj):\n text = f'''\n <span id=\"previewLinkUrlDark\">{obj.get_preview_url(dark=True)}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrlDark\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n preview_url_dark_theme.short_description = 'Dark Themed Preview URL'\n\n def snippet_status(self, obj):\n if obj.jobs.filter(status=models.Job.PUBLISHED).exists():\n msg = 'Published'\n elif obj.jobs.filter(status=models.Job.SCHEDULED).exists():\n msg = 'Scheduled'\n else:\n msg = 'Not Scheduled'\n return mark_safe(\n '<span id=\"snippet_status\" class={color_class}>{msg}</span>'.format(\n color_class=msg.lower(), msg=msg\n )\n )\n snippet_status.short_description = 'Status'\n\n def job_status(self, obj):\n changelist_url = '{reverse}?snippet__id__exact={id}'.format(\n reverse=reverse('admin:base_job_changelist'),\n id=obj.id,\n )\n draft_jobs_count = scheduled_jobs_count = published_jobs_count = 0\n # Count job types in Python to avoid multiple DB queries.\n for job in obj.jobs.all():\n if job.status == models.Job.DRAFT:\n draft_jobs_count += 1\n elif job.status == models.Job.SCHEDULED:\n scheduled_jobs_count += 1\n elif job.status == models.Job.PUBLISHED:\n published_jobs_count += 1\n\n msg = '''\n <a href=\"{draft_jobs_link}\">{draft_jobs_count} Draft Jobs</a>\n -\n <a href=\"{scheduled_jobs_link}\">{scheduled_jobs_count} Scheduled Jobs</a>\n -\n <a href=\"{published_jobs_link}\">{published_jobs_count} Published Jobs</a>\n -\n <a href=\"{all_jobs_link}\">All Jobs</a>\n <a href=\"{add_job_link}\" id=\"addJobButton\">Add Job</a>\n\n '''.format(\n draft_jobs_link=changelist_url + '&status__exact={}'.format(models.Job.DRAFT),\n draft_jobs_count=draft_jobs_count,\n 
scheduled_jobs_link=changelist_url + '&status__exact={}'.format(models.Job.SCHEDULED),\n scheduled_jobs_count=scheduled_jobs_count,\n published_jobs_link=changelist_url + '&status__exact={}'.format(models.Job.PUBLISHED),\n published_jobs_count=published_jobs_count,\n all_jobs_link=changelist_url,\n add_job_link=reverse('admin:base_job_add') + '?snippet={}'.format(obj.id),\n )\n return mark_safe(msg)\n job_status.short_description = 'Jobs'\n\n def change_view(self, request, *args, **kwargs):\n if request.method == 'POST' and '_saveasnew' in request.POST:\n # Always saved cloned snippets as un-published and un-check ready for review.\n post_data = request.POST.copy()\n post_data['status'] = models.STATUS_CHOICES['Draft']\n request.POST = post_data\n return super().change_view(request, *args, **kwargs)\n\n def get_readonly_fields(self, request, obj):\n fields = copy.copy(self.readonly_fields)\n if obj is None:\n fields.append('status')\n return fields\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request).prefetch_related('tags')\n return queryset\n\n def get_form(self, request, obj=None, **kwargs):\n form = super().get_form(request, obj, **kwargs)\n form.current_user = request.user\n return form\n\n def custom_name_with_tags(self, obj):\n template = get_template('base/snippets_custom_name_with_tags.jinja')\n return mark_safe(template.render({'obj': obj}))\n custom_name_with_tags.short_description = 'Name'\n\n\nclass CampaignAdmin(RelatedJobsMixin, admin.ModelAdmin):\n readonly_fields = [\n 'created',\n 'modified',\n 'creator',\n 'related_published_jobs',\n 'related_total_jobs',\n 'jobs_list',\n ]\n prepopulated_fields = {\n 'slug': ('name',)\n }\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Jobs', {\n 'fields': (\n 'related_published_jobs',\n 'related_total_jobs',\n 'jobs_list',\n ),\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = [\n 'name',\n ]\n list_display = [\n 'name',\n 'related_total_jobs',\n 'related_published_jobs',\n ]\n list_filter = [\n filters.RelatedPublishedASRSnippetFilter,\n filters.ChannelFilter,\n ]\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippetsJobs.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass CategoryAdmin(RelatedSnippetsMixin, admin.ModelAdmin):\n readonly_fields = [\n 'created',\n 'modified',\n 'creator',\n 'snippet_list',\n 'related_total_snippets',\n 'related_published_jobs',\n ]\n fieldsets = [\n ('ID', {\n 'fields': (\n 'name',\n 'description',\n )\n }),\n ('Snippets', {\n 'fields': (\n 'related_published_jobs',\n 'related_total_snippets',\n 'snippet_list',\n ),\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n ]\n search_fields = [\n 'name',\n 'description',\n ]\n list_display = [\n 'name',\n 'related_published_jobs',\n 'related_total_snippets',\n ]\n list_filter = [\n filters.RelatedPublishedASRSnippetFilter,\n ]\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippetsJobs.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.category')\n super().save_model(request, obj, form, change)\n\n\nclass 
TargetAdmin(RelatedJobsMixin, admin.ModelAdmin):\n form = forms.TargetAdminForm\n save_on_top = True\n readonly_fields = [\n 'created',\n 'modified',\n 'creator',\n 'jexl_expr',\n 'jobs_list',\n 'related_total_jobs',\n 'related_published_jobs',\n ]\n filter_horizontal = [\n 'client_match_rules',\n ]\n search_fields = [\n 'name',\n ]\n list_display = [\n 'name',\n 'related_published_jobs',\n 'related_total_jobs',\n ]\n fieldsets = [\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_country',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_total_bookmarks_count',\n 'filtr_operating_system',\n )\n }),\n ('Addons', {\n 'fields': (\n 'filtr_can_install_addons',\n 'filtr_total_addons',\n 'filtr_browser_addon',\n )\n }),\n ('Accounts and Sync', {\n 'fields': (\n 'filtr_uses_firefox_sync',\n 'filtr_desktop_devices_count',\n 'filtr_mobile_devices_count',\n 'filtr_total_devices_count',\n 'filtr_firefox_service',\n ),\n }),\n ('Advanced Targeting', {\n 'fields': (\n 'client_match_rules',\n )\n }),\n ('Jobs', {\n 'fields': (\n 'related_published_jobs',\n 'related_total_jobs',\n 'jobs_list',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n ]\n list_filter = [\n filters.RelatedPublishedASRSnippetFilter,\n ]\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippetsJobs.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n\n\nclass LocaleAdmin(admin.ModelAdmin):\n list_display = ('name', 'code')\n search_fields = (\n 'name',\n 'code',\n )\n\n\nclass JobAdmin(admin.ModelAdmin):\n save_on_top = True\n preserve_filters = True\n filter_horizontal = [\n 'targets',\n ]\n list_display = [\n 'id',\n 'snippet_name',\n 'target_list',\n 'job_status',\n 'publish_start',\n 'publish_end',\n 'metric_impressions_humanized',\n 'metric_clicks_humanized',\n 'metric_blocks_humanized',\n ]\n list_display_links = [\n 'id',\n 'snippet_name',\n ]\n list_filter = [\n 'status',\n ('campaign', RelatedDropdownFilter),\n ('targets', RelatedOnlyDropdownFilter),\n ('snippet__locale', RelatedOnlyDropdownFilter),\n filters.ChannelFilter,\n ]\n search_fields = [\n 'id',\n 'uuid',\n 'snippet__id',\n 'snippet__name',\n 'campaign__name',\n ]\n autocomplete_fields = [\n 'snippet',\n 'campaign',\n ]\n readonly_fields = [\n 'snippet_name_linked',\n 'creator',\n 'job_status',\n 'uuid',\n 'id',\n 'created',\n 'modified',\n 'metric_impressions_humanized',\n 'metric_clicks_humanized',\n 'metric_blocks_humanized',\n 'metric_last_update',\n 'redash_link',\n ]\n fieldsets = [\n ('ID', {\n 'fields': ('id', ('job_status', 'completed_on'), 'snippet_name_linked', 'creator')\n }),\n ('Content', {\n 'fields': ('snippet', 'campaign')\n }),\n ('Targeting', {\n 'fields': ('targets', 'weight',)\n }),\n ('Publishing Dates', {\n 'fields': (('publish_start', 'publish_end'),)\n }),\n ('Global Limits', {\n 'fields': ((\n 'limit_impressions',\n 'limit_clicks',\n 'limit_blocks',\n 
),),\n }),\n ('Client Limits', {\n 'fields': (\n 'client_limit_lifetime',\n ('client_limit_per_hour',\n 'client_limit_per_day',\n 'client_limit_per_week',\n 'client_limit_per_fortnight',\n 'client_limit_per_month',),\n ),\n 'description': (\n '''\n Limit the number of impressions of this Job per Firefox Client.<br/><br/>\n Examples:<br/>\n <ol>\n <li>If <code>Max Weekly Impressions</code> is set to 2, each user will see this Job <i>at most</i> 2 times within 7 days.</li>\n <li>Limits can be combined: If <code>Max Weekly Impressions</code> is set to 2 and <code>Max Monthly Impressions</code> is set to 4,\n each user will see this Job <i>at most</i> 2 times within 7 days and <i>at most</i> 4 times within 30 days.</li>\n </ol>\n <strong>Note</strong>: Counting starts from the time a user gets their first impression. For example when a user first time sees a Job on the 10th day of a month, the fortnight counter will expire on the 25th.<br/>\n <strong>Note</strong>: This functionality <i>does not</i> guaranty the minimum number of impressions per user but it enforces that a Job won't appear more than planned.\n '''), # noqa\n }),\n ('Metrics', {\n 'fields': (\n (\n 'metric_impressions_humanized',\n 'metric_clicks_humanized',\n 'metric_blocks_humanized',\n ),\n 'metric_last_update',\n 'redash_link',\n ),\n }),\n ('Other Info', {\n 'fields': (('created', 'modified'),),\n }),\n ('Advanced', {\n 'fields': ('distribution',),\n }),\n ]\n actions = [\n 'action_schedule_job',\n 'action_cancel_job',\n ]\n\n class Media:\n css = {\n 'all': [\n 'css/admin/JobAdmin.css',\n 'css/admin/descriptionColorize.css',\n 'css/admin/IDFieldHighlight.css',\n ]\n }\n js = [\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n ]\n\n def snippet_name(self, obj):\n return obj.snippet.name\n\n def snippet_name_linked(self, obj):\n return mark_safe(\n '<a href=\"{}\">{}</a>'.format(\n reverse('admin:base_asrsnippet_change', args=[obj.snippet.id]), obj.snippet.name)\n )\n snippet_name_linked.short_description = 'Link to Snippet'\n\n def target_list(self, obj):\n return mark_safe(\n '<ul>' +\n ''.join([\n f'<li> {target}' for target in obj.targets.values_list('name', flat=True)\n ]) +\n '</ul>'\n )\n target_list.short_description = 'Targets'\n\n def job_status(self, obj):\n msg = obj.get_status_display()\n return mark_safe(\n '<span id=\"job_status\" class={color_class}>{msg}</span>'.format(\n color_class=msg.lower(), msg=msg\n )\n )\n job_status.short_description = 'Status'\n\n def metric_impressions_humanized(self, obj):\n return intcomma(obj.metric_impressions)\n metric_impressions_humanized.short_description = 'Impressions'\n\n def metric_clicks_humanized(self, obj):\n if obj.metric_clicks == 0:\n return 0\n ratio = (obj.metric_clicks / obj.metric_impressions) * 100\n ratio_class = 'ratio-red' if ratio < 0.02 else 'ratio-green'\n return format_html('<span class=\"{}\">{} ({:.2f}%)</span>'.format(\n ratio_class, intcomma(obj.metric_clicks), ratio\n ))\n metric_clicks_humanized.short_description = 'Clicks'\n\n def metric_blocks_humanized(self, obj):\n if obj.metric_blocks == 0:\n return 0\n ratio = (obj.metric_blocks / obj.metric_impressions) * 100\n ratio_class = 'ratio-red' if ratio >= 0.25 else 'ratio-green'\n return format_html('<span class=\"{}\">{} ({:.2f}%)</span>'.format(\n ratio_class, intcomma(obj.metric_blocks), ratio\n ))\n metric_blocks_humanized.short_description = 'Blocks'\n\n def redash_link(self, obj):\n link = (f'{settings.REDASH_ENDPOINT}/queries/{settings.REDASH_QUERY_ID}/'\n 
f'?p_start_date_{settings.REDASH_QUERY_ID}={obj.publish_start.strftime(\"%Y%m%d\")}'\n f'&p_end_date_{settings.REDASH_QUERY_ID}={obj.publish_end.strftime(\"%Y%m%d\")}'\n f'&p_message_id_{settings.REDASH_QUERY_ID}={obj.id}#161888')\n return format_html(f'<a href=\"{link}\">Explore</a>')\n redash_link.short_description = 'Explore in Redash'\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n super().save_model(request, obj, form, change)\n\n def has_change_permission(self, request, obj=None):\n \"\"\" Allow edit only during Draft stage. \"\"\"\n if obj and obj.status == models.Job.DRAFT:\n return True\n return False\n\n def has_delete_permission(self, request, obj=None):\n \"\"\" Allow deletion only during Draft stage. \"\"\"\n if obj and obj.status == models.Job.DRAFT:\n return True\n return False\n\n def has_publish_permission(self, request):\n return request.user.has_perm('base.change_job')\n\n def response_change(self, request, obj):\n # Add logs using admin system\n if '_cancel' in request.POST:\n obj.change_status(status=models.Job.CANCELED, user=request.user)\n return HttpResponseRedirect('.')\n elif '_schedule' in request.POST:\n obj.change_status(status=models.Job.SCHEDULED, user=request.user)\n return HttpResponseRedirect('.')\n elif '_duplicate' in request.POST:\n new_job = obj.duplicate(request.user)\n return HttpResponseRedirect(new_job.get_admin_url(full=False))\n return super().response_change(request, obj)\n\n def _changeform_view(self, request, *args, **kwargs):\n view = super()._changeform_view(request, *args, **kwargs)\n if hasattr(view, 'context_data'):\n obj = view.context_data['original']\n if obj and self.has_publish_permission(request):\n if obj.status in [models.Job.PUBLISHED, models.Job.SCHEDULED]:\n view.context_data['show_cancel'] = True\n elif obj.status == models.Job.DRAFT:\n view.context_data['show_schedule'] = True\n view.context_data['show_duplicate'] = True\n return view\n\n def _action_status_change(self, action, request, queryset):\n if action == 'schedule':\n status = models.Job.SCHEDULED\n no_action_message = 'Skipped {} already scheduled and published Jobs.'\n success_message = 'Scheduled {} Jobs.'\n clean_queryset = queryset.filter(status=models.Job.DRAFT)\n elif action == 'cancel':\n status = models.Job.CANCELED\n no_action_message = 'Skipped {} already canceled or completed Jobs.'\n success_message = 'Canceled {} Jobs.'\n clean_queryset = queryset.filter(\n Q(status=models.Job.PUBLISHED) |\n Q(status=models.Job.SCHEDULED) |\n Q(status=models.Job.DRAFT)\n )\n else:\n messages.success(request, 'Error no action')\n return\n\n no_jobs = clean_queryset.count()\n no_already_scheduled_jobs = queryset.count() - no_jobs\n\n for job in clean_queryset:\n job.change_status(status=status, user=request.user)\n\n if no_already_scheduled_jobs:\n messages.warning(request, no_action_message.format(no_already_scheduled_jobs))\n messages.success(request, success_message.format(no_jobs))\n\n def action_schedule_job(self, request, queryset):\n self._action_status_change('schedule', request, queryset)\n action_schedule_job.short_description = 'Schedule selected Jobs'\n action_schedule_job.allowed_permissions = (\n 'publish',\n )\n\n def action_cancel_job(self, request, queryset):\n self._action_status_change('cancel', request, queryset)\n action_cancel_job.short_description = 'Cancel selected Jobs'\n action_cancel_job.allowed_permissions = (\n 'publish',\n )\n\n\nclass DistributionAdmin(admin.ModelAdmin):\n 
save_on_top = True\n\n\nclass DailyJobMetrics(admin.ModelAdmin):\n list_display = ('id', 'job', 'data_fetched_on')\n search_fields = ('job__id', 'job__snippet__name', 'job__snippet__id')\n fieldsets = [\n ('Metrics', {\n 'fields': (\n 'job',\n 'date',\n 'impressions',\n 'clicks',\n 'blocks',\n ),\n }),\n ]\n\n def has_add_permission(self, request):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n", "path": "snippets/base/admin/adminmodels.py" } ]
[ { "content": "import copy\nimport re\n\nfrom django.conf import settings\nfrom django.contrib import admin, messages\nfrom django.contrib.humanize.templatetags.humanize import intcomma\nfrom django.db.models import TextField, Q\nfrom django.http import HttpResponseRedirect\nfrom django.template.loader import get_template\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom django_ace import AceWidget\nfrom django_admin_listfilter_dropdown.filters import (RelatedDropdownFilter,\n RelatedOnlyDropdownFilter)\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom reversion.admin import VersionAdmin\nfrom taggit_helpers.admin import TaggitListFilter\n\nfrom snippets.base import forms, models\nfrom snippets.base.admin import actions, filters\n\n\nMATCH_LOCALE_REGEX = re.compile(r'(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass RelatedJobsMixin():\n def related_published_jobs(self, obj):\n return obj.jobs.filter(status=models.Job.PUBLISHED).count()\n\n def related_total_jobs(self, obj):\n return obj.jobs.count()\n\n def jobs_list(self, obj):\n \"\"\"List Related Jobs.\"\"\"\n template = get_template('base/jobs_related_with_obj.jinja')\n return mark_safe(\n template.render({\n 'jobs': obj.jobs.all().order_by('-id')\n })\n )\n\n\nclass RelatedSnippetsMixin():\n def related_published_jobs(self, obj):\n return models.Job.objects.filter(\n status=models.Job.PUBLISHED, snippet__in=obj.snippets.all()).count()\n\n def related_total_snippets(self, obj):\n return obj.snippets.count()\n\n def snippet_list(self, obj):\n \"\"\"List Related Snippets.\"\"\"\n template = get_template('base/snippets_related_with_obj.jinja')\n return mark_safe(\n template.render({\n 'snippets': obj.snippets.all().order_by('-id')\n })\n )\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any 
undefined variables.\n ast = models.JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n class Media:\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n\nclass IconAdmin(RelatedSnippetsMixin, admin.ModelAdmin):\n search_fields = [\n 'name',\n 'image',\n ]\n readonly_fields = [\n 'height',\n 'width',\n 'size',\n 'preview',\n 'creator',\n 'created',\n 'snippet_list',\n 'related_total_snippets',\n 'related_published_jobs',\n ]\n list_display_links = [\n 'id',\n 'name',\n ]\n list_display = [\n 'id',\n 'name',\n 'width',\n 'height',\n 'related_total_snippets',\n 'related_published_jobs',\n 'preview',\n ]\n list_filter = [\n filters.IconRelatedPublishedASRSnippetFilter,\n ]\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippetsJobs.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def size(self, obj):\n return '{:.0f} KiB'.format(obj.image.size / 1024)\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n super().save_model(request, obj, form, change)\n\n def preview(self, obj):\n template = get_template('base/preview_image.jinja')\n return mark_safe(template.render({'image': obj.image}))\n\n\nclass SimpleTemplateInline(admin.StackedInline):\n model = models.SimpleTemplate\n form = forms.SimpleTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'simple_snippet',\n ]\n raw_id_fields = [\n 'section_title_icon',\n 'title_icon',\n 'icon',\n ]\n\n fieldsets = (\n ('Title', {\n 'fields': ('title_icon', 'title'),\n }),\n ('Section', {\n 'fields': ('section_title_icon', 'section_title_text', 'section_title_url',),\n }),\n ('Main', {\n 'fields': ('icon', 'text', 'button_label',\n 'button_url', 'button_color', 'button_background_color'),\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'tall', 'do_not_autoblock'),\n })\n\n )\n\n\nclass FundraisingTemplateInline(admin.StackedInline):\n model = models.FundraisingTemplate\n form = forms.FundraisingTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'eoy_snippet',\n ]\n raw_id_fields = [\n 'title_icon',\n 'icon',\n ]\n\n fieldsets = (\n ('Title', {\n 'fields': (\n 'title_icon',\n 'title'\n ),\n }),\n ('Main', {\n 'fields': (\n 'icon',\n 'text',\n 'text_color',\n 'background_color',\n 'highlight_color',\n )\n }),\n ('Form Configuration', {\n 'fields': (\n 'donation_form_url',\n 'currency_code',\n 'locale',\n 'selected_button',\n 'button_label',\n 'button_color',\n 'button_background_color',\n 'monthly_checkbox_label_text',\n )\n }),\n ('Donation', {\n 'fields': (\n ('donation_amount_first', 'donation_amount_second',\n 'donation_amount_third', 'donation_amount_fourth',),\n )\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'test', 'do_not_autoblock'),\n })\n\n )\n\n\nclass FxASignupTemplateInline(admin.StackedInline):\n model = models.FxASignupTemplate\n form 
= forms.FxASignupTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'fxa_signup_snippet',\n ]\n raw_id_fields = [\n 'scene1_section_title_icon',\n 'scene1_title_icon',\n 'scene1_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Section', {\n 'fields': (\n 'scene1_section_title_icon',\n 'scene1_section_title_text',\n 'scene1_section_title_url',\n )\n }),\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title',\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_text',\n 'scene2_button_label',\n 'scene2_email_placeholder_text',\n 'scene2_dismiss_button_text',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'utm_term',\n 'utm_campaign',\n 'block_button_text',\n 'do_not_autoblock',\n ),\n })\n )\n\n\nclass NewsletterTemplateInline(admin.StackedInline):\n model = models.NewsletterTemplate\n form = forms.NewsletterTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'newsletter_snippet',\n ]\n raw_id_fields = [\n 'scene1_section_title_icon',\n 'scene1_title_icon',\n 'scene1_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Section', {\n 'fields': (\n 'scene1_section_title_icon',\n 'scene1_section_title_text',\n 'scene1_section_title_url',\n )\n }),\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_text',\n 'scene2_button_label',\n 'scene2_email_placeholder_text',\n 'scene2_privacy_html',\n 'scene2_newsletter',\n 'scene2_dismiss_button_text',\n 'locale',\n 'success_text',\n 'error_text',\n 'retry_button_label',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'block_button_text',\n 'do_not_autoblock',\n ),\n })\n )\n\n\nclass SendToDeviceTemplateInline(admin.StackedInline):\n model = models.SendToDeviceTemplate\n form = forms.SendToDeviceTemplateForm\n can_delete = False\n classes = [\n 'inline-template',\n 'send_to_device_snippet',\n ]\n raw_id_fields = [\n 'scene1_section_title_icon',\n 'scene1_title_icon',\n 'scene1_icon',\n 'scene2_icon',\n ]\n\n fieldsets = (\n ('Scene 1 Section', {\n 'fields': (\n 'scene1_section_title_icon',\n 'scene1_section_title_text',\n 'scene1_section_title_url',\n )\n }),\n ('Scene 1 Title', {\n 'fields': (\n 'scene1_title_icon',\n 'scene1_title'\n ),\n }),\n ('Scene 1 Main', {\n 'fields': (\n 'scene1_icon',\n 'scene1_text',\n 'scene1_button_label',\n 'scene1_button_color',\n 'scene1_button_background_color',\n )\n }),\n ('Scene 2 Title', {\n 'fields': ('scene2_title',),\n }),\n ('Scene 2 Main', {\n 'fields': (\n 'scene2_icon',\n 'scene2_text',\n\n 'scene2_button_label',\n 'scene2_input_placeholder',\n 'scene2_disclaimer_html',\n 'scene2_dismiss_button_text',\n\n 'locale',\n ('include_sms', 'message_id_sms',),\n 'country',\n 'message_id_email',\n 'success_title',\n 'success_text',\n 'error_text',\n 'retry_button_label',\n )\n }),\n\n ('Extra', {\n 'fields': (\n 'block_button_text',\n 'do_not_autoblock',\n ),\n })\n )\n\n\nclass SimpleBelowSearchTemplateInline(admin.StackedInline):\n model = models.SimpleBelowSearchTemplate\n form = forms.SimpleBelowSearchTemplateForm\n can_delete = False\n classes = [\n 
'inline-template',\n 'simple_below_search_snippet',\n ]\n raw_id_fields = [\n 'icon',\n ]\n\n fieldsets = (\n ('Main', {\n 'fields': ('icon', 'title', 'text', 'button_label',\n 'button_url', 'button_color', 'button_background_color'),\n }),\n ('Extra', {\n 'fields': ('block_button_text', 'do_not_autoblock'),\n })\n\n )\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n inlines = [\n SimpleTemplateInline,\n FundraisingTemplateInline,\n FxASignupTemplateInline,\n NewsletterTemplateInline,\n SendToDeviceTemplateInline,\n SimpleBelowSearchTemplateInline,\n ]\n list_display_links = [\n 'id',\n ]\n list_display = [\n 'id',\n 'custom_name_with_tags',\n 'snippet_status',\n 'locale',\n 'modified',\n ]\n list_filter = [\n filters.TemplateFilter,\n ['locale', RelatedDropdownFilter],\n ['jobs__targets', RelatedOnlyDropdownFilter],\n 'jobs__status',\n ['jobs__campaign', RelatedDropdownFilter],\n TaggitListFilter,\n ['category', RelatedDropdownFilter],\n filters.ModifiedFilter,\n ]\n search_fields = [\n 'name',\n 'id',\n 'jobs__campaign__name',\n 'jobs__targets__name',\n 'category__name',\n ]\n autocomplete_fields = [\n 'category',\n ]\n preserve_filters = True\n readonly_fields = [\n 'id',\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url_light_theme',\n 'preview_url_dark_theme',\n 'job_status',\n 'snippet_status',\n ]\n actions = [\n actions.duplicate_snippets_action,\n ]\n save_on_top = True\n save_as = True\n view_on_site = False\n\n fieldsets = (\n ('ID', {\n 'fields': (\n 'id',\n 'name',\n 'tags',\n 'creator',\n 'category',\n 'preview_url_light_theme',\n 'preview_url_dark_theme',\n )\n }),\n ('Status', {\n 'fields': (\n 'snippet_status',\n 'job_status',\n )\n }),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> opens Firefox Accounts</li>\n <li><code>special:monitor</code> links User to Firefox Monitor and directly authenticates them. Works only in buttons. Works only after Firefox 69.</li>\n <li><code>special:about:ABOUT_PAGE</code> links to an About page. Get a list of About pages by typing <code>about:about</code> in your awesome bar. Example: <code>special:about:protections</code>.\n <li><code>special:preferences</code> opens the Firefox Preferences tab. Example: <code>special:preferences</code>.\n <li><code>special:highlight:HIGHLIGHT</code> highlights a button in the browser chrome. Get a list of <a href=\"https://bedrock.readthedocs.io/en/latest/uitour.html#showhighlight-target-effect\">available highlights</a>. Example: <code>special:highlight:logins</code>. Works only after Firefox 71.\n <li><code>special:menu:MENU</code> opens a targeted menu in the browser chrome. Get a list of <a href=\"https://bedrock.readthedocs.io/en/latest/uitour.html#showmenu-target-callback\">available menus</a>. 
Example: <code>special:menu:appMenu</code>.\n </ol><br/>\n <strong>Content Variables:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n Similarly <code>[[campaign_slug]]</code> gets replaced by Campaign Slug,\n <code>[[channels]]</code> by targeted channels, <code>[[job_id]]</code>\n by Job ID.\n <br/>\n Example: This is a <code>&lt;a href=&quot;https://example.com?utm_term=[[snippet_id]]&quot;&gt;link&lt;/a&gt;</code>\n <br/>\n ''' # noqa\n ),\n 'fields': (\n 'status',\n 'locale',\n 'template_chooser',\n ),\n 'classes': ('template-fieldset',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified')),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ASRSnippetAdmin.css',\n 'css/admin/descriptionColorize.css',\n 'css/admin/IDFieldHighlight.css',\n 'css/admin/InlineTemplates.css',\n 'css/admin/CustomNameWithTags.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n 'js/clipboard.min.js',\n 'js/copy_preview.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url_light_theme(self, obj):\n text = f'''\n <span id=\"previewLinkUrlLight\">{obj.get_preview_url()}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrlLight\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n preview_url_light_theme.short_description = 'Light Themed Preview URL'\n\n def preview_url_dark_theme(self, obj):\n text = f'''\n <span id=\"previewLinkUrlDark\">{obj.get_preview_url(dark=True)}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrlDark\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n preview_url_dark_theme.short_description = 'Dark Themed Preview URL'\n\n def snippet_status(self, obj):\n if obj.jobs.filter(status=models.Job.PUBLISHED).exists():\n msg = 'Published'\n elif obj.jobs.filter(status=models.Job.SCHEDULED).exists():\n msg = 'Scheduled'\n else:\n msg = 'Not Scheduled'\n return mark_safe(\n '<span id=\"snippet_status\" class={color_class}>{msg}</span>'.format(\n color_class=msg.lower(), msg=msg\n )\n )\n snippet_status.short_description = 'Status'\n\n def job_status(self, obj):\n changelist_url = '{reverse}?snippet__id__exact={id}'.format(\n reverse=reverse('admin:base_job_changelist'),\n id=obj.id,\n )\n draft_jobs_count = scheduled_jobs_count = published_jobs_count = 0\n # Count job types in Python to avoid multiple DB queries.\n for job in obj.jobs.all():\n if job.status == models.Job.DRAFT:\n draft_jobs_count += 1\n elif job.status == models.Job.SCHEDULED:\n scheduled_jobs_count += 1\n elif job.status == models.Job.PUBLISHED:\n published_jobs_count += 1\n\n msg = '''\n <a href=\"{draft_jobs_link}\">{draft_jobs_count} Draft Jobs</a>\n -\n <a href=\"{scheduled_jobs_link}\">{scheduled_jobs_count} Scheduled Jobs</a>\n -\n <a href=\"{published_jobs_link}\">{published_jobs_count} Published Jobs</a>\n -\n <a href=\"{all_jobs_link}\">All Jobs</a>\n <a href=\"{add_job_link}\" id=\"addJobButton\">Add Job</a>\n\n '''.format(\n draft_jobs_link=changelist_url + '&status__exact={}'.format(models.Job.DRAFT),\n draft_jobs_count=draft_jobs_count,\n 
scheduled_jobs_link=changelist_url + '&status__exact={}'.format(models.Job.SCHEDULED),\n scheduled_jobs_count=scheduled_jobs_count,\n published_jobs_link=changelist_url + '&status__exact={}'.format(models.Job.PUBLISHED),\n published_jobs_count=published_jobs_count,\n all_jobs_link=changelist_url,\n add_job_link=reverse('admin:base_job_add') + '?snippet={}'.format(obj.id),\n )\n return mark_safe(msg)\n job_status.short_description = 'Jobs'\n\n def change_view(self, request, *args, **kwargs):\n if request.method == 'POST' and '_saveasnew' in request.POST:\n # Always saved cloned snippets as un-published and un-check ready for review.\n post_data = request.POST.copy()\n post_data['status'] = models.STATUS_CHOICES['Draft']\n request.POST = post_data\n return super().change_view(request, *args, **kwargs)\n\n def get_readonly_fields(self, request, obj):\n fields = copy.copy(self.readonly_fields)\n if obj is None:\n fields.append('status')\n return fields\n\n def get_queryset(self, request):\n queryset = super().get_queryset(request).prefetch_related('tags')\n return queryset\n\n def get_form(self, request, obj=None, **kwargs):\n form = super().get_form(request, obj, **kwargs)\n form.current_user = request.user\n return form\n\n def custom_name_with_tags(self, obj):\n template = get_template('base/snippets_custom_name_with_tags.jinja')\n return mark_safe(template.render({'obj': obj}))\n custom_name_with_tags.short_description = 'Name'\n\n\nclass CampaignAdmin(RelatedJobsMixin, admin.ModelAdmin):\n readonly_fields = [\n 'created',\n 'modified',\n 'creator',\n 'related_published_jobs',\n 'related_total_jobs',\n 'jobs_list',\n ]\n prepopulated_fields = {\n 'slug': ('name',)\n }\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Jobs', {\n 'fields': (\n 'related_published_jobs',\n 'related_total_jobs',\n 'jobs_list',\n ),\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = [\n 'name',\n ]\n list_display = [\n 'name',\n 'related_total_jobs',\n 'related_published_jobs',\n ]\n list_filter = [\n filters.RelatedPublishedASRSnippetFilter,\n filters.ChannelFilter,\n ]\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippetsJobs.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass CategoryAdmin(RelatedSnippetsMixin, admin.ModelAdmin):\n readonly_fields = [\n 'created',\n 'modified',\n 'creator',\n 'snippet_list',\n 'related_total_snippets',\n 'related_published_jobs',\n ]\n fieldsets = [\n ('ID', {\n 'fields': (\n 'name',\n 'description',\n )\n }),\n ('Snippets', {\n 'fields': (\n 'related_published_jobs',\n 'related_total_snippets',\n 'snippet_list',\n ),\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n ]\n search_fields = [\n 'name',\n 'description',\n ]\n list_display = [\n 'name',\n 'related_published_jobs',\n 'related_total_snippets',\n ]\n list_filter = [\n filters.RelatedPublishedASRSnippetFilter,\n ]\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippetsJobs.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.category')\n super().save_model(request, obj, form, change)\n\n\nclass 
TargetAdmin(RelatedJobsMixin, admin.ModelAdmin):\n form = forms.TargetAdminForm\n save_on_top = True\n readonly_fields = [\n 'created',\n 'modified',\n 'creator',\n 'jexl_expr',\n 'jobs_list',\n 'related_total_jobs',\n 'related_published_jobs',\n ]\n filter_horizontal = [\n 'client_match_rules',\n ]\n search_fields = [\n 'name',\n ]\n list_display = [\n 'name',\n 'related_published_jobs',\n 'related_total_jobs',\n ]\n fieldsets = [\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_country',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_total_bookmarks_count',\n 'filtr_operating_system',\n )\n }),\n ('Addons', {\n 'fields': (\n 'filtr_can_install_addons',\n 'filtr_total_addons',\n 'filtr_browser_addon',\n )\n }),\n ('Accounts and Sync', {\n 'fields': (\n 'filtr_uses_firefox_sync',\n 'filtr_desktop_devices_count',\n 'filtr_mobile_devices_count',\n 'filtr_total_devices_count',\n 'filtr_firefox_service',\n ),\n }),\n ('Advanced Targeting', {\n 'fields': (\n 'client_match_rules',\n )\n }),\n ('Jobs', {\n 'fields': (\n 'related_published_jobs',\n 'related_total_jobs',\n 'jobs_list',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n ]\n list_filter = [\n filters.RelatedPublishedASRSnippetFilter,\n ]\n\n class Media:\n css = {\n 'all': (\n 'css/admin/ListSnippetsJobs.css',\n )\n }\n js = (\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n\n\nclass LocaleAdmin(admin.ModelAdmin):\n list_display = ('name', 'code')\n search_fields = (\n 'name',\n 'code',\n )\n\n\nclass JobAdmin(admin.ModelAdmin):\n save_on_top = True\n preserve_filters = True\n filter_horizontal = [\n 'targets',\n ]\n list_display = [\n 'id',\n 'snippet_name',\n 'target_list',\n 'job_status',\n 'publish_start',\n 'publish_end',\n 'metric_impressions_humanized',\n 'metric_clicks_humanized',\n 'metric_blocks_humanized',\n ]\n list_display_links = [\n 'id',\n 'snippet_name',\n ]\n list_filter = [\n 'status',\n ('campaign', RelatedDropdownFilter),\n ('targets', RelatedOnlyDropdownFilter),\n ('snippet__locale', RelatedOnlyDropdownFilter),\n filters.ChannelFilter,\n ]\n search_fields = [\n 'id',\n 'uuid',\n 'snippet__id',\n 'snippet__name',\n 'campaign__name',\n ]\n autocomplete_fields = [\n 'snippet',\n 'campaign',\n ]\n readonly_fields = [\n 'snippet_name_linked',\n 'creator',\n 'job_status',\n 'uuid',\n 'id',\n 'created',\n 'modified',\n 'metric_impressions_humanized',\n 'metric_clicks_humanized',\n 'metric_blocks_humanized',\n 'metric_last_update',\n 'redash_link',\n ]\n fieldsets = [\n ('ID', {\n 'fields': ('id', 'job_status', 'snippet_name_linked', 'creator')\n }),\n ('Content', {\n 'fields': ('snippet', 'campaign')\n }),\n ('Targeting', {\n 'fields': ('targets', 'weight',)\n }),\n ('Publishing Dates', {\n 'fields': (('publish_start', 'publish_end'),)\n }),\n ('Global Limits', {\n 'fields': ((\n 'limit_impressions',\n 'limit_clicks',\n 'limit_blocks',\n ),),\n }),\n 
('Client Limits', {\n 'fields': (\n 'client_limit_lifetime',\n ('client_limit_per_hour',\n 'client_limit_per_day',\n 'client_limit_per_week',\n 'client_limit_per_fortnight',\n 'client_limit_per_month',),\n ),\n 'description': (\n '''\n Limit the number of impressions of this Job per Firefox Client.<br/><br/>\n Examples:<br/>\n <ol>\n <li>If <code>Max Weekly Impressions</code> is set to 2, each user will see this Job <i>at most</i> 2 times within 7 days.</li>\n <li>Limits can be combined: If <code>Max Weekly Impressions</code> is set to 2 and <code>Max Monthly Impressions</code> is set to 4,\n each user will see this Job <i>at most</i> 2 times within 7 days and <i>at most</i> 4 times within 30 days.</li>\n </ol>\n <strong>Note</strong>: Counting starts from the time a user gets their first impression. For example when a user first time sees a Job on the 10th day of a month, the fortnight counter will expire on the 25th.<br/>\n <strong>Note</strong>: This functionality <i>does not</i> guaranty the minimum number of impressions per user but it enforces that a Job won't appear more than planned.\n '''), # noqa\n }),\n ('Metrics', {\n 'fields': (\n (\n 'metric_impressions_humanized',\n 'metric_clicks_humanized',\n 'metric_blocks_humanized',\n ),\n 'metric_last_update',\n 'redash_link',\n ),\n }),\n ('Other Info', {\n 'fields': (('created', 'modified'),),\n }),\n ('Advanced', {\n 'fields': ('distribution',),\n }),\n ]\n actions = [\n 'action_schedule_job',\n 'action_cancel_job',\n ]\n\n class Media:\n css = {\n 'all': [\n 'css/admin/JobAdmin.css',\n 'css/admin/descriptionColorize.css',\n 'css/admin/IDFieldHighlight.css',\n ]\n }\n js = [\n 'js/admin/jquery.are-you-sure.js',\n 'js/admin/alert-page-leaving.js',\n ]\n\n def snippet_name(self, obj):\n return obj.snippet.name\n\n def snippet_name_linked(self, obj):\n return mark_safe(\n '<a href=\"{}\">{}</a>'.format(\n reverse('admin:base_asrsnippet_change', args=[obj.snippet.id]), obj.snippet.name)\n )\n snippet_name_linked.short_description = 'Link to Snippet'\n\n def target_list(self, obj):\n return mark_safe(\n '<ul>' +\n ''.join([\n f'<li> {target}' for target in obj.targets.values_list('name', flat=True)\n ]) +\n '</ul>'\n )\n target_list.short_description = 'Targets'\n\n def job_status(self, obj):\n msg = obj.get_status_display()\n return mark_safe(\n '<span id=\"job_status\" class={color_class}>{msg}</span>'.format(\n color_class=msg.lower(), msg=msg\n )\n )\n job_status.short_description = 'Status'\n\n def metric_impressions_humanized(self, obj):\n return intcomma(obj.metric_impressions)\n metric_impressions_humanized.short_description = 'Impressions'\n\n def metric_clicks_humanized(self, obj):\n if obj.metric_clicks == 0:\n return 0\n ratio = (obj.metric_clicks / obj.metric_impressions) * 100\n ratio_class = 'ratio-red' if ratio < 0.02 else 'ratio-green'\n return format_html('<span class=\"{}\">{} ({:.2f}%)</span>'.format(\n ratio_class, intcomma(obj.metric_clicks), ratio\n ))\n metric_clicks_humanized.short_description = 'Clicks'\n\n def metric_blocks_humanized(self, obj):\n if obj.metric_blocks == 0:\n return 0\n ratio = (obj.metric_blocks / obj.metric_impressions) * 100\n ratio_class = 'ratio-red' if ratio >= 0.25 else 'ratio-green'\n return format_html('<span class=\"{}\">{} ({:.2f}%)</span>'.format(\n ratio_class, intcomma(obj.metric_blocks), ratio\n ))\n metric_blocks_humanized.short_description = 'Blocks'\n\n def redash_link(self, obj):\n link = (f'{settings.REDASH_ENDPOINT}/queries/{settings.REDASH_QUERY_ID}/'\n 
f'?p_start_date_{settings.REDASH_QUERY_ID}={obj.publish_start.strftime(\"%Y%m%d\")}'\n f'&p_end_date_{settings.REDASH_QUERY_ID}={obj.publish_end.strftime(\"%Y%m%d\")}'\n f'&p_message_id_{settings.REDASH_QUERY_ID}={obj.id}#161888')\n return format_html(f'<a href=\"{link}\">Explore</a>')\n redash_link.short_description = 'Explore in Redash'\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n super().save_model(request, obj, form, change)\n\n def has_change_permission(self, request, obj=None):\n \"\"\" Allow edit only during Draft stage. \"\"\"\n if obj and obj.status == models.Job.DRAFT:\n return True\n return False\n\n def has_delete_permission(self, request, obj=None):\n \"\"\" Allow deletion only during Draft stage. \"\"\"\n if obj and obj.status == models.Job.DRAFT:\n return True\n return False\n\n def has_publish_permission(self, request):\n return request.user.has_perm('base.change_job')\n\n def response_change(self, request, obj):\n # Add logs using admin system\n if '_cancel' in request.POST:\n obj.change_status(status=models.Job.CANCELED, user=request.user)\n return HttpResponseRedirect('.')\n elif '_schedule' in request.POST:\n obj.change_status(status=models.Job.SCHEDULED, user=request.user)\n return HttpResponseRedirect('.')\n elif '_duplicate' in request.POST:\n new_job = obj.duplicate(request.user)\n return HttpResponseRedirect(new_job.get_admin_url(full=False))\n return super().response_change(request, obj)\n\n def _changeform_view(self, request, *args, **kwargs):\n view = super()._changeform_view(request, *args, **kwargs)\n if hasattr(view, 'context_data'):\n obj = view.context_data['original']\n if obj and self.has_publish_permission(request):\n if obj.status in [models.Job.PUBLISHED, models.Job.SCHEDULED]:\n view.context_data['show_cancel'] = True\n elif obj.status == models.Job.DRAFT:\n view.context_data['show_schedule'] = True\n view.context_data['show_duplicate'] = True\n return view\n\n def _action_status_change(self, action, request, queryset):\n if action == 'schedule':\n status = models.Job.SCHEDULED\n no_action_message = 'Skipped {} already scheduled and published Jobs.'\n success_message = 'Scheduled {} Jobs.'\n clean_queryset = queryset.filter(status=models.Job.DRAFT)\n elif action == 'cancel':\n status = models.Job.CANCELED\n no_action_message = 'Skipped {} already canceled or completed Jobs.'\n success_message = 'Canceled {} Jobs.'\n clean_queryset = queryset.filter(\n Q(status=models.Job.PUBLISHED) |\n Q(status=models.Job.SCHEDULED) |\n Q(status=models.Job.DRAFT)\n )\n else:\n messages.success(request, 'Error no action')\n return\n\n no_jobs = clean_queryset.count()\n no_already_scheduled_jobs = queryset.count() - no_jobs\n\n for job in clean_queryset:\n job.change_status(status=status, user=request.user)\n\n if no_already_scheduled_jobs:\n messages.warning(request, no_action_message.format(no_already_scheduled_jobs))\n messages.success(request, success_message.format(no_jobs))\n\n def action_schedule_job(self, request, queryset):\n self._action_status_change('schedule', request, queryset)\n action_schedule_job.short_description = 'Schedule selected Jobs'\n action_schedule_job.allowed_permissions = (\n 'publish',\n )\n\n def action_cancel_job(self, request, queryset):\n self._action_status_change('cancel', request, queryset)\n action_cancel_job.short_description = 'Cancel selected Jobs'\n action_cancel_job.allowed_permissions = (\n 'publish',\n )\n\n\nclass DistributionAdmin(admin.ModelAdmin):\n 
save_on_top = True\n\n\nclass DailyJobMetrics(admin.ModelAdmin):\n list_display = ('id', 'job', 'data_fetched_on')\n search_fields = ('job__id', 'job__snippet__name', 'job__snippet__id')\n fieldsets = [\n ('Metrics', {\n 'fields': (\n 'job',\n 'date',\n 'impressions',\n 'clicks',\n 'blocks',\n ),\n }),\n ]\n\n def has_add_permission(self, request):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n", "path": "snippets/base/admin/adminmodels.py" } ]
diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py index d53494e34..f43c49b8f 100644 --- a/snippets/base/admin/adminmodels.py +++ b/snippets/base/admin/adminmodels.py @@ -1019,7 +1019,7 @@ class JobAdmin(admin.ModelAdmin): ] fieldsets = [ ('ID', { - 'fields': ('id', ('job_status', 'completed_on'), 'snippet_name_linked', 'creator') + 'fields': ('id', 'job_status', 'snippet_name_linked', 'creator') }), ('Content', { 'fields': ('snippet', 'campaign')
encode__django-rest-framework-7708
Invalid `CursorPagination` schema type
## Checklist
- [x] I have verified that the issue exists against the `master` branch of Django REST framework.
- [x] I have searched for similar issues in both open and closed tickets and cannot find a duplicate.
- [x] This is not a usage question. (Those should be directed to the [discussion group](https://groups.google.com/forum/#!forum/django-rest-framework) instead.)
- [x] This cannot be dealt with as a third party library. (We prefer new functionality to be [in the form of third party libraries](https://www.django-rest-framework.org/community/third-party-packages/#about-third-party-packages) where possible.)
- [x] I have reduced the issue to the simplest possible case.
- [ ] I have included a failing test as a pull request. (If you are unable to do so we can still accept the issue.)

## Steps to reproduce
1. Create a custom cursor pagination class.

`pagination.py`
```python
from rest_framework.pagination import CursorPagination


class AuthLogCursorPagination(CursorPagination):
    page_size = 50
    page_size_query_param = "page_size"
    ordering = "-created_at"
```

2. Register `AuthLogCursorPagination` in the APIView.

`views.py`
```python
from rest_framework.generics import ListAPIView

from app.models import AuthLog
from app.api.pagination import AuthLogCursorPagination
from app.api.serializers import AuthLogSerializer


class AuthLogsAPIView(ListAPIView):
    queryset = AuthLog.objects.all()
    serializer_class = AuthLogSerializer
    pagination_class = AuthLogCursorPagination
```

3. Register the view.

`urls.py`
```python
from django.urls import path
from rest_framework.schemas import get_schema_view

from app.api.views import AuthLogsAPIView

urlpatterns = [
    path("authlog/", AuthLogsAPIView.as_view()),
    path("openapi", get_schema_view(
        title="Your Project",
        description="API for all things …",
        version="1.0.0"
    ), name="openapi-schema"),
]
```

4. Open /openapi and check the generated spec for the /authlog/ endpoint.

## Expected behavior
```yaml
  /authlog/:
    get:
      operationId: listAuthLogs
      description: ''
      parameters:
      - name: cursor
        required: false
        in: query
        description: The pagination cursor value.
        schema:
          type: string
      - name: page_size
        required: false
        in: query
        description: Number of results to return per page.
        schema:
          type: integer
```

## Actual behavior
```yaml
  /authlog/:
    get:
      operationId: listAuthLogs
      description: ''
      parameters:
      - name: cursor
        required: false
        in: query
        description: The pagination cursor value.
        schema:
          type: integer  # <---- INVALID
      - name: page_size
        required: false
        in: query
        description: Number of results to return per page.
        schema:
          type: integer
```

The `cursor` parameter schema type must be `string`, but `integer` is generated instead.
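The mismatch originates in `CursorPagination.get_schema_operation_parameters` (see the `rest_framework/pagination.py` source below), which declares the `cursor` query parameter with `'type': 'integer'` even though cursors are opaque base64-encoded strings. Until that is corrected upstream, a minimal workaround sketch is to override the method in the pagination class from step 1; the override shown here is an illustration, not the upstream patch.

```python
# Workaround sketch (not the upstream fix): the pagination class from step 1,
# extended so the generated OpenAPI parameters describe the cursor as a string.
from rest_framework.pagination import CursorPagination


class AuthLogCursorPagination(CursorPagination):
    page_size = 50
    page_size_query_param = "page_size"
    ordering = "-created_at"

    def get_schema_operation_parameters(self, view):
        parameters = super().get_schema_operation_parameters(view)
        for parameter in parameters:
            if parameter["name"] == self.cursor_query_param:
                # Cursors are base64-encoded opaque values, never integers.
                parameter["schema"]["type"] = "string"
        return parameters
```

With this override in place, the `/openapi` output for `/authlog/` matches the expected behaviour above, while the `page_size` parameter keeps its `integer` type.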
[ { "content": "\"\"\"\nPagination serializers determine the structure of the output that should\nbe used for paginated responses.\n\"\"\"\nfrom base64 import b64decode, b64encode\nfrom collections import OrderedDict, namedtuple\nfrom urllib import parse\n\nfrom django.core.paginator import InvalidPage\nfrom django.core.paginator import Paginator as DjangoPaginator\nfrom django.template import loader\nfrom django.utils.encoding import force_str\nfrom django.utils.translation import gettext_lazy as _\n\nfrom rest_framework.compat import coreapi, coreschema\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\nfrom rest_framework.settings import api_settings\nfrom rest_framework.utils.urls import remove_query_param, replace_query_param\n\n\ndef _positive_int(integer_string, strict=False, cutoff=None):\n \"\"\"\n Cast a string to a strictly positive integer.\n \"\"\"\n ret = int(integer_string)\n if ret < 0 or (ret == 0 and strict):\n raise ValueError()\n if cutoff:\n return min(ret, cutoff)\n return ret\n\n\ndef _divide_with_ceil(a, b):\n \"\"\"\n Returns 'a' divided by 'b', with any remainder rounded up.\n \"\"\"\n if a % b:\n return (a // b) + 1\n\n return a // b\n\n\ndef _get_displayed_page_numbers(current, final):\n \"\"\"\n This utility function determines a list of page numbers to display.\n This gives us a nice contextually relevant set of page numbers.\n\n For example:\n current=14, final=16 -> [1, None, 13, 14, 15, 16]\n\n This implementation gives one page to each side of the cursor,\n or two pages to the side when the cursor is at the edge, then\n ensures that any breaks between non-continuous page numbers never\n remove only a single page.\n\n For an alternative implementation which gives two pages to each side of\n the cursor, eg. as in GitHub issue list pagination, see:\n\n https://gist.github.com/tomchristie/321140cebb1c4a558b15\n \"\"\"\n assert current >= 1\n assert final >= current\n\n if final <= 5:\n return list(range(1, final + 1))\n\n # We always include the first two pages, last two pages, and\n # two pages either side of the current page.\n included = {1, current - 1, current, current + 1, final}\n\n # If the break would only exclude a single page number then we\n # may as well include the page number instead of the break.\n if current <= 4:\n included.add(2)\n included.add(3)\n if current >= final - 3:\n included.add(final - 1)\n included.add(final - 2)\n\n # Now sort the page numbers and drop anything outside the limits.\n included = [\n idx for idx in sorted(included)\n if 0 < idx <= final\n ]\n\n # Finally insert any `...` breaks\n if current > 4:\n included.insert(1, None)\n if current < final - 3:\n included.insert(len(included) - 1, None)\n return included\n\n\ndef _get_page_links(page_numbers, current, url_func):\n \"\"\"\n Given a list of page numbers and `None` page breaks,\n return a list of `PageLink` objects.\n \"\"\"\n page_links = []\n for page_number in page_numbers:\n if page_number is None:\n page_link = PAGE_BREAK\n else:\n page_link = PageLink(\n url=url_func(page_number),\n number=page_number,\n is_active=(page_number == current),\n is_break=False\n )\n page_links.append(page_link)\n return page_links\n\n\ndef _reverse_ordering(ordering_tuple):\n \"\"\"\n Given an order_by tuple such as `('-created', 'uuid')` reverse the\n ordering and return a new tuple, eg. 
`('created', '-uuid')`.\n \"\"\"\n def invert(x):\n return x[1:] if x.startswith('-') else '-' + x\n\n return tuple([invert(item) for item in ordering_tuple])\n\n\nCursor = namedtuple('Cursor', ['offset', 'reverse', 'position'])\nPageLink = namedtuple('PageLink', ['url', 'number', 'is_active', 'is_break'])\n\nPAGE_BREAK = PageLink(url=None, number=None, is_active=False, is_break=True)\n\n\nclass BasePagination:\n display_page_controls = False\n\n def paginate_queryset(self, queryset, request, view=None): # pragma: no cover\n raise NotImplementedError('paginate_queryset() must be implemented.')\n\n def get_paginated_response(self, data): # pragma: no cover\n raise NotImplementedError('get_paginated_response() must be implemented.')\n\n def get_paginated_response_schema(self, schema):\n return schema\n\n def to_html(self): # pragma: no cover\n raise NotImplementedError('to_html() must be implemented to display page controls.')\n\n def get_results(self, data):\n return data['results']\n\n def get_schema_fields(self, view):\n assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'\n return []\n\n def get_schema_operation_parameters(self, view):\n return []\n\n\nclass PageNumberPagination(BasePagination):\n \"\"\"\n A simple page number based style that supports page numbers as\n query parameters. For example:\n\n http://api.example.org/accounts/?page=4\n http://api.example.org/accounts/?page=4&page_size=100\n \"\"\"\n # The default page size.\n # Defaults to `None`, meaning pagination is disabled.\n page_size = api_settings.PAGE_SIZE\n\n django_paginator_class = DjangoPaginator\n\n # Client can control the page using this query parameter.\n page_query_param = 'page'\n page_query_description = _('A page number within the paginated result set.')\n\n # Client can control the page size using this query parameter.\n # Default is 'None'. 
Set to eg 'page_size' to enable usage.\n page_size_query_param = None\n page_size_query_description = _('Number of results to return per page.')\n\n # Set to an integer to limit the maximum page size the client may request.\n # Only relevant if 'page_size_query_param' has also been set.\n max_page_size = None\n\n last_page_strings = ('last',)\n\n template = 'rest_framework/pagination/numbers.html'\n\n invalid_page_message = _('Invalid page.')\n\n def paginate_queryset(self, queryset, request, view=None):\n \"\"\"\n Paginate a queryset if required, either returning a\n page object, or `None` if pagination is not configured for this view.\n \"\"\"\n page_size = self.get_page_size(request)\n if not page_size:\n return None\n\n paginator = self.django_paginator_class(queryset, page_size)\n page_number = self.get_page_number(request, paginator)\n\n try:\n self.page = paginator.page(page_number)\n except InvalidPage as exc:\n msg = self.invalid_page_message.format(\n page_number=page_number, message=str(exc)\n )\n raise NotFound(msg)\n\n if paginator.num_pages > 1 and self.template is not None:\n # The browsable API should display pagination controls.\n self.display_page_controls = True\n\n self.request = request\n return list(self.page)\n\n def get_page_number(self, request, paginator):\n page_number = request.query_params.get(self.page_query_param, 1)\n if page_number in self.last_page_strings:\n page_number = paginator.num_pages\n return page_number\n\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.page.paginator.count),\n ('next', self.get_next_link()),\n ('previous', self.get_previous_link()),\n ('results', data)\n ]))\n\n def get_paginated_response_schema(self, schema):\n return {\n 'type': 'object',\n 'properties': {\n 'count': {\n 'type': 'integer',\n 'example': 123,\n },\n 'next': {\n 'type': 'string',\n 'nullable': True,\n 'format': 'uri',\n 'example': 'http://api.example.org/accounts/?{page_query_param}=4'.format(\n page_query_param=self.page_query_param)\n },\n 'previous': {\n 'type': 'string',\n 'nullable': True,\n 'format': 'uri',\n 'example': 'http://api.example.org/accounts/?{page_query_param}=2'.format(\n page_query_param=self.page_query_param)\n },\n 'results': schema,\n },\n }\n\n def get_page_size(self, request):\n if self.page_size_query_param:\n try:\n return _positive_int(\n request.query_params[self.page_size_query_param],\n strict=True,\n cutoff=self.max_page_size\n )\n except (KeyError, ValueError):\n pass\n\n return self.page_size\n\n def get_next_link(self):\n if not self.page.has_next():\n return None\n url = self.request.build_absolute_uri()\n page_number = self.page.next_page_number()\n return replace_query_param(url, self.page_query_param, page_number)\n\n def get_previous_link(self):\n if not self.page.has_previous():\n return None\n url = self.request.build_absolute_uri()\n page_number = self.page.previous_page_number()\n if page_number == 1:\n return remove_query_param(url, self.page_query_param)\n return replace_query_param(url, self.page_query_param, page_number)\n\n def get_html_context(self):\n base_url = self.request.build_absolute_uri()\n\n def page_number_to_url(page_number):\n if page_number == 1:\n return remove_query_param(base_url, self.page_query_param)\n else:\n return replace_query_param(base_url, self.page_query_param, page_number)\n\n current = self.page.number\n final = self.page.paginator.num_pages\n page_numbers = _get_displayed_page_numbers(current, final)\n page_links = 
_get_page_links(page_numbers, current, page_number_to_url)\n\n return {\n 'previous_url': self.get_previous_link(),\n 'next_url': self.get_next_link(),\n 'page_links': page_links\n }\n\n def to_html(self):\n template = loader.get_template(self.template)\n context = self.get_html_context()\n return template.render(context)\n\n def get_schema_fields(self, view):\n assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'\n assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'\n fields = [\n coreapi.Field(\n name=self.page_query_param,\n required=False,\n location='query',\n schema=coreschema.Integer(\n title='Page',\n description=force_str(self.page_query_description)\n )\n )\n ]\n if self.page_size_query_param is not None:\n fields.append(\n coreapi.Field(\n name=self.page_size_query_param,\n required=False,\n location='query',\n schema=coreschema.Integer(\n title='Page size',\n description=force_str(self.page_size_query_description)\n )\n )\n )\n return fields\n\n def get_schema_operation_parameters(self, view):\n parameters = [\n {\n 'name': self.page_query_param,\n 'required': False,\n 'in': 'query',\n 'description': force_str(self.page_query_description),\n 'schema': {\n 'type': 'integer',\n },\n },\n ]\n if self.page_size_query_param is not None:\n parameters.append(\n {\n 'name': self.page_size_query_param,\n 'required': False,\n 'in': 'query',\n 'description': force_str(self.page_size_query_description),\n 'schema': {\n 'type': 'integer',\n },\n },\n )\n return parameters\n\n\nclass LimitOffsetPagination(BasePagination):\n \"\"\"\n A limit/offset based style. For example:\n\n http://api.example.org/accounts/?limit=100\n http://api.example.org/accounts/?offset=400&limit=100\n \"\"\"\n default_limit = api_settings.PAGE_SIZE\n limit_query_param = 'limit'\n limit_query_description = _('Number of results to return per page.')\n offset_query_param = 'offset'\n offset_query_description = _('The initial index from which to return the results.')\n max_limit = None\n template = 'rest_framework/pagination/numbers.html'\n\n def paginate_queryset(self, queryset, request, view=None):\n self.limit = self.get_limit(request)\n if self.limit is None:\n return None\n\n self.count = self.get_count(queryset)\n self.offset = self.get_offset(request)\n self.request = request\n if self.count > self.limit and self.template is not None:\n self.display_page_controls = True\n\n if self.count == 0 or self.offset > self.count:\n return []\n return list(queryset[self.offset:self.offset + self.limit])\n\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.count),\n ('next', self.get_next_link()),\n ('previous', self.get_previous_link()),\n ('results', data)\n ]))\n\n def get_paginated_response_schema(self, schema):\n return {\n 'type': 'object',\n 'properties': {\n 'count': {\n 'type': 'integer',\n 'example': 123,\n },\n 'next': {\n 'type': 'string',\n 'nullable': True,\n 'format': 'uri',\n 'example': 'http://api.example.org/accounts/?{offset_param}=400&{limit_param}=100'.format(\n offset_param=self.offset_query_param, limit_param=self.limit_query_param),\n },\n 'previous': {\n 'type': 'string',\n 'nullable': True,\n 'format': 'uri',\n 'example': 'http://api.example.org/accounts/?{offset_param}=200&{limit_param}=100'.format(\n offset_param=self.offset_query_param, limit_param=self.limit_query_param),\n },\n 'results': schema,\n },\n }\n\n def get_limit(self, request):\n if self.limit_query_param:\n try:\n return 
_positive_int(\n request.query_params[self.limit_query_param],\n strict=True,\n cutoff=self.max_limit\n )\n except (KeyError, ValueError):\n pass\n\n return self.default_limit\n\n def get_offset(self, request):\n try:\n return _positive_int(\n request.query_params[self.offset_query_param],\n )\n except (KeyError, ValueError):\n return 0\n\n def get_next_link(self):\n if self.offset + self.limit >= self.count:\n return None\n\n url = self.request.build_absolute_uri()\n url = replace_query_param(url, self.limit_query_param, self.limit)\n\n offset = self.offset + self.limit\n return replace_query_param(url, self.offset_query_param, offset)\n\n def get_previous_link(self):\n if self.offset <= 0:\n return None\n\n url = self.request.build_absolute_uri()\n url = replace_query_param(url, self.limit_query_param, self.limit)\n\n if self.offset - self.limit <= 0:\n return remove_query_param(url, self.offset_query_param)\n\n offset = self.offset - self.limit\n return replace_query_param(url, self.offset_query_param, offset)\n\n def get_html_context(self):\n base_url = self.request.build_absolute_uri()\n\n if self.limit:\n current = _divide_with_ceil(self.offset, self.limit) + 1\n\n # The number of pages is a little bit fiddly.\n # We need to sum both the number of pages from current offset to end\n # plus the number of pages up to the current offset.\n # When offset is not strictly divisible by the limit then we may\n # end up introducing an extra page as an artifact.\n final = (\n _divide_with_ceil(self.count - self.offset, self.limit) +\n _divide_with_ceil(self.offset, self.limit)\n )\n\n final = max(final, 1)\n else:\n current = 1\n final = 1\n\n if current > final:\n current = final\n\n def page_number_to_url(page_number):\n if page_number == 1:\n return remove_query_param(base_url, self.offset_query_param)\n else:\n offset = self.offset + ((page_number - current) * self.limit)\n return replace_query_param(base_url, self.offset_query_param, offset)\n\n page_numbers = _get_displayed_page_numbers(current, final)\n page_links = _get_page_links(page_numbers, current, page_number_to_url)\n\n return {\n 'previous_url': self.get_previous_link(),\n 'next_url': self.get_next_link(),\n 'page_links': page_links\n }\n\n def to_html(self):\n template = loader.get_template(self.template)\n context = self.get_html_context()\n return template.render(context)\n\n def get_count(self, queryset):\n \"\"\"\n Determine an object count, supporting either querysets or regular lists.\n \"\"\"\n try:\n return queryset.count()\n except (AttributeError, TypeError):\n return len(queryset)\n\n def get_schema_fields(self, view):\n assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'\n assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'\n return [\n coreapi.Field(\n name=self.limit_query_param,\n required=False,\n location='query',\n schema=coreschema.Integer(\n title='Limit',\n description=force_str(self.limit_query_description)\n )\n ),\n coreapi.Field(\n name=self.offset_query_param,\n required=False,\n location='query',\n schema=coreschema.Integer(\n title='Offset',\n description=force_str(self.offset_query_description)\n )\n )\n ]\n\n def get_schema_operation_parameters(self, view):\n parameters = [\n {\n 'name': self.limit_query_param,\n 'required': False,\n 'in': 'query',\n 'description': force_str(self.limit_query_description),\n 'schema': {\n 'type': 'integer',\n },\n },\n {\n 'name': self.offset_query_param,\n 'required': False,\n 'in': 'query',\n 
'description': force_str(self.offset_query_description),\n 'schema': {\n 'type': 'integer',\n },\n },\n ]\n return parameters\n\n\nclass CursorPagination(BasePagination):\n \"\"\"\n The cursor pagination implementation is necessarily complex.\n For an overview of the position/offset style we use, see this post:\n https://cra.mr/2011/03/08/building-cursors-for-the-disqus-api\n \"\"\"\n cursor_query_param = 'cursor'\n cursor_query_description = _('The pagination cursor value.')\n page_size = api_settings.PAGE_SIZE\n invalid_cursor_message = _('Invalid cursor')\n ordering = '-created'\n template = 'rest_framework/pagination/previous_and_next.html'\n\n # Client can control the page size using this query parameter.\n # Default is 'None'. Set to eg 'page_size' to enable usage.\n page_size_query_param = None\n page_size_query_description = _('Number of results to return per page.')\n\n # Set to an integer to limit the maximum page size the client may request.\n # Only relevant if 'page_size_query_param' has also been set.\n max_page_size = None\n\n # The offset in the cursor is used in situations where we have a\n # nearly-unique index. (Eg millisecond precision creation timestamps)\n # We guard against malicious users attempting to cause expensive database\n # queries, by having a hard cap on the maximum possible size of the offset.\n offset_cutoff = 1000\n\n def paginate_queryset(self, queryset, request, view=None):\n self.page_size = self.get_page_size(request)\n if not self.page_size:\n return None\n\n self.base_url = request.build_absolute_uri()\n self.ordering = self.get_ordering(request, queryset, view)\n\n self.cursor = self.decode_cursor(request)\n if self.cursor is None:\n (offset, reverse, current_position) = (0, False, None)\n else:\n (offset, reverse, current_position) = self.cursor\n\n # Cursor pagination always enforces an ordering.\n if reverse:\n queryset = queryset.order_by(*_reverse_ordering(self.ordering))\n else:\n queryset = queryset.order_by(*self.ordering)\n\n # If we have a cursor with a fixed position then filter by that.\n if current_position is not None:\n order = self.ordering[0]\n is_reversed = order.startswith('-')\n order_attr = order.lstrip('-')\n\n # Test for: (cursor reversed) XOR (queryset reversed)\n if self.cursor.reverse != is_reversed:\n kwargs = {order_attr + '__lt': current_position}\n else:\n kwargs = {order_attr + '__gt': current_position}\n\n queryset = queryset.filter(**kwargs)\n\n # If we have an offset cursor then offset the entire page by that amount.\n # We also always fetch an extra item in order to determine if there is a\n # page following on from this one.\n results = list(queryset[offset:offset + self.page_size + 1])\n self.page = list(results[:self.page_size])\n\n # Determine the position of the final item following the page.\n if len(results) > len(self.page):\n has_following_position = True\n following_position = self._get_position_from_instance(results[-1], self.ordering)\n else:\n has_following_position = False\n following_position = None\n\n if reverse:\n # If we have a reverse queryset, then the query ordering was in reverse\n # so we need to reverse the items again before returning them to the user.\n self.page = list(reversed(self.page))\n\n # Determine next and previous positions for reverse cursors.\n self.has_next = (current_position is not None) or (offset > 0)\n self.has_previous = has_following_position\n if self.has_next:\n self.next_position = current_position\n if self.has_previous:\n self.previous_position = 
following_position\n else:\n # Determine next and previous positions for forward cursors.\n self.has_next = has_following_position\n self.has_previous = (current_position is not None) or (offset > 0)\n if self.has_next:\n self.next_position = following_position\n if self.has_previous:\n self.previous_position = current_position\n\n # Display page controls in the browsable API if there is more\n # than one page.\n if (self.has_previous or self.has_next) and self.template is not None:\n self.display_page_controls = True\n\n return self.page\n\n def get_page_size(self, request):\n if self.page_size_query_param:\n try:\n return _positive_int(\n request.query_params[self.page_size_query_param],\n strict=True,\n cutoff=self.max_page_size\n )\n except (KeyError, ValueError):\n pass\n\n return self.page_size\n\n def get_next_link(self):\n if not self.has_next:\n return None\n\n if self.page and self.cursor and self.cursor.reverse and self.cursor.offset != 0:\n # If we're reversing direction and we have an offset cursor\n # then we cannot use the first position we find as a marker.\n compare = self._get_position_from_instance(self.page[-1], self.ordering)\n else:\n compare = self.next_position\n offset = 0\n\n has_item_with_unique_position = False\n for item in reversed(self.page):\n position = self._get_position_from_instance(item, self.ordering)\n if position != compare:\n # The item in this position and the item following it\n # have different positions. We can use this position as\n # our marker.\n has_item_with_unique_position = True\n break\n\n # The item in this position has the same position as the item\n # following it, we can't use it as a marker position, so increment\n # the offset and keep seeking to the previous item.\n compare = position\n offset += 1\n\n if self.page and not has_item_with_unique_position:\n # There were no unique positions in the page.\n if not self.has_previous:\n # We are on the first page.\n # Our cursor will have an offset equal to the page size,\n # but no position to filter against yet.\n offset = self.page_size\n position = None\n elif self.cursor.reverse:\n # The change in direction will introduce a paging artifact,\n # where we end up skipping forward a few extra items.\n offset = 0\n position = self.previous_position\n else:\n # Use the position from the existing cursor and increment\n # it's offset by the page size.\n offset = self.cursor.offset + self.page_size\n position = self.previous_position\n\n if not self.page:\n position = self.next_position\n\n cursor = Cursor(offset=offset, reverse=False, position=position)\n return self.encode_cursor(cursor)\n\n def get_previous_link(self):\n if not self.has_previous:\n return None\n\n if self.page and self.cursor and not self.cursor.reverse and self.cursor.offset != 0:\n # If we're reversing direction and we have an offset cursor\n # then we cannot use the first position we find as a marker.\n compare = self._get_position_from_instance(self.page[0], self.ordering)\n else:\n compare = self.previous_position\n offset = 0\n\n has_item_with_unique_position = False\n for item in self.page:\n position = self._get_position_from_instance(item, self.ordering)\n if position != compare:\n # The item in this position and the item following it\n # have different positions. 
We can use this position as\n # our marker.\n has_item_with_unique_position = True\n break\n\n # The item in this position has the same position as the item\n # following it, we can't use it as a marker position, so increment\n # the offset and keep seeking to the previous item.\n compare = position\n offset += 1\n\n if self.page and not has_item_with_unique_position:\n # There were no unique positions in the page.\n if not self.has_next:\n # We are on the final page.\n # Our cursor will have an offset equal to the page size,\n # but no position to filter against yet.\n offset = self.page_size\n position = None\n elif self.cursor.reverse:\n # Use the position from the existing cursor and increment\n # it's offset by the page size.\n offset = self.cursor.offset + self.page_size\n position = self.next_position\n else:\n # The change in direction will introduce a paging artifact,\n # where we end up skipping back a few extra items.\n offset = 0\n position = self.next_position\n\n if not self.page:\n position = self.previous_position\n\n cursor = Cursor(offset=offset, reverse=True, position=position)\n return self.encode_cursor(cursor)\n\n def get_ordering(self, request, queryset, view):\n \"\"\"\n Return a tuple of strings, that may be used in an `order_by` method.\n \"\"\"\n ordering_filters = [\n filter_cls for filter_cls in getattr(view, 'filter_backends', [])\n if hasattr(filter_cls, 'get_ordering')\n ]\n\n if ordering_filters:\n # If a filter exists on the view that implements `get_ordering`\n # then we defer to that filter to determine the ordering.\n filter_cls = ordering_filters[0]\n filter_instance = filter_cls()\n ordering = filter_instance.get_ordering(request, queryset, view)\n assert ordering is not None, (\n 'Using cursor pagination, but filter class {filter_cls} '\n 'returned a `None` ordering.'.format(\n filter_cls=filter_cls.__name__\n )\n )\n else:\n # The default case is to check for an `ordering` attribute\n # on this pagination instance.\n ordering = self.ordering\n assert ordering is not None, (\n 'Using cursor pagination, but no ordering attribute was declared '\n 'on the pagination class.'\n )\n assert '__' not in ordering, (\n 'Cursor pagination does not support double underscore lookups '\n 'for orderings. Orderings should be an unchanging, unique or '\n 'nearly-unique field on the model, such as \"-created\" or \"pk\".'\n )\n\n assert isinstance(ordering, (str, list, tuple)), (\n 'Invalid ordering. 
Expected string or tuple, but got {type}'.format(\n type=type(ordering).__name__\n )\n )\n\n if isinstance(ordering, str):\n return (ordering,)\n return tuple(ordering)\n\n def decode_cursor(self, request):\n \"\"\"\n Given a request with a cursor, return a `Cursor` instance.\n \"\"\"\n # Determine if we have a cursor, and if so then decode it.\n encoded = request.query_params.get(self.cursor_query_param)\n if encoded is None:\n return None\n\n try:\n querystring = b64decode(encoded.encode('ascii')).decode('ascii')\n tokens = parse.parse_qs(querystring, keep_blank_values=True)\n\n offset = tokens.get('o', ['0'])[0]\n offset = _positive_int(offset, cutoff=self.offset_cutoff)\n\n reverse = tokens.get('r', ['0'])[0]\n reverse = bool(int(reverse))\n\n position = tokens.get('p', [None])[0]\n except (TypeError, ValueError):\n raise NotFound(self.invalid_cursor_message)\n\n return Cursor(offset=offset, reverse=reverse, position=position)\n\n def encode_cursor(self, cursor):\n \"\"\"\n Given a Cursor instance, return an url with encoded cursor.\n \"\"\"\n tokens = {}\n if cursor.offset != 0:\n tokens['o'] = str(cursor.offset)\n if cursor.reverse:\n tokens['r'] = '1'\n if cursor.position is not None:\n tokens['p'] = cursor.position\n\n querystring = parse.urlencode(tokens, doseq=True)\n encoded = b64encode(querystring.encode('ascii')).decode('ascii')\n return replace_query_param(self.base_url, self.cursor_query_param, encoded)\n\n def _get_position_from_instance(self, instance, ordering):\n field_name = ordering[0].lstrip('-')\n if isinstance(instance, dict):\n attr = instance[field_name]\n else:\n attr = getattr(instance, field_name)\n return str(attr)\n\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('next', self.get_next_link()),\n ('previous', self.get_previous_link()),\n ('results', data)\n ]))\n\n def get_paginated_response_schema(self, schema):\n return {\n 'type': 'object',\n 'properties': {\n 'next': {\n 'type': 'string',\n 'nullable': True,\n },\n 'previous': {\n 'type': 'string',\n 'nullable': True,\n },\n 'results': schema,\n },\n }\n\n def get_html_context(self):\n return {\n 'previous_url': self.get_previous_link(),\n 'next_url': self.get_next_link()\n }\n\n def to_html(self):\n template = loader.get_template(self.template)\n context = self.get_html_context()\n return template.render(context)\n\n def get_schema_fields(self, view):\n assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'\n assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'\n fields = [\n coreapi.Field(\n name=self.cursor_query_param,\n required=False,\n location='query',\n schema=coreschema.String(\n title='Cursor',\n description=force_str(self.cursor_query_description)\n )\n )\n ]\n if self.page_size_query_param is not None:\n fields.append(\n coreapi.Field(\n name=self.page_size_query_param,\n required=False,\n location='query',\n schema=coreschema.Integer(\n title='Page size',\n description=force_str(self.page_size_query_description)\n )\n )\n )\n return fields\n\n def get_schema_operation_parameters(self, view):\n parameters = [\n {\n 'name': self.cursor_query_param,\n 'required': False,\n 'in': 'query',\n 'description': force_str(self.cursor_query_description),\n 'schema': {\n 'type': 'integer',\n },\n }\n ]\n if self.page_size_query_param is not None:\n parameters.append(\n {\n 'name': self.page_size_query_param,\n 'required': False,\n 'in': 'query',\n 'description': force_str(self.page_size_query_description),\n 
'schema': {\n 'type': 'integer',\n },\n }\n )\n return parameters\n", "path": "rest_framework/pagination.py" } ]
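For a quick check against the implementation above, the paginator's `get_schema_operation_parameters` can be called directly; this is a sketch and assumes a configured Django settings module (for example, run it inside `manage.py shell`). The `view` argument is unused by `CursorPagination`, so `None` suffices here.

```python
# Reproduction sketch: inspect the OpenAPI parameter emitted for the cursor.
# Assumes DJANGO_SETTINGS_MODULE is configured (e.g. inside `manage.py shell`).
from rest_framework.pagination import CursorPagination

parameters = CursorPagination().get_schema_operation_parameters(view=None)
cursor_parameter = next(p for p in parameters if p["name"] == "cursor")
print(cursor_parameter["schema"])  # the code above yields {'type': 'integer'}
```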
[ { "content": "\"\"\"\nPagination serializers determine the structure of the output that should\nbe used for paginated responses.\n\"\"\"\nfrom base64 import b64decode, b64encode\nfrom collections import OrderedDict, namedtuple\nfrom urllib import parse\n\nfrom django.core.paginator import InvalidPage\nfrom django.core.paginator import Paginator as DjangoPaginator\nfrom django.template import loader\nfrom django.utils.encoding import force_str\nfrom django.utils.translation import gettext_lazy as _\n\nfrom rest_framework.compat import coreapi, coreschema\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\nfrom rest_framework.settings import api_settings\nfrom rest_framework.utils.urls import remove_query_param, replace_query_param\n\n\ndef _positive_int(integer_string, strict=False, cutoff=None):\n \"\"\"\n Cast a string to a strictly positive integer.\n \"\"\"\n ret = int(integer_string)\n if ret < 0 or (ret == 0 and strict):\n raise ValueError()\n if cutoff:\n return min(ret, cutoff)\n return ret\n\n\ndef _divide_with_ceil(a, b):\n \"\"\"\n Returns 'a' divided by 'b', with any remainder rounded up.\n \"\"\"\n if a % b:\n return (a // b) + 1\n\n return a // b\n\n\ndef _get_displayed_page_numbers(current, final):\n \"\"\"\n This utility function determines a list of page numbers to display.\n This gives us a nice contextually relevant set of page numbers.\n\n For example:\n current=14, final=16 -> [1, None, 13, 14, 15, 16]\n\n This implementation gives one page to each side of the cursor,\n or two pages to the side when the cursor is at the edge, then\n ensures that any breaks between non-continuous page numbers never\n remove only a single page.\n\n For an alternative implementation which gives two pages to each side of\n the cursor, eg. as in GitHub issue list pagination, see:\n\n https://gist.github.com/tomchristie/321140cebb1c4a558b15\n \"\"\"\n assert current >= 1\n assert final >= current\n\n if final <= 5:\n return list(range(1, final + 1))\n\n # We always include the first two pages, last two pages, and\n # two pages either side of the current page.\n included = {1, current - 1, current, current + 1, final}\n\n # If the break would only exclude a single page number then we\n # may as well include the page number instead of the break.\n if current <= 4:\n included.add(2)\n included.add(3)\n if current >= final - 3:\n included.add(final - 1)\n included.add(final - 2)\n\n # Now sort the page numbers and drop anything outside the limits.\n included = [\n idx for idx in sorted(included)\n if 0 < idx <= final\n ]\n\n # Finally insert any `...` breaks\n if current > 4:\n included.insert(1, None)\n if current < final - 3:\n included.insert(len(included) - 1, None)\n return included\n\n\ndef _get_page_links(page_numbers, current, url_func):\n \"\"\"\n Given a list of page numbers and `None` page breaks,\n return a list of `PageLink` objects.\n \"\"\"\n page_links = []\n for page_number in page_numbers:\n if page_number is None:\n page_link = PAGE_BREAK\n else:\n page_link = PageLink(\n url=url_func(page_number),\n number=page_number,\n is_active=(page_number == current),\n is_break=False\n )\n page_links.append(page_link)\n return page_links\n\n\ndef _reverse_ordering(ordering_tuple):\n \"\"\"\n Given an order_by tuple such as `('-created', 'uuid')` reverse the\n ordering and return a new tuple, eg. 
`('created', '-uuid')`.\n \"\"\"\n def invert(x):\n return x[1:] if x.startswith('-') else '-' + x\n\n return tuple([invert(item) for item in ordering_tuple])\n\n\nCursor = namedtuple('Cursor', ['offset', 'reverse', 'position'])\nPageLink = namedtuple('PageLink', ['url', 'number', 'is_active', 'is_break'])\n\nPAGE_BREAK = PageLink(url=None, number=None, is_active=False, is_break=True)\n\n\nclass BasePagination:\n display_page_controls = False\n\n def paginate_queryset(self, queryset, request, view=None): # pragma: no cover\n raise NotImplementedError('paginate_queryset() must be implemented.')\n\n def get_paginated_response(self, data): # pragma: no cover\n raise NotImplementedError('get_paginated_response() must be implemented.')\n\n def get_paginated_response_schema(self, schema):\n return schema\n\n def to_html(self): # pragma: no cover\n raise NotImplementedError('to_html() must be implemented to display page controls.')\n\n def get_results(self, data):\n return data['results']\n\n def get_schema_fields(self, view):\n assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'\n return []\n\n def get_schema_operation_parameters(self, view):\n return []\n\n\nclass PageNumberPagination(BasePagination):\n \"\"\"\n A simple page number based style that supports page numbers as\n query parameters. For example:\n\n http://api.example.org/accounts/?page=4\n http://api.example.org/accounts/?page=4&page_size=100\n \"\"\"\n # The default page size.\n # Defaults to `None`, meaning pagination is disabled.\n page_size = api_settings.PAGE_SIZE\n\n django_paginator_class = DjangoPaginator\n\n # Client can control the page using this query parameter.\n page_query_param = 'page'\n page_query_description = _('A page number within the paginated result set.')\n\n # Client can control the page size using this query parameter.\n # Default is 'None'. 
Set to eg 'page_size' to enable usage.\n page_size_query_param = None\n page_size_query_description = _('Number of results to return per page.')\n\n # Set to an integer to limit the maximum page size the client may request.\n # Only relevant if 'page_size_query_param' has also been set.\n max_page_size = None\n\n last_page_strings = ('last',)\n\n template = 'rest_framework/pagination/numbers.html'\n\n invalid_page_message = _('Invalid page.')\n\n def paginate_queryset(self, queryset, request, view=None):\n \"\"\"\n Paginate a queryset if required, either returning a\n page object, or `None` if pagination is not configured for this view.\n \"\"\"\n page_size = self.get_page_size(request)\n if not page_size:\n return None\n\n paginator = self.django_paginator_class(queryset, page_size)\n page_number = self.get_page_number(request, paginator)\n\n try:\n self.page = paginator.page(page_number)\n except InvalidPage as exc:\n msg = self.invalid_page_message.format(\n page_number=page_number, message=str(exc)\n )\n raise NotFound(msg)\n\n if paginator.num_pages > 1 and self.template is not None:\n # The browsable API should display pagination controls.\n self.display_page_controls = True\n\n self.request = request\n return list(self.page)\n\n def get_page_number(self, request, paginator):\n page_number = request.query_params.get(self.page_query_param, 1)\n if page_number in self.last_page_strings:\n page_number = paginator.num_pages\n return page_number\n\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.page.paginator.count),\n ('next', self.get_next_link()),\n ('previous', self.get_previous_link()),\n ('results', data)\n ]))\n\n def get_paginated_response_schema(self, schema):\n return {\n 'type': 'object',\n 'properties': {\n 'count': {\n 'type': 'integer',\n 'example': 123,\n },\n 'next': {\n 'type': 'string',\n 'nullable': True,\n 'format': 'uri',\n 'example': 'http://api.example.org/accounts/?{page_query_param}=4'.format(\n page_query_param=self.page_query_param)\n },\n 'previous': {\n 'type': 'string',\n 'nullable': True,\n 'format': 'uri',\n 'example': 'http://api.example.org/accounts/?{page_query_param}=2'.format(\n page_query_param=self.page_query_param)\n },\n 'results': schema,\n },\n }\n\n def get_page_size(self, request):\n if self.page_size_query_param:\n try:\n return _positive_int(\n request.query_params[self.page_size_query_param],\n strict=True,\n cutoff=self.max_page_size\n )\n except (KeyError, ValueError):\n pass\n\n return self.page_size\n\n def get_next_link(self):\n if not self.page.has_next():\n return None\n url = self.request.build_absolute_uri()\n page_number = self.page.next_page_number()\n return replace_query_param(url, self.page_query_param, page_number)\n\n def get_previous_link(self):\n if not self.page.has_previous():\n return None\n url = self.request.build_absolute_uri()\n page_number = self.page.previous_page_number()\n if page_number == 1:\n return remove_query_param(url, self.page_query_param)\n return replace_query_param(url, self.page_query_param, page_number)\n\n def get_html_context(self):\n base_url = self.request.build_absolute_uri()\n\n def page_number_to_url(page_number):\n if page_number == 1:\n return remove_query_param(base_url, self.page_query_param)\n else:\n return replace_query_param(base_url, self.page_query_param, page_number)\n\n current = self.page.number\n final = self.page.paginator.num_pages\n page_numbers = _get_displayed_page_numbers(current, final)\n page_links = 
_get_page_links(page_numbers, current, page_number_to_url)\n\n return {\n 'previous_url': self.get_previous_link(),\n 'next_url': self.get_next_link(),\n 'page_links': page_links\n }\n\n def to_html(self):\n template = loader.get_template(self.template)\n context = self.get_html_context()\n return template.render(context)\n\n def get_schema_fields(self, view):\n assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'\n assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'\n fields = [\n coreapi.Field(\n name=self.page_query_param,\n required=False,\n location='query',\n schema=coreschema.Integer(\n title='Page',\n description=force_str(self.page_query_description)\n )\n )\n ]\n if self.page_size_query_param is not None:\n fields.append(\n coreapi.Field(\n name=self.page_size_query_param,\n required=False,\n location='query',\n schema=coreschema.Integer(\n title='Page size',\n description=force_str(self.page_size_query_description)\n )\n )\n )\n return fields\n\n def get_schema_operation_parameters(self, view):\n parameters = [\n {\n 'name': self.page_query_param,\n 'required': False,\n 'in': 'query',\n 'description': force_str(self.page_query_description),\n 'schema': {\n 'type': 'integer',\n },\n },\n ]\n if self.page_size_query_param is not None:\n parameters.append(\n {\n 'name': self.page_size_query_param,\n 'required': False,\n 'in': 'query',\n 'description': force_str(self.page_size_query_description),\n 'schema': {\n 'type': 'integer',\n },\n },\n )\n return parameters\n\n\nclass LimitOffsetPagination(BasePagination):\n \"\"\"\n A limit/offset based style. For example:\n\n http://api.example.org/accounts/?limit=100\n http://api.example.org/accounts/?offset=400&limit=100\n \"\"\"\n default_limit = api_settings.PAGE_SIZE\n limit_query_param = 'limit'\n limit_query_description = _('Number of results to return per page.')\n offset_query_param = 'offset'\n offset_query_description = _('The initial index from which to return the results.')\n max_limit = None\n template = 'rest_framework/pagination/numbers.html'\n\n def paginate_queryset(self, queryset, request, view=None):\n self.limit = self.get_limit(request)\n if self.limit is None:\n return None\n\n self.count = self.get_count(queryset)\n self.offset = self.get_offset(request)\n self.request = request\n if self.count > self.limit and self.template is not None:\n self.display_page_controls = True\n\n if self.count == 0 or self.offset > self.count:\n return []\n return list(queryset[self.offset:self.offset + self.limit])\n\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.count),\n ('next', self.get_next_link()),\n ('previous', self.get_previous_link()),\n ('results', data)\n ]))\n\n def get_paginated_response_schema(self, schema):\n return {\n 'type': 'object',\n 'properties': {\n 'count': {\n 'type': 'integer',\n 'example': 123,\n },\n 'next': {\n 'type': 'string',\n 'nullable': True,\n 'format': 'uri',\n 'example': 'http://api.example.org/accounts/?{offset_param}=400&{limit_param}=100'.format(\n offset_param=self.offset_query_param, limit_param=self.limit_query_param),\n },\n 'previous': {\n 'type': 'string',\n 'nullable': True,\n 'format': 'uri',\n 'example': 'http://api.example.org/accounts/?{offset_param}=200&{limit_param}=100'.format(\n offset_param=self.offset_query_param, limit_param=self.limit_query_param),\n },\n 'results': schema,\n },\n }\n\n def get_limit(self, request):\n if self.limit_query_param:\n try:\n return 
_positive_int(\n request.query_params[self.limit_query_param],\n strict=True,\n cutoff=self.max_limit\n )\n except (KeyError, ValueError):\n pass\n\n return self.default_limit\n\n def get_offset(self, request):\n try:\n return _positive_int(\n request.query_params[self.offset_query_param],\n )\n except (KeyError, ValueError):\n return 0\n\n def get_next_link(self):\n if self.offset + self.limit >= self.count:\n return None\n\n url = self.request.build_absolute_uri()\n url = replace_query_param(url, self.limit_query_param, self.limit)\n\n offset = self.offset + self.limit\n return replace_query_param(url, self.offset_query_param, offset)\n\n def get_previous_link(self):\n if self.offset <= 0:\n return None\n\n url = self.request.build_absolute_uri()\n url = replace_query_param(url, self.limit_query_param, self.limit)\n\n if self.offset - self.limit <= 0:\n return remove_query_param(url, self.offset_query_param)\n\n offset = self.offset - self.limit\n return replace_query_param(url, self.offset_query_param, offset)\n\n def get_html_context(self):\n base_url = self.request.build_absolute_uri()\n\n if self.limit:\n current = _divide_with_ceil(self.offset, self.limit) + 1\n\n # The number of pages is a little bit fiddly.\n # We need to sum both the number of pages from current offset to end\n # plus the number of pages up to the current offset.\n # When offset is not strictly divisible by the limit then we may\n # end up introducing an extra page as an artifact.\n final = (\n _divide_with_ceil(self.count - self.offset, self.limit) +\n _divide_with_ceil(self.offset, self.limit)\n )\n\n final = max(final, 1)\n else:\n current = 1\n final = 1\n\n if current > final:\n current = final\n\n def page_number_to_url(page_number):\n if page_number == 1:\n return remove_query_param(base_url, self.offset_query_param)\n else:\n offset = self.offset + ((page_number - current) * self.limit)\n return replace_query_param(base_url, self.offset_query_param, offset)\n\n page_numbers = _get_displayed_page_numbers(current, final)\n page_links = _get_page_links(page_numbers, current, page_number_to_url)\n\n return {\n 'previous_url': self.get_previous_link(),\n 'next_url': self.get_next_link(),\n 'page_links': page_links\n }\n\n def to_html(self):\n template = loader.get_template(self.template)\n context = self.get_html_context()\n return template.render(context)\n\n def get_count(self, queryset):\n \"\"\"\n Determine an object count, supporting either querysets or regular lists.\n \"\"\"\n try:\n return queryset.count()\n except (AttributeError, TypeError):\n return len(queryset)\n\n def get_schema_fields(self, view):\n assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'\n assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'\n return [\n coreapi.Field(\n name=self.limit_query_param,\n required=False,\n location='query',\n schema=coreschema.Integer(\n title='Limit',\n description=force_str(self.limit_query_description)\n )\n ),\n coreapi.Field(\n name=self.offset_query_param,\n required=False,\n location='query',\n schema=coreschema.Integer(\n title='Offset',\n description=force_str(self.offset_query_description)\n )\n )\n ]\n\n def get_schema_operation_parameters(self, view):\n parameters = [\n {\n 'name': self.limit_query_param,\n 'required': False,\n 'in': 'query',\n 'description': force_str(self.limit_query_description),\n 'schema': {\n 'type': 'integer',\n },\n },\n {\n 'name': self.offset_query_param,\n 'required': False,\n 'in': 'query',\n 
'description': force_str(self.offset_query_description),\n 'schema': {\n 'type': 'integer',\n },\n },\n ]\n return parameters\n\n\nclass CursorPagination(BasePagination):\n \"\"\"\n The cursor pagination implementation is necessarily complex.\n For an overview of the position/offset style we use, see this post:\n https://cra.mr/2011/03/08/building-cursors-for-the-disqus-api\n \"\"\"\n cursor_query_param = 'cursor'\n cursor_query_description = _('The pagination cursor value.')\n page_size = api_settings.PAGE_SIZE\n invalid_cursor_message = _('Invalid cursor')\n ordering = '-created'\n template = 'rest_framework/pagination/previous_and_next.html'\n\n # Client can control the page size using this query parameter.\n # Default is 'None'. Set to eg 'page_size' to enable usage.\n page_size_query_param = None\n page_size_query_description = _('Number of results to return per page.')\n\n # Set to an integer to limit the maximum page size the client may request.\n # Only relevant if 'page_size_query_param' has also been set.\n max_page_size = None\n\n # The offset in the cursor is used in situations where we have a\n # nearly-unique index. (Eg millisecond precision creation timestamps)\n # We guard against malicious users attempting to cause expensive database\n # queries, by having a hard cap on the maximum possible size of the offset.\n offset_cutoff = 1000\n\n def paginate_queryset(self, queryset, request, view=None):\n self.page_size = self.get_page_size(request)\n if not self.page_size:\n return None\n\n self.base_url = request.build_absolute_uri()\n self.ordering = self.get_ordering(request, queryset, view)\n\n self.cursor = self.decode_cursor(request)\n if self.cursor is None:\n (offset, reverse, current_position) = (0, False, None)\n else:\n (offset, reverse, current_position) = self.cursor\n\n # Cursor pagination always enforces an ordering.\n if reverse:\n queryset = queryset.order_by(*_reverse_ordering(self.ordering))\n else:\n queryset = queryset.order_by(*self.ordering)\n\n # If we have a cursor with a fixed position then filter by that.\n if current_position is not None:\n order = self.ordering[0]\n is_reversed = order.startswith('-')\n order_attr = order.lstrip('-')\n\n # Test for: (cursor reversed) XOR (queryset reversed)\n if self.cursor.reverse != is_reversed:\n kwargs = {order_attr + '__lt': current_position}\n else:\n kwargs = {order_attr + '__gt': current_position}\n\n queryset = queryset.filter(**kwargs)\n\n # If we have an offset cursor then offset the entire page by that amount.\n # We also always fetch an extra item in order to determine if there is a\n # page following on from this one.\n results = list(queryset[offset:offset + self.page_size + 1])\n self.page = list(results[:self.page_size])\n\n # Determine the position of the final item following the page.\n if len(results) > len(self.page):\n has_following_position = True\n following_position = self._get_position_from_instance(results[-1], self.ordering)\n else:\n has_following_position = False\n following_position = None\n\n if reverse:\n # If we have a reverse queryset, then the query ordering was in reverse\n # so we need to reverse the items again before returning them to the user.\n self.page = list(reversed(self.page))\n\n # Determine next and previous positions for reverse cursors.\n self.has_next = (current_position is not None) or (offset > 0)\n self.has_previous = has_following_position\n if self.has_next:\n self.next_position = current_position\n if self.has_previous:\n self.previous_position = 
following_position\n else:\n # Determine next and previous positions for forward cursors.\n self.has_next = has_following_position\n self.has_previous = (current_position is not None) or (offset > 0)\n if self.has_next:\n self.next_position = following_position\n if self.has_previous:\n self.previous_position = current_position\n\n # Display page controls in the browsable API if there is more\n # than one page.\n if (self.has_previous or self.has_next) and self.template is not None:\n self.display_page_controls = True\n\n return self.page\n\n def get_page_size(self, request):\n if self.page_size_query_param:\n try:\n return _positive_int(\n request.query_params[self.page_size_query_param],\n strict=True,\n cutoff=self.max_page_size\n )\n except (KeyError, ValueError):\n pass\n\n return self.page_size\n\n def get_next_link(self):\n if not self.has_next:\n return None\n\n if self.page and self.cursor and self.cursor.reverse and self.cursor.offset != 0:\n # If we're reversing direction and we have an offset cursor\n # then we cannot use the first position we find as a marker.\n compare = self._get_position_from_instance(self.page[-1], self.ordering)\n else:\n compare = self.next_position\n offset = 0\n\n has_item_with_unique_position = False\n for item in reversed(self.page):\n position = self._get_position_from_instance(item, self.ordering)\n if position != compare:\n # The item in this position and the item following it\n # have different positions. We can use this position as\n # our marker.\n has_item_with_unique_position = True\n break\n\n # The item in this position has the same position as the item\n # following it, we can't use it as a marker position, so increment\n # the offset and keep seeking to the previous item.\n compare = position\n offset += 1\n\n if self.page and not has_item_with_unique_position:\n # There were no unique positions in the page.\n if not self.has_previous:\n # We are on the first page.\n # Our cursor will have an offset equal to the page size,\n # but no position to filter against yet.\n offset = self.page_size\n position = None\n elif self.cursor.reverse:\n # The change in direction will introduce a paging artifact,\n # where we end up skipping forward a few extra items.\n offset = 0\n position = self.previous_position\n else:\n # Use the position from the existing cursor and increment\n # it's offset by the page size.\n offset = self.cursor.offset + self.page_size\n position = self.previous_position\n\n if not self.page:\n position = self.next_position\n\n cursor = Cursor(offset=offset, reverse=False, position=position)\n return self.encode_cursor(cursor)\n\n def get_previous_link(self):\n if not self.has_previous:\n return None\n\n if self.page and self.cursor and not self.cursor.reverse and self.cursor.offset != 0:\n # If we're reversing direction and we have an offset cursor\n # then we cannot use the first position we find as a marker.\n compare = self._get_position_from_instance(self.page[0], self.ordering)\n else:\n compare = self.previous_position\n offset = 0\n\n has_item_with_unique_position = False\n for item in self.page:\n position = self._get_position_from_instance(item, self.ordering)\n if position != compare:\n # The item in this position and the item following it\n # have different positions. 
We can use this position as\n # our marker.\n has_item_with_unique_position = True\n break\n\n # The item in this position has the same position as the item\n # following it, we can't use it as a marker position, so increment\n # the offset and keep seeking to the previous item.\n compare = position\n offset += 1\n\n if self.page and not has_item_with_unique_position:\n # There were no unique positions in the page.\n if not self.has_next:\n # We are on the final page.\n # Our cursor will have an offset equal to the page size,\n # but no position to filter against yet.\n offset = self.page_size\n position = None\n elif self.cursor.reverse:\n # Use the position from the existing cursor and increment\n # it's offset by the page size.\n offset = self.cursor.offset + self.page_size\n position = self.next_position\n else:\n # The change in direction will introduce a paging artifact,\n # where we end up skipping back a few extra items.\n offset = 0\n position = self.next_position\n\n if not self.page:\n position = self.previous_position\n\n cursor = Cursor(offset=offset, reverse=True, position=position)\n return self.encode_cursor(cursor)\n\n def get_ordering(self, request, queryset, view):\n \"\"\"\n Return a tuple of strings, that may be used in an `order_by` method.\n \"\"\"\n ordering_filters = [\n filter_cls for filter_cls in getattr(view, 'filter_backends', [])\n if hasattr(filter_cls, 'get_ordering')\n ]\n\n if ordering_filters:\n # If a filter exists on the view that implements `get_ordering`\n # then we defer to that filter to determine the ordering.\n filter_cls = ordering_filters[0]\n filter_instance = filter_cls()\n ordering = filter_instance.get_ordering(request, queryset, view)\n assert ordering is not None, (\n 'Using cursor pagination, but filter class {filter_cls} '\n 'returned a `None` ordering.'.format(\n filter_cls=filter_cls.__name__\n )\n )\n else:\n # The default case is to check for an `ordering` attribute\n # on this pagination instance.\n ordering = self.ordering\n assert ordering is not None, (\n 'Using cursor pagination, but no ordering attribute was declared '\n 'on the pagination class.'\n )\n assert '__' not in ordering, (\n 'Cursor pagination does not support double underscore lookups '\n 'for orderings. Orderings should be an unchanging, unique or '\n 'nearly-unique field on the model, such as \"-created\" or \"pk\".'\n )\n\n assert isinstance(ordering, (str, list, tuple)), (\n 'Invalid ordering. 
Expected string or tuple, but got {type}'.format(\n type=type(ordering).__name__\n )\n )\n\n if isinstance(ordering, str):\n return (ordering,)\n return tuple(ordering)\n\n def decode_cursor(self, request):\n \"\"\"\n Given a request with a cursor, return a `Cursor` instance.\n \"\"\"\n # Determine if we have a cursor, and if so then decode it.\n encoded = request.query_params.get(self.cursor_query_param)\n if encoded is None:\n return None\n\n try:\n querystring = b64decode(encoded.encode('ascii')).decode('ascii')\n tokens = parse.parse_qs(querystring, keep_blank_values=True)\n\n offset = tokens.get('o', ['0'])[0]\n offset = _positive_int(offset, cutoff=self.offset_cutoff)\n\n reverse = tokens.get('r', ['0'])[0]\n reverse = bool(int(reverse))\n\n position = tokens.get('p', [None])[0]\n except (TypeError, ValueError):\n raise NotFound(self.invalid_cursor_message)\n\n return Cursor(offset=offset, reverse=reverse, position=position)\n\n def encode_cursor(self, cursor):\n \"\"\"\n Given a Cursor instance, return an url with encoded cursor.\n \"\"\"\n tokens = {}\n if cursor.offset != 0:\n tokens['o'] = str(cursor.offset)\n if cursor.reverse:\n tokens['r'] = '1'\n if cursor.position is not None:\n tokens['p'] = cursor.position\n\n querystring = parse.urlencode(tokens, doseq=True)\n encoded = b64encode(querystring.encode('ascii')).decode('ascii')\n return replace_query_param(self.base_url, self.cursor_query_param, encoded)\n\n def _get_position_from_instance(self, instance, ordering):\n field_name = ordering[0].lstrip('-')\n if isinstance(instance, dict):\n attr = instance[field_name]\n else:\n attr = getattr(instance, field_name)\n return str(attr)\n\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('next', self.get_next_link()),\n ('previous', self.get_previous_link()),\n ('results', data)\n ]))\n\n def get_paginated_response_schema(self, schema):\n return {\n 'type': 'object',\n 'properties': {\n 'next': {\n 'type': 'string',\n 'nullable': True,\n },\n 'previous': {\n 'type': 'string',\n 'nullable': True,\n },\n 'results': schema,\n },\n }\n\n def get_html_context(self):\n return {\n 'previous_url': self.get_previous_link(),\n 'next_url': self.get_next_link()\n }\n\n def to_html(self):\n template = loader.get_template(self.template)\n context = self.get_html_context()\n return template.render(context)\n\n def get_schema_fields(self, view):\n assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'\n assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'\n fields = [\n coreapi.Field(\n name=self.cursor_query_param,\n required=False,\n location='query',\n schema=coreschema.String(\n title='Cursor',\n description=force_str(self.cursor_query_description)\n )\n )\n ]\n if self.page_size_query_param is not None:\n fields.append(\n coreapi.Field(\n name=self.page_size_query_param,\n required=False,\n location='query',\n schema=coreschema.Integer(\n title='Page size',\n description=force_str(self.page_size_query_description)\n )\n )\n )\n return fields\n\n def get_schema_operation_parameters(self, view):\n parameters = [\n {\n 'name': self.cursor_query_param,\n 'required': False,\n 'in': 'query',\n 'description': force_str(self.cursor_query_description),\n 'schema': {\n 'type': 'string',\n },\n }\n ]\n if self.page_size_query_param is not None:\n parameters.append(\n {\n 'name': self.page_size_query_param,\n 'required': False,\n 'in': 'query',\n 'description': force_str(self.page_size_query_description),\n 
'schema': {\n 'type': 'integer',\n },\n }\n )\n return parameters\n", "path": "rest_framework/pagination.py" } ]
diff --git a/rest_framework/pagination.py b/rest_framework/pagination.py index dc120d8e86..e815d8d5cf 100644 --- a/rest_framework/pagination.py +++ b/rest_framework/pagination.py @@ -961,7 +961,7 @@ def get_schema_operation_parameters(self, view): 'in': 'query', 'description': force_str(self.cursor_query_description), 'schema': { - 'type': 'integer', + 'type': 'string', }, } ]
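The one-line schema fix above changes the documented type of the `cursor` query parameter from integer to string. As a minimal sketch of why that is the right type (mirroring the `encode_cursor` logic in the pagination code above; this snippet is illustrative and not part of the PR):

```python
# Sketch of how CursorPagination builds its cursor token: the (offset, reverse,
# position) tokens are urlencoded and then base64-encoded, so the query value
# is opaque text, not an integer.
from base64 import b64encode
from urllib import parse

tokens = {"o": "10", "r": "1"}                      # offset=10, reverse=True
querystring = parse.urlencode(tokens, doseq=True)   # "o=10&r=1"
encoded = b64encode(querystring.encode("ascii")).decode("ascii")
print(encoded)                                      # "bz0xMCZyPTE=" -- a string, not an int
```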
lutris__lutris-4038
Bottom panel switches to a different game when it stops
If another game is running and you switch the bottom panel to a different one, and the first game then closes outside of Lutris (for example by quitting from its own menu or closing the game window) rather than being stopped through Lutris, the bottom panel switches to that stopped game all by itself, without any user interaction:
![Peek 2022-02-04 13-29](https://user-images.githubusercontent.com/10602045/152513690-224ef6c1-b56f-4510-9d0f-897ffe2dd8ea.gif)
It should be noted that, so far, only I can reproduce this, for some bizarre reason.
[ { "content": "from datetime import datetime\nfrom gettext import gettext as _\n\nfrom gi.repository import GObject, Gtk, Pango\n\nfrom lutris import runners, services\nfrom lutris.database.games import get_game_by_field, get_game_for_service\nfrom lutris.game import Game\nfrom lutris.gui.widgets.utils import get_link_button\nfrom lutris.util.strings import gtk_safe\n\n\nclass GameBar(Gtk.Box):\n def __init__(self, db_game, game_actions, application):\n \"\"\"Create the game bar with a database row\"\"\"\n super().__init__(orientation=Gtk.Orientation.VERTICAL, visible=True,\n margin_top=12,\n margin_left=12,\n margin_bottom=12,\n margin_right=12,\n spacing=6)\n GObject.add_emission_hook(Game, \"game-start\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-started\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-stopped\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-updated\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-removed\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-installed\", self.on_game_state_changed)\n\n self.set_margin_bottom(12)\n self.game_actions = game_actions\n self.db_game = db_game\n self.service = None\n if db_game.get(\"service\"):\n try:\n self.service = services.SERVICES[db_game[\"service\"]]()\n except KeyError:\n pass\n\n game_id = None\n if \"service_id\" in db_game:\n self.appid = db_game[\"service_id\"]\n game_id = db_game[\"id\"]\n elif self.service:\n self.appid = db_game[\"appid\"]\n if self.service.id == \"lutris\":\n game = get_game_by_field(self.appid, field=\"slug\")\n else:\n game = get_game_for_service(self.service.id, self.appid)\n if game:\n game_id = game[\"id\"]\n if game_id:\n self.game = application.get_game_by_id(game_id) or Game(game_id)\n else:\n self.game = Game()\n self.game.name = db_game[\"name\"]\n self.game.slug = db_game[\"slug\"]\n self.game.appid = self.appid\n self.game.service = self.service.id if self.service else None\n game_actions.set_game(self.game)\n self.update_view()\n\n def clear_view(self):\n \"\"\"Clears all widgets from the container\"\"\"\n for child in self.get_children():\n child.destroy()\n\n def update_view(self):\n \"\"\"Populate the view with widgets\"\"\"\n game_label = self.get_game_name_label()\n game_label.set_halign(Gtk.Align.START)\n self.pack_start(game_label, False, False, 0)\n\n hbox = Gtk.Box(Gtk.Orientation.HORIZONTAL, spacing=6)\n self.pack_start(hbox, False, False, 0)\n\n self.play_button = self.get_play_button()\n hbox.pack_start(self.play_button, False, False, 0)\n\n if self.game.is_installed:\n hbox.pack_start(self.get_runner_button(), False, False, 0)\n hbox.pack_start(self.get_platform_label(), False, False, 0)\n if self.game.lastplayed:\n hbox.pack_start(self.get_last_played_label(), False, False, 0)\n if self.game.playtime:\n hbox.pack_start(self.get_playtime_label(), False, False, 0)\n hbox.show_all()\n\n def get_popover(self, buttons, parent):\n \"\"\"Return the popover widget containing a list of link buttons\"\"\"\n if not buttons:\n return None\n popover = Gtk.Popover()\n vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, visible=True)\n\n for action in buttons:\n vbox.pack_end(buttons[action], False, False, 1)\n popover.add(vbox)\n popover.set_position(Gtk.PositionType.TOP)\n popover.set_constrain_to(Gtk.PopoverConstraint.NONE)\n popover.set_relative_to(parent)\n return popover\n\n def get_game_name_label(self):\n \"\"\"Return the label with the game's title\"\"\"\n 
title_label = Gtk.Label(visible=True)\n title_label.set_ellipsize(Pango.EllipsizeMode.END)\n title_label.set_markup(\"<span font_desc='16'><b>%s</b></span>\" % gtk_safe(self.game.name))\n return title_label\n\n def get_runner_button(self):\n icon_name = self.game.runner.name + \"-symbolic\"\n runner_icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n runner_icon.show()\n box = Gtk.HBox(visible=True)\n runner_button = Gtk.Button(visible=True)\n popover = self.get_popover(self.get_runner_buttons(), runner_button)\n if popover:\n runner_button.set_image(runner_icon)\n popover_button = Gtk.MenuButton(visible=True)\n popover_button.set_size_request(32, 32)\n popover_button.props.direction = Gtk.ArrowType.UP\n popover_button.set_popover(popover)\n runner_button.connect(\"clicked\", lambda _x: popover_button.emit(\"clicked\"))\n box.add(runner_button)\n box.add(popover_button)\n style_context = box.get_style_context()\n style_context.add_class(\"linked\")\n else:\n runner_icon.set_margin_left(49)\n runner_icon.set_margin_right(6)\n box.add(runner_icon)\n return box\n\n def get_platform_label(self):\n platform_label = Gtk.Label(visible=True)\n platform_label.set_size_request(120, -1)\n platform_label.set_alignment(0, 0.5)\n platform = gtk_safe(self.game.platform)\n platform_label.set_tooltip_markup(platform)\n platform_label.set_markup(_(\"Platform:\\n<b>%s</b>\") % platform)\n platform_label.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return platform_label\n\n def get_playtime_label(self):\n \"\"\"Return the label containing the playtime info\"\"\"\n playtime_label = Gtk.Label(visible=True)\n playtime_label.set_size_request(120, -1)\n playtime_label.set_alignment(0, 0.5)\n playtime_label.set_markup(_(\"Time played:\\n<b>%s</b>\") % self.game.formatted_playtime)\n return playtime_label\n\n def get_last_played_label(self):\n \"\"\"Return the label containing the last played info\"\"\"\n last_played_label = Gtk.Label(visible=True)\n last_played_label.set_size_request(120, -1)\n last_played_label.set_alignment(0, 0.5)\n lastplayed = datetime.fromtimestamp(self.game.lastplayed)\n last_played_label.set_markup(_(\"Last played:\\n<b>%s</b>\") % lastplayed.strftime(\"%x\"))\n return last_played_label\n\n def get_popover_button(self):\n \"\"\"Return the popover button+menu for the Play button\"\"\"\n popover_button = Gtk.MenuButton(visible=True)\n popover_button.set_size_request(32, 32)\n popover_button.props.direction = Gtk.ArrowType.UP\n\n return popover_button\n\n def get_popover_box(self):\n \"\"\"Return a container for a button + a popover button attached to it\"\"\"\n box = Gtk.HBox(visible=True)\n style_context = box.get_style_context()\n style_context.add_class(\"linked\")\n return box\n\n def get_locate_installed_game_button(self):\n \"\"\"Return a button to locate an existing install\"\"\"\n button = get_link_button(\"Locate installed game\")\n button.show()\n button.connect(\"clicked\", self.game_actions.on_locate_installed_game, self.game)\n return {\"locate\": button}\n\n def get_play_button(self):\n \"\"\"Return the widget for install/play/stop and game config\"\"\"\n button = Gtk.Button(visible=True)\n button.set_size_request(120, 32)\n box = self.get_popover_box()\n popover_button = self.get_popover_button()\n if self.game.is_installed:\n if self.game.state == self.game.STATE_STOPPED:\n button.set_label(_(\"Play\"))\n button.connect(\"clicked\", self.game_actions.on_game_launch)\n elif self.game.state == self.game.STATE_LAUNCHING:\n 
button.set_label(_(\"Launching\"))\n button.set_sensitive(False)\n else:\n button.set_label(_(\"Stop\"))\n button.connect(\"clicked\", self.game_actions.on_game_stop)\n else:\n button.set_label(_(\"Install\"))\n button.connect(\"clicked\", self.game_actions.on_install_clicked)\n if self.service:\n if self.service.local:\n # Local services don't show an install dialog, they can be launched directly\n button.set_label(_(\"Play\"))\n if self.service.drm_free:\n button.set_size_request(84, 32)\n box.add(button)\n popover = self.get_popover(self.get_locate_installed_game_button(), popover_button)\n popover_button.set_popover(popover)\n box.add(popover_button)\n return box\n return button\n button.set_size_request(84, 32)\n box.add(button)\n popover = self.get_popover(self.get_game_buttons(), popover_button)\n popover_button.set_popover(popover)\n box.add(popover_button)\n return box\n\n def get_game_buttons(self):\n \"\"\"Return a dictionary of buttons to use in the panel\"\"\"\n displayed = self.game_actions.get_displayed_entries()\n buttons = {}\n for action in self.game_actions.get_game_actions():\n action_id, label, callback = action\n if action_id in (\"play\", \"stop\", \"install\"):\n continue\n button = get_link_button(label)\n if displayed.get(action_id):\n button.show()\n else:\n button.hide()\n buttons[action_id] = button\n button.connect(\"clicked\", self.on_link_button_clicked, callback)\n return buttons\n\n def get_runner_buttons(self):\n buttons = {}\n if self.game.runner_name and self.game.is_installed:\n runner = runners.import_runner(self.game.runner_name)(self.game.config)\n for entry in runner.context_menu_entries:\n name, label, callback = entry\n button = get_link_button(label)\n button.show()\n button.connect(\"clicked\", self.on_link_button_clicked, callback)\n buttons[name] = button\n return buttons\n\n def on_link_button_clicked(self, button, callback):\n \"\"\"Callback for link buttons. Closes the popover then runs the actual action\"\"\"\n popover = button.get_parent().get_parent()\n popover.popdown()\n callback(button)\n\n def on_install_clicked(self, button):\n \"\"\"Handler for installing service games\"\"\"\n self.service.install(self.db_game)\n\n def on_game_state_changed(self, game):\n \"\"\"Handler called when the game has changed state\"\"\"\n if (\n game.id == self.game.id\n or game.appid == self.appid\n ):\n self.game = game\n else:\n return True\n self.clear_view()\n self.update_view()\n return True\n", "path": "lutris/gui/widgets/game_bar.py" } ]
[ { "content": "from datetime import datetime\nfrom gettext import gettext as _\n\nfrom gi.repository import GObject, Gtk, Pango\n\nfrom lutris import runners, services\nfrom lutris.database.games import get_game_by_field, get_game_for_service\nfrom lutris.game import Game\nfrom lutris.gui.widgets.utils import get_link_button\nfrom lutris.util.strings import gtk_safe\n\n\nclass GameBar(Gtk.Box):\n def __init__(self, db_game, game_actions, application):\n \"\"\"Create the game bar with a database row\"\"\"\n super().__init__(orientation=Gtk.Orientation.VERTICAL, visible=True,\n margin_top=12,\n margin_left=12,\n margin_bottom=12,\n margin_right=12,\n spacing=6)\n GObject.add_emission_hook(Game, \"game-start\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-started\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-stopped\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-updated\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-removed\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-installed\", self.on_game_state_changed)\n\n self.set_margin_bottom(12)\n self.game_actions = game_actions\n self.db_game = db_game\n self.service = None\n if db_game.get(\"service\"):\n try:\n self.service = services.SERVICES[db_game[\"service\"]]()\n except KeyError:\n pass\n\n game_id = None\n if \"service_id\" in db_game:\n self.appid = db_game[\"service_id\"]\n game_id = db_game[\"id\"]\n elif self.service:\n self.appid = db_game[\"appid\"]\n if self.service.id == \"lutris\":\n game = get_game_by_field(self.appid, field=\"slug\")\n else:\n game = get_game_for_service(self.service.id, self.appid)\n if game:\n game_id = game[\"id\"]\n if game_id:\n self.game = application.get_game_by_id(game_id) or Game(game_id)\n else:\n self.game = Game()\n self.game.name = db_game[\"name\"]\n self.game.slug = db_game[\"slug\"]\n self.game.appid = self.appid\n self.game.service = self.service.id if self.service else None\n game_actions.set_game(self.game)\n self.update_view()\n\n def clear_view(self):\n \"\"\"Clears all widgets from the container\"\"\"\n for child in self.get_children():\n child.destroy()\n\n def update_view(self):\n \"\"\"Populate the view with widgets\"\"\"\n game_label = self.get_game_name_label()\n game_label.set_halign(Gtk.Align.START)\n self.pack_start(game_label, False, False, 0)\n\n hbox = Gtk.Box(Gtk.Orientation.HORIZONTAL, spacing=6)\n self.pack_start(hbox, False, False, 0)\n\n self.play_button = self.get_play_button()\n hbox.pack_start(self.play_button, False, False, 0)\n\n if self.game.is_installed:\n hbox.pack_start(self.get_runner_button(), False, False, 0)\n hbox.pack_start(self.get_platform_label(), False, False, 0)\n if self.game.lastplayed:\n hbox.pack_start(self.get_last_played_label(), False, False, 0)\n if self.game.playtime:\n hbox.pack_start(self.get_playtime_label(), False, False, 0)\n hbox.show_all()\n\n def get_popover(self, buttons, parent):\n \"\"\"Return the popover widget containing a list of link buttons\"\"\"\n if not buttons:\n return None\n popover = Gtk.Popover()\n vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, visible=True)\n\n for action in buttons:\n vbox.pack_end(buttons[action], False, False, 1)\n popover.add(vbox)\n popover.set_position(Gtk.PositionType.TOP)\n popover.set_constrain_to(Gtk.PopoverConstraint.NONE)\n popover.set_relative_to(parent)\n return popover\n\n def get_game_name_label(self):\n \"\"\"Return the label with the game's title\"\"\"\n 
title_label = Gtk.Label(visible=True)\n title_label.set_ellipsize(Pango.EllipsizeMode.END)\n title_label.set_markup(\"<span font_desc='16'><b>%s</b></span>\" % gtk_safe(self.game.name))\n return title_label\n\n def get_runner_button(self):\n icon_name = self.game.runner.name + \"-symbolic\"\n runner_icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n runner_icon.show()\n box = Gtk.HBox(visible=True)\n runner_button = Gtk.Button(visible=True)\n popover = self.get_popover(self.get_runner_buttons(), runner_button)\n if popover:\n runner_button.set_image(runner_icon)\n popover_button = Gtk.MenuButton(visible=True)\n popover_button.set_size_request(32, 32)\n popover_button.props.direction = Gtk.ArrowType.UP\n popover_button.set_popover(popover)\n runner_button.connect(\"clicked\", lambda _x: popover_button.emit(\"clicked\"))\n box.add(runner_button)\n box.add(popover_button)\n style_context = box.get_style_context()\n style_context.add_class(\"linked\")\n else:\n runner_icon.set_margin_left(49)\n runner_icon.set_margin_right(6)\n box.add(runner_icon)\n return box\n\n def get_platform_label(self):\n platform_label = Gtk.Label(visible=True)\n platform_label.set_size_request(120, -1)\n platform_label.set_alignment(0, 0.5)\n platform = gtk_safe(self.game.platform)\n platform_label.set_tooltip_markup(platform)\n platform_label.set_markup(_(\"Platform:\\n<b>%s</b>\") % platform)\n platform_label.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return platform_label\n\n def get_playtime_label(self):\n \"\"\"Return the label containing the playtime info\"\"\"\n playtime_label = Gtk.Label(visible=True)\n playtime_label.set_size_request(120, -1)\n playtime_label.set_alignment(0, 0.5)\n playtime_label.set_markup(_(\"Time played:\\n<b>%s</b>\") % self.game.formatted_playtime)\n return playtime_label\n\n def get_last_played_label(self):\n \"\"\"Return the label containing the last played info\"\"\"\n last_played_label = Gtk.Label(visible=True)\n last_played_label.set_size_request(120, -1)\n last_played_label.set_alignment(0, 0.5)\n lastplayed = datetime.fromtimestamp(self.game.lastplayed)\n last_played_label.set_markup(_(\"Last played:\\n<b>%s</b>\") % lastplayed.strftime(\"%x\"))\n return last_played_label\n\n def get_popover_button(self):\n \"\"\"Return the popover button+menu for the Play button\"\"\"\n popover_button = Gtk.MenuButton(visible=True)\n popover_button.set_size_request(32, 32)\n popover_button.props.direction = Gtk.ArrowType.UP\n\n return popover_button\n\n def get_popover_box(self):\n \"\"\"Return a container for a button + a popover button attached to it\"\"\"\n box = Gtk.HBox(visible=True)\n style_context = box.get_style_context()\n style_context.add_class(\"linked\")\n return box\n\n def get_locate_installed_game_button(self):\n \"\"\"Return a button to locate an existing install\"\"\"\n button = get_link_button(\"Locate installed game\")\n button.show()\n button.connect(\"clicked\", self.game_actions.on_locate_installed_game, self.game)\n return {\"locate\": button}\n\n def get_play_button(self):\n \"\"\"Return the widget for install/play/stop and game config\"\"\"\n button = Gtk.Button(visible=True)\n button.set_size_request(120, 32)\n box = self.get_popover_box()\n popover_button = self.get_popover_button()\n if self.game.is_installed:\n if self.game.state == self.game.STATE_STOPPED:\n button.set_label(_(\"Play\"))\n button.connect(\"clicked\", self.game_actions.on_game_launch)\n elif self.game.state == self.game.STATE_LAUNCHING:\n 
button.set_label(_(\"Launching\"))\n button.set_sensitive(False)\n else:\n button.set_label(_(\"Stop\"))\n button.connect(\"clicked\", self.game_actions.on_game_stop)\n else:\n button.set_label(_(\"Install\"))\n button.connect(\"clicked\", self.game_actions.on_install_clicked)\n if self.service:\n if self.service.local:\n # Local services don't show an install dialog, they can be launched directly\n button.set_label(_(\"Play\"))\n if self.service.drm_free:\n button.set_size_request(84, 32)\n box.add(button)\n popover = self.get_popover(self.get_locate_installed_game_button(), popover_button)\n popover_button.set_popover(popover)\n box.add(popover_button)\n return box\n return button\n button.set_size_request(84, 32)\n box.add(button)\n popover = self.get_popover(self.get_game_buttons(), popover_button)\n popover_button.set_popover(popover)\n box.add(popover_button)\n return box\n\n def get_game_buttons(self):\n \"\"\"Return a dictionary of buttons to use in the panel\"\"\"\n displayed = self.game_actions.get_displayed_entries()\n buttons = {}\n for action in self.game_actions.get_game_actions():\n action_id, label, callback = action\n if action_id in (\"play\", \"stop\", \"install\"):\n continue\n button = get_link_button(label)\n if displayed.get(action_id):\n button.show()\n else:\n button.hide()\n buttons[action_id] = button\n button.connect(\"clicked\", self.on_link_button_clicked, callback)\n return buttons\n\n def get_runner_buttons(self):\n buttons = {}\n if self.game.runner_name and self.game.is_installed:\n runner = runners.import_runner(self.game.runner_name)(self.game.config)\n for entry in runner.context_menu_entries:\n name, label, callback = entry\n button = get_link_button(label)\n button.show()\n button.connect(\"clicked\", self.on_link_button_clicked, callback)\n buttons[name] = button\n return buttons\n\n def on_link_button_clicked(self, button, callback):\n \"\"\"Callback for link buttons. Closes the popover then runs the actual action\"\"\"\n popover = button.get_parent().get_parent()\n popover.popdown()\n callback(button)\n\n def on_install_clicked(self, button):\n \"\"\"Handler for installing service games\"\"\"\n self.service.install(self.db_game)\n\n def on_game_state_changed(self, game):\n \"\"\"Handler called when the game has changed state\"\"\"\n if (\n game.id == self.game.id\n or (self.appid and game.appid == self.appid)\n ):\n self.game = game\n else:\n return True\n self.clear_view()\n self.update_view()\n return True\n", "path": "lutris/gui/widgets/game_bar.py" } ]
diff --git a/lutris/gui/widgets/game_bar.py b/lutris/gui/widgets/game_bar.py index f10ab86066..c13be6410d 100644 --- a/lutris/gui/widgets/game_bar.py +++ b/lutris/gui/widgets/game_bar.py @@ -261,7 +261,7 @@ def on_game_state_changed(self, game): """Handler called when the game has changed state""" if ( game.id == self.game.id - or game.appid == self.appid + or (self.appid and game.appid == self.appid) ): self.game = game else:
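The fix above only adds a truthiness guard on `self.appid`. A minimal sketch of the failure mode it prevents (a simplified stand-in, not actual Lutris code): when the panel's own `appid` is empty, any other game whose `appid` is also empty satisfies `game.appid == self.appid` and takes over the panel.

```python
# Simplified model of the matching logic in GameBar.on_game_state_changed.
class PanelStandIn:
    def __init__(self, game_id, appid):
        self.game_id = game_id
        self.appid = appid

    def matches_old(self, other_id, other_appid):
        return other_id == self.game_id or other_appid == self.appid

    def matches_fixed(self, other_id, other_appid):
        return other_id == self.game_id or (self.appid and other_appid == self.appid)


panel = PanelStandIn(game_id=42, appid="")    # panel pinned to a game with no service appid
print(panel.matches_old(7, ""))               # True: an unrelated stopped game hijacks the panel
print(bool(panel.matches_fixed(7, "")))       # False: the guard ignores empty appids
```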
ivy-llc__ivy-18204
meshgrid
Add `meshgrid` to the Paddle frontend (`ivy/functional/frontends/paddle/tensor/creation.py`).
[ { "content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty_like(x, dtype=None, name=None):\r\n return ivy.empty_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef tril(x, diagonal=0, name=None):\r\n return ivy.tril(x, k=diagonal)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef triu(x, diagonal=0, name=None):\r\n return ivy.triu(x, k=diagonal)\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py" } 
]
[ { "content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty_like(x, dtype=None, name=None):\r\n return ivy.empty_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef tril(x, diagonal=0, name=None):\r\n return ivy.tril(x, k=diagonal)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef triu(x, diagonal=0, name=None):\r\n return ivy.triu(x, k=diagonal)\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.0 and below\": 
(\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef meshgrid(*args, **kwargs):\r\n return ivy.meshgrid(*args, indexing=\"ij\")\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py" } ]
diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py index 9ec862343d3ae..befe28ee15164 100644 --- a/ivy/functional/frontends/paddle/tensor/creation.py +++ b/ivy/functional/frontends/paddle/tensor/creation.py @@ -123,3 +123,11 @@ def triu(x, diagonal=0, name=None): def diagflat(x, offset=0, name=None): arr = ivy.diagflat(x, offset=offset) return arr + + +@with_supported_dtypes( + {"2.5.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" +) +@to_ivy_arrays_and_back +def meshgrid(*args, **kwargs): + return ivy.meshgrid(*args, indexing="ij") diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_creation.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_creation.py index 6ab823a6e0b41..d83f9693427b1 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_creation.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_creation.py @@ -454,3 +454,38 @@ def test_paddle_diagflat( x=x[0], offset=offset, ) + + +@handle_frontend_test( + fn_tree="paddle.meshgrid", + dtype_and_arrays=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + num_arrays=st.integers(min_value=2, max_value=5), + min_num_dims=1, + max_num_dims=1, + shared_dtype=True, + ), + test_with_out=st.just(False), +) +def test_paddle_meshgrid( + dtype_and_arrays, + test_flags, + frontend, + fn_tree, + on_device, +): + input_dtype, arrays = dtype_and_arrays + args = {} + i = 0 + for x_ in arrays: + args["x{}".format(i)] = x_ + i += 1 + test_flags.num_positional_args = len(arrays) + helpers.test_frontend_function( + input_dtypes=input_dtype, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + **args, + )
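For reference, a minimal sketch of the semantics the new frontend function delegates to, illustrated with NumPy rather than Ivy (the frontend simply forwards to `ivy.meshgrid(*args, indexing="ij")`, which behaves like `numpy.meshgrid` with matrix indexing; import paths for the frontend itself are not shown here):

```python
# paddle.meshgrid-style behaviour: matrix ("ij") indexing, every output has
# shape (len(x1), len(x2), ...), and input i varies along axis i.
import numpy as np

x = np.array([1, 2, 3])
y = np.array([4, 5])
gx, gy = np.meshgrid(x, y, indexing="ij")
print(gx.shape, gy.shape)   # (3, 2) (3, 2)
print(gx[:, 0])             # [1 2 3] -> x varies along axis 0
print(gy[0, :])             # [4 5]   -> y varies along axis 1
```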
nilearn__nilearn-3162
Confusing Documentation in new_img_like
I think there is some slightly confusing language in the docstring of `new_img_like`. The Parameters list documents a copy_header parameter that controls whether the header is copied, but the Returns section says "A loaded image with the same type _(and header)_ as the reference image." (emphasis mine). I think it would be clearer to say "A loaded image with the same _file_ type (and, _optionally_, header) as the reference image."
The reason this is critical is that in nibabel, the header's data type will override the data type of the numpy array being held (at least for the common Nifti1Image). Due to a number of changes, this matters more than it used to (numpy uses 64-bit integers by default, whereas previously the data was 32-bit, and yet AFNI, FSL, and FreeSurfer all do not support 64-bit integer data). Somebody could easily scan the "Returns" field and not the "Parameters" field (certainly not a developer like myself, no, never!) and then mistakenly believe that the header will be copied, and therefore that the output data type of the array will match the original header.
Relevant docstring: https://github.com/nilearn/nilearn/blob/1607b52458c28953a87bbe6f42448b7b4e30a72f/nilearn/image/image.py#L699-L720
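Since the report hinges on nibabel's header-versus-array dtype behaviour, here is a minimal sketch of it (assuming a reasonably recent nibabel; this snippet is illustrative and not part of the issue or of nilearn):

```python
# The header's data type, not the in-memory array's, decides how a Nifti1Image
# is serialized -- so copying (or not copying) the header changes the on-disk dtype.
import numpy as np
import nibabel as nib

data = np.arange(8, dtype=np.float64).reshape(2, 2, 2)
img = nib.Nifti1Image(data, np.eye(4))
img.set_data_dtype(np.int16)                 # header says int16

print(np.asanyarray(img.dataobj).dtype)      # float64: the array keeps its dtype in memory
print(img.get_data_dtype())                  # int16: what gets written when the image is saved
```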
[ { "content": "\"\"\"\nPreprocessing functions for images.\n\nSee also nilearn.signal.\n\"\"\"\n# Authors: Philippe Gervais, Alexandre Abraham\n# License: simplified BSD\n\nimport collections.abc\nimport copy\nimport warnings\n\nimport nibabel\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom scipy import ndimage\nfrom scipy.stats import scoreatpercentile\n\nfrom .. import signal\nfrom .._utils import (_repr_niimgs,\n as_ndarray,\n check_niimg,\n check_niimg_3d,\n check_niimg_4d,\n fill_doc)\nfrom .._utils.niimg import _get_data, _safe_get_data\nfrom .._utils.niimg_conversions import _check_same_fov, _index_img\nfrom .._utils.param_validation import check_threshold\nfrom .._utils.helpers import rename_parameters\n\n\ndef get_data(img):\n \"\"\"Get the image data as a :class:`numpy.ndarray`.\n\n Parameters\n ----------\n img : Niimg-like object or iterable of Niimg-like objects\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n :class:`numpy.ndarray`\n 3D or 4D numpy array depending on the shape of `img`. This function\n preserves the type of the image data. If `img` is an in-memory Nifti image\n it returns the image data array itself -- not a copy.\n\n \"\"\"\n img = check_niimg(img)\n return _get_data(img)\n\n\ndef high_variance_confounds(imgs, n_confounds=5, percentile=2.,\n detrend=True, mask_img=None):\n \"\"\" Return confounds signals extracted from input signals with highest\n variance.\n\n Parameters\n ----------\n imgs : Niimg-like object\n 4D image.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n mask_img : Niimg-like object\n If not provided, all voxels are used.\n If provided, confounds are extracted from voxels inside the mask.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n n_confounds : :obj:`int`, optional\n Number of confounds to return. Default=5.\n\n percentile : :obj:`float`, optional\n Highest-variance signals percentile to keep before computing the\n singular value decomposition, 0. <= `percentile` <= 100.\n `mask_img.sum() * percentile / 100` must be greater than `n_confounds`.\n Default=2.\n\n detrend : :obj:`bool`, optional\n If True, detrend signals before processing. Default=True.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Highest variance confounds. Shape: *(number_of_scans, n_confounds)*.\n\n Notes\n ------\n This method is related to what has been published in the literature\n as 'CompCor' (Behzadi NeuroImage 2007).\n\n The implemented algorithm does the following:\n\n - Computes the sum of squares for each signal (no mean removal).\n - Keeps a given percentile of signals with highest variance (percentile).\n - Computes an SVD of the extracted signals.\n - Returns a given number (n_confounds) of signals from the SVD with\n highest singular values.\n\n See also\n --------\n nilearn.signal.high_variance_confounds\n\n \"\"\"\n from .. 
import masking\n\n if mask_img is not None:\n sigs = masking.apply_mask(imgs, mask_img)\n else:\n # Load the data only if it doesn't need to be masked\n imgs = check_niimg_4d(imgs)\n sigs = as_ndarray(get_data(imgs))\n # Not using apply_mask here saves memory in most cases.\n del imgs # help reduce memory consumption\n sigs = np.reshape(sigs, (-1, sigs.shape[-1])).T\n\n return signal.high_variance_confounds(sigs, n_confounds=n_confounds,\n percentile=percentile,\n detrend=detrend)\n\n\ndef _fast_smooth_array(arr):\n \"\"\"Simple smoothing which is less computationally expensive than\n applying a Gaussian filter.\n\n Only the first three dimensions of the array will be smoothed. The\n filter uses [0.2, 1, 0.2] weights in each direction and use a\n normalisation to preserve the local average value.\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n 4D array, with image number as last dimension. 3D arrays are\n also accepted.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Smoothed array.\n\n Notes\n -----\n Rather than calling this function directly, users are encouraged\n to call the high-level function :func:`smooth_img` with\n `fwhm='fast'`.\n\n \"\"\"\n neighbor_weight = 0.2\n # 6 neighbors in 3D if not on an edge\n nb_neighbors = 6\n # This scale ensures that a uniform array stays uniform\n # except on the array edges\n scale = 1 + nb_neighbors * neighbor_weight\n\n # Need to copy because the smoothing is done in multiple statements\n # and there does not seem to be an easy way to do it in place\n smoothed_arr = arr.copy()\n weighted_arr = neighbor_weight * arr\n\n smoothed_arr[:-1] += weighted_arr[1:]\n smoothed_arr[1:] += weighted_arr[:-1]\n smoothed_arr[:, :-1] += weighted_arr[:, 1:]\n smoothed_arr[:, 1:] += weighted_arr[:, :-1]\n smoothed_arr[:, :, :-1] += weighted_arr[:, :, 1:]\n smoothed_arr[:, :, 1:] += weighted_arr[:, :, :-1]\n smoothed_arr /= scale\n\n return smoothed_arr\n\n\n@fill_doc\ndef _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True):\n \"\"\"Smooth images by applying a Gaussian filter.\n\n Apply a Gaussian filter along the three first dimensions of `arr`.\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n 4D array, with image number as last dimension. 3D arrays are also\n accepted.\n\n affine : :class:`numpy.ndarray`\n (4, 4) matrix, giving affine transformation for image. (3, 3) matrices\n are also accepted (only these coefficients are used).\n If `fwhm='fast'`, the affine is not used and can be None.\n %(fwhm)s\n ensure_finite : :obj:`bool`, optional\n If True, replace every non-finite values (like NaNs) by zero before\n filtering. Default=True.\n\n copy : :obj:`bool`, optional\n If True, input array is not modified. True by default: the filtering\n is not performed in-place. Default=True.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Filtered `arr`.\n\n Notes\n -----\n This function is most efficient with arr in C order.\n\n \"\"\"\n # Here, we have to investigate use cases of fwhm. Particularly, if fwhm=0.\n # See issue #1537\n if isinstance(fwhm, (int, float)) and (fwhm == 0.0):\n warnings.warn(\"The parameter 'fwhm' for smoothing is specified \"\n \"as {0}. 
Setting it to None \"\n \"(no smoothing will be performed)\"\n .format(fwhm))\n fwhm = None\n if arr.dtype.kind == 'i':\n if arr.dtype == np.int64:\n arr = arr.astype(np.float64)\n else:\n arr = arr.astype(np.float32) # We don't need crazy precision.\n if copy:\n arr = arr.copy()\n if ensure_finite:\n # SPM tends to put NaNs in the data outside the brain\n arr[np.logical_not(np.isfinite(arr))] = 0\n if isinstance(fwhm, str) and (fwhm == 'fast'):\n arr = _fast_smooth_array(arr)\n elif fwhm is not None:\n fwhm = np.asarray([fwhm]).ravel()\n fwhm = np.asarray([0. if elem is None else elem for elem in fwhm])\n affine = affine[:3, :3] # Keep only the scale part.\n fwhm_over_sigma_ratio = np.sqrt(8 * np.log(2)) # FWHM to sigma.\n vox_size = np.sqrt(np.sum(affine ** 2, axis=0))\n sigma = fwhm / (fwhm_over_sigma_ratio * vox_size)\n for n, s in enumerate(sigma):\n if s > 0.0:\n ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)\n return arr\n\n\n@fill_doc\ndef smooth_img(imgs, fwhm):\n \"\"\"Smooth images by applying a Gaussian filter.\n\n Apply a Gaussian filter along the three first dimensions of `arr`.\n In all cases, non-finite values in input image are replaced by zeros.\n\n Parameters\n ----------\n imgs : Niimg-like object or iterable of Niimg-like objects\n Image(s) to smooth (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n %(fwhm)s\n\n Returns\n -------\n :class:`nibabel.nifti1.Nifti1Image` or list of\n Filtered input image. If `imgs` is an iterable, then `filtered_img` is a\n list.\n\n \"\"\"\n\n # Use hasattr() instead of isinstance to workaround a Python 2.6/2.7 bug\n # See http://bugs.python.org/issue7624\n if hasattr(imgs, \"__iter__\") \\\n and not isinstance(imgs, str):\n single_img = False\n else:\n single_img = True\n imgs = [imgs]\n\n ret = []\n for img in imgs:\n img = check_niimg(img)\n affine = img.affine\n filtered = _smooth_array(get_data(img), affine, fwhm=fwhm,\n ensure_finite=True, copy=True)\n ret.append(new_img_like(img, filtered, affine, copy_header=True))\n\n if single_img:\n return ret[0]\n else:\n return ret\n\n\ndef _crop_img_to(img, slices, copy=True):\n \"\"\"Crops an image to a smaller size.\n\n Crop `img` to size indicated by slices and adjust affine accordingly.\n\n Parameters\n ----------\n img : Niimg-like object\n Image to be cropped. If slices has less entries than `img` has dimensions,\n the slices will be applied to the first `len(slices)` dimensions (See\n http://nilearn.github.io/manipulating_images/input_output.html).\n\n slices : list of slices\n Defines the range of the crop.\n E.g. [slice(20, 200), slice(40, 150), slice(0, 100)] defines a cube.\n\n copy : :obj:`bool`, optional\n Specifies whether cropped data is to be copied or not. 
Default=True.\n\n Returns\n -------\n Niimg-like object\n Cropped version of the input image.\n\n offset : :obj:`list`, optional\n List of tuples representing the number of voxels removed (before, after)\n the cropped volumes, i.e.:\n *[(x1_pre, x1_post), (x2_pre, x2_post), ..., (xN_pre, xN_post)]*\n\n \"\"\"\n img = check_niimg(img)\n\n data = get_data(img)\n affine = img.affine\n\n cropped_data = data[tuple(slices)]\n if copy:\n cropped_data = cropped_data.copy()\n\n linear_part = affine[:3, :3]\n old_origin = affine[:3, 3]\n new_origin_voxel = np.array([s.start for s in slices])\n new_origin = old_origin + linear_part.dot(new_origin_voxel)\n\n new_affine = np.eye(4)\n new_affine[:3, :3] = linear_part\n new_affine[:3, 3] = new_origin\n\n return new_img_like(img, cropped_data, new_affine)\n\n\ndef crop_img(img, rtol=1e-8, copy=True, pad=True, return_offset=False):\n \"\"\"Crops an image as much as possible.\n\n Will crop `img`, removing as many zero entries as possible without\n touching non-zero entries. Will leave one voxel of zero padding\n around the obtained non-zero area in order to avoid sampling issues\n later on.\n\n Parameters\n ----------\n img : Niimg-like object\n Image to be cropped (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n rtol : :obj:`float`, optional\n relative tolerance (with respect to maximal absolute value of the\n image), under which values are considered negligeable and thus\n croppable. Default=1e-8.\n\n copy : :obj:`bool`, optional\n Specifies whether cropped data is copied or not. Default=True.\n\n pad : :obj:`bool`, optional\n Toggles adding 1-voxel of 0s around the border. Default=True.\n\n return_offset : :obj:`bool`, optional\n Specifies whether to return a tuple of the removed padding.\n Default=False.\n\n Returns\n -------\n Niimg-like object or :obj:`tuple`\n Cropped version of the input image and, if `return_offset=True`, a tuple\n of tuples representing the number of voxels removed (before, after) the\n cropped volumes, i.e.:\n *[(x1_pre, x1_post), (x2_pre, x2_post), ..., (xN_pre, xN_post)]*\n\n \"\"\"\n img = check_niimg(img)\n data = get_data(img)\n infinity_norm = max(-data.min(), data.max())\n passes_threshold = np.logical_or(data < -rtol * infinity_norm,\n data > rtol * infinity_norm)\n\n if data.ndim == 4:\n passes_threshold = np.any(passes_threshold, axis=-1)\n coords = np.array(np.where(passes_threshold))\n\n # Sets full range if no data are found along the axis\n if coords.shape[1] == 0:\n start, end = [0, 0, 0], list(data.shape)\n else:\n start = coords.min(axis=1)\n end = coords.max(axis=1) + 1\n\n # pad with one voxel to avoid resampling problems\n if pad:\n start = np.maximum(start - 1, 0)\n end = np.minimum(end + 1, data.shape[:3])\n\n slices = [slice(s, e) for s, e in zip(start, end)][:3]\n cropped_im = _crop_img_to(img, slices, copy=copy)\n return cropped_im if not return_offset else (cropped_im, tuple(slices))\n\n\ndef _pad_array(array, pad_sizes):\n \"\"\"Pad an array with zeros.\n\n Pads an array with zeros as specified in `pad_sizes`.\n\n Parameters\n ----------\n array : :class:`numpy.ndarray`\n Array to pad.\n\n pad_sizes : :obj:`list`\n Padding quantity specified as\n *[x1minpad, x1maxpad, x2minpad,x2maxpad, x3minpad, ...]*.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Padded array.\n\n Raises\n ------\n ValueError\n Inconsistent min/max padding quantities.\n\n \"\"\"\n if len(pad_sizes) % 2 != 0:\n raise ValueError(\"Please specify as many 
max paddings as min\"\n \" paddings. You have specified %d arguments\" %\n len(pad_sizes))\n\n all_paddings = np.zeros([array.ndim, 2], dtype=np.int64)\n all_paddings[:len(pad_sizes) // 2] = np.array(pad_sizes).reshape(-1, 2)\n\n lower_paddings, upper_paddings = all_paddings.T\n new_shape = np.array(array.shape) + upper_paddings + lower_paddings\n\n padded = np.zeros(new_shape, dtype=array.dtype)\n source_slices = [slice(max(-lp, 0), min(s + up, s))\n for lp, up, s in zip(lower_paddings,\n upper_paddings,\n array.shape)]\n target_slices = [slice(max(lp, 0), min(s - up, s))\n for lp, up, s in zip(lower_paddings,\n upper_paddings,\n new_shape)]\n\n padded[tuple(target_slices)] = array[source_slices].copy()\n return padded\n\n\ndef _compute_mean(imgs, target_affine=None,\n target_shape=None, smooth=False):\n from . import resampling\n input_repr = _repr_niimgs(imgs, shorten=True)\n\n imgs = check_niimg(imgs)\n mean_data = _safe_get_data(imgs)\n affine = imgs.affine\n # Free memory ASAP\n del imgs\n if mean_data.ndim not in (3, 4):\n raise ValueError('Computation expects 3D or 4D '\n 'images, but %i dimensions were given (%s)'\n % (mean_data.ndim, input_repr))\n if mean_data.ndim == 4:\n mean_data = mean_data.mean(axis=-1)\n else:\n mean_data = mean_data.copy()\n mean_data = resampling.resample_img(\n nibabel.Nifti1Image(mean_data, affine),\n target_affine=target_affine, target_shape=target_shape,\n copy=False)\n affine = mean_data.affine\n mean_data = get_data(mean_data)\n\n if smooth:\n nan_mask = np.isnan(mean_data)\n mean_data = _smooth_array(mean_data, affine=np.eye(4), fwhm=smooth,\n ensure_finite=True, copy=False)\n mean_data[nan_mask] = np.nan\n\n return mean_data, affine\n\n\ndef mean_img(imgs, target_affine=None, target_shape=None,\n verbose=0, n_jobs=1):\n \"\"\"Compute the mean of the images over time or the 4th dimension.\n\n Note that if list of 4D images are given, the mean of each 4D image is\n computed separately, and the resulting mean is computed after.\n\n Parameters\n ----------\n imgs : Niimg-like object or iterable of Niimg-like objects\n Images to be averaged over time (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n target_affine : :class:`numpy.ndarray`, optional\n If specified, the image is resampled corresponding to this new affine.\n target_affine can be a 3x3 or a 4x4 matrix.\n\n target_shape : :obj:`tuple` or :obj:`list`, optional\n If specified, the image will be resized to match this new shape.\n len(target_shape) must be equal to 3.\n A target_affine has to be specified jointly with target_shape.\n\n verbose : :obj:`int`, optional\n Controls the amount of verbosity: higher numbers give more messages\n (0 means no messages). Default=0.\n\n n_jobs : :obj:`int`, optional\n The number of CPUs to use to do the computation (-1 means\n 'all CPUs'). 
Default=1.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Mean image.\n\n See Also\n --------\n nilearn.image.math_img : For more general operations on images.\n\n \"\"\"\n is_str = isinstance(imgs, str)\n is_iterable = isinstance(imgs, collections.abc.Iterable)\n if is_str or not is_iterable:\n imgs = [imgs, ]\n\n imgs_iter = iter(imgs)\n first_img = check_niimg(next(imgs_iter))\n\n # Compute the first mean to retrieve the reference\n # target_affine and target_shape if_needed\n n_imgs = 1\n running_mean, first_affine = _compute_mean(first_img,\n target_affine=target_affine,\n target_shape=target_shape)\n\n if target_affine is None or target_shape is None:\n target_affine = first_affine\n target_shape = running_mean.shape[:3]\n\n for this_mean in Parallel(n_jobs=n_jobs, verbose=verbose)(\n delayed(_compute_mean)(n, target_affine=target_affine,\n target_shape=target_shape)\n for n in imgs_iter):\n n_imgs += 1\n # _compute_mean returns (mean_img, affine)\n this_mean = this_mean[0]\n running_mean += this_mean\n\n running_mean = running_mean / float(n_imgs)\n return new_img_like(first_img, running_mean, target_affine)\n\n\ndef swap_img_hemispheres(img):\n \"\"\"Performs swapping of hemispheres in the indicated NIfTI image.\n\n Use case: synchronizing ROIs across hemispheres.\n\n Parameters\n ----------\n img : Niimg-like object\n Images to swap (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Hemispherically swapped image.\n\n Notes\n -----\n Assumes that the image is sagitally aligned.\n\n Should be used with caution (confusion might be caused with\n radio/neuro conventions)\n\n Note that this does not require a change of the affine matrix.\n\n \"\"\"\n from .resampling import reorder_img\n\n # Check input is really a path to a nifti file or a nifti object\n img = check_niimg_3d(img)\n\n # get nifti in x-y-z order\n img = reorder_img(img)\n\n # create swapped nifti object\n out_img = new_img_like(img, get_data(img)[::-1], img.affine,\n copy_header=True)\n\n return out_img\n\n\ndef index_img(imgs, index):\n \"\"\"Indexes into a 4D Niimg-like object in the fourth dimension.\n\n Common use cases include extracting a 3D image out of `img` or\n creating a 4D image whose data is a subset of `img` data.\n\n Parameters\n ----------\n imgs : 4D Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n index : Any type compatible with numpy array indexing\n Used for indexing the 4D data array in the fourth dimension.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Indexed image.\n\n See Also\n --------\n nilearn.image.concat_imgs\n nilearn.image.iter_img\n\n Examples\n --------\n First we concatenate two MNI152 images to create a 4D-image::\n\n >>> from nilearn import datasets\n >>> from nilearn.image import concat_imgs, index_img\n >>> joint_mni_image = concat_imgs([datasets.load_mni152_template(),\n ... 
datasets.load_mni152_template()])\n >>> print(joint_mni_image.shape)\n (99, 117, 95, 2)\n\n We can now select one slice from the last dimension of this 4D-image::\n\n >>> single_mni_image = index_img(joint_mni_image, 1)\n >>> print(single_mni_image.shape)\n (99, 117, 95)\n\n We can also select multiple frames using the `slice` constructor::\n\n >>> five_mni_images = concat_imgs([datasets.load_mni152_template()] * 5)\n >>> print(five_mni_images.shape)\n (99, 117, 95, 5)\n\n >>> first_three_images = index_img(five_mni_images,\n ... slice(0, 3))\n >>> print(first_three_images.shape)\n (99, 117, 95, 3)\n\n \"\"\"\n imgs = check_niimg_4d(imgs)\n # duck-type for pandas arrays, and select the 'values' attr\n if hasattr(index, 'values') and hasattr(index, 'iloc'):\n index = index.values.flatten()\n return _index_img(imgs, index)\n\n\ndef iter_img(imgs):\n \"\"\"Iterates over a 4D Niimg-like object in the fourth dimension.\n\n Parameters\n ----------\n imgs : 4D Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n Iterator of 3D :class:`~nibabel.nifti1.Nifti1Image`\n\n See Also\n --------\n nilearn.image.index_img\n\n \"\"\"\n return check_niimg_4d(imgs, return_iterator=True)\n\n\ndef new_img_like(ref_niimg, data, affine=None, copy_header=False):\n \"\"\"Create a new image of the same class as the reference image\n\n Parameters\n ----------\n ref_niimg : Niimg-like object\n Reference image. The new image will be of the same type.\n\n data : :class:`numpy.ndarray`\n Data to be stored in the image.\n\n affine : 4x4 :class:`numpy.ndarray`, optional\n Transformation matrix.\n\n copy_header : :obj:`bool`, optional\n Indicated if the header of the reference image should be used to\n create the new image. Default=False.\n\n Returns\n -------\n Niimg-like object\n A loaded image with the same type (and header) as the reference image.\n\n \"\"\"\n # Hand-written loading code to avoid too much memory consumption\n orig_ref_niimg = ref_niimg\n is_str = isinstance(ref_niimg, str)\n has_get_data = hasattr(ref_niimg, 'get_data')\n has_get_fdata = hasattr(ref_niimg, 'get_fdata')\n has_iter = hasattr(ref_niimg, '__iter__')\n has_affine = hasattr(ref_niimg, 'affine')\n if has_iter and not any([is_str, has_get_data, has_get_fdata]):\n ref_niimg = ref_niimg[0]\n is_str = isinstance(ref_niimg, str)\n has_get_data = hasattr(ref_niimg, 'get_data')\n has_get_fdata = hasattr(ref_niimg, 'get_fdata')\n has_affine = hasattr(ref_niimg, 'affine')\n if not ((has_get_data or has_get_fdata) and has_affine):\n if is_str:\n ref_niimg = nibabel.load(ref_niimg)\n else:\n raise TypeError(('The reference image should be a niimg, %r '\n 'was passed') % orig_ref_niimg)\n\n if affine is None:\n affine = ref_niimg.affine\n if data.dtype == bool:\n default_dtype = np.int8\n if isinstance(ref_niimg, nibabel.freesurfer.mghformat.MGHImage):\n default_dtype = np.uint8\n data = as_ndarray(data, dtype=default_dtype)\n header = None\n if copy_header:\n header = copy.deepcopy(ref_niimg.header)\n try:\n 'something' in header\n except TypeError:\n pass\n else:\n if 'scl_slope' in header:\n header['scl_slope'] = 0.\n if 'scl_inter' in header:\n header['scl_inter'] = 0.\n # 'glmax' is removed for Nifti2Image. Modify only if 'glmax' is\n # available in header. 
See issue #1611\n if 'glmax' in header:\n header['glmax'] = 0.\n if 'cal_max' in header:\n header['cal_max'] = np.max(data) if data.size > 0 else 0.\n if 'cal_min' in header:\n header['cal_min'] = np.min(data) if data.size > 0 else 0.\n klass = ref_niimg.__class__\n if klass is nibabel.Nifti1Pair:\n # Nifti1Pair is an internal class, without a to_filename,\n # we shouldn't return it\n klass = nibabel.Nifti1Image\n return klass(data, affine, header=header)\n\n\ndef _apply_cluster_size_threshold(arr, cluster_threshold, copy=True):\n \"\"\"Apply cluster-extent thresholding to an array that has already been\n voxel-wise thresholded.\n\n Parameters\n ----------\n arr : :obj:`numpy.ndarray` of shape (X, Y, Z)\n 3D array that has been thresholded at the voxel level.\n cluster_threshold : :obj:`float`\n Cluster-size threshold, in voxels, to apply to ``arr``.\n copy : :obj:`bool`, optional\n Whether to copy the array before modifying it or not.\n Default is True.\n\n Returns\n -------\n arr : :obj:`numpy.ndarray` of shape (X, Y, Z)\n Cluster-extent thresholded array.\n\n Notes\n -----\n Clusters are defined in a bi-sided manner;\n both negative and positive clusters are evaluated,\n but this is done separately for each sign.\n\n Clusters are defined using 6-connectivity, also known as NN1 (in AFNI) or\n \"faces\" connectivity.\n \"\"\"\n assert arr.ndim == 3\n\n if copy:\n arr = arr.copy()\n\n # Define array for 6-connectivity, aka NN1 or \"faces\"\n conn_mat = np.zeros((3, 3, 3), int)\n conn_mat[:, 1, 1] = 1\n conn_mat[1, :, 1] = 1\n conn_mat[1, 1, :] = 1\n\n for sign in np.sign(arr):\n # Binarize using one-sided cluster-defining threshold\n binarized = ((arr * sign) > 0).astype(int)\n\n # Apply cluster threshold\n label_map = ndimage.measurements.label(binarized, conn_mat)[0]\n clust_ids = sorted(list(np.unique(label_map)[1:]))\n for c_val in clust_ids:\n if np.sum(label_map == c_val) < cluster_threshold:\n arr[label_map == c_val] = 0\n\n return arr\n\n\ndef threshold_img(\n img,\n threshold,\n cluster_threshold=0,\n two_sided=True,\n mask_img=None,\n copy=True,\n):\n \"\"\"Threshold the given input image, mostly statistical or atlas images.\n\n Thresholding can be done based on direct image intensities or selection\n threshold with given percentile.\n\n .. versionchanged:: 0.9.0\n New ``cluster_threshold`` and ``two_sided`` parameters added.\n\n .. versionadded:: 0.2\n\n Parameters\n ----------\n img : a 3D/4D Niimg-like object\n Image containing statistical or atlas maps which should be thresholded.\n\n threshold : :obj:`float` or :obj:`str`\n If float, we threshold the image based on image intensities meaning\n voxels which have intensities greater than this value will be kept.\n The given value should be within the range of minimum and\n maximum intensity of the input image.\n If string, it should finish with percent sign e.g. \"80%\" and we threshold\n based on the score obtained using this percentile on the image data. The\n voxels which have intensities greater than this score will be kept.\n The given string should be within the range of \"0%\" to \"100%\".\n\n cluster_threshold : :obj:`float`, optional\n Cluster size threshold, in voxels. In the returned thresholded map,\n sets of connected voxels (``clusters``) with size smaller\n than this number will be removed. Default=0.\n\n .. versionadded:: 0.9.0\n\n two_sided : :obj:`bool`, optional\n Whether the thresholding should yield both positive and negative\n part of the maps.\n Default=True.\n\n .. 
versionadded:: 0.9.0\n\n mask_img : Niimg-like object, default None, optional\n Mask image applied to mask the input data.\n If None, no masking will be applied.\n\n copy : :obj:`bool`, optional\n If True, input array is not modified. True by default: the filtering\n is not performed in-place. Default=True.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Thresholded image of the given input image.\n\n See also\n --------\n nilearn.glm.threshold_stats_img :\n Threshold a statistical image using the alpha value, optionally with\n false positive control.\n\n \"\"\"\n from . import resampling\n from .. import masking\n\n img = check_niimg(img)\n img_data = _safe_get_data(img, ensure_finite=True, copy_data=copy)\n affine = img.affine\n\n if mask_img is not None:\n mask_img = check_niimg_3d(mask_img)\n if not _check_same_fov(img, mask_img):\n mask_img = resampling.resample_img(mask_img, target_affine=affine,\n target_shape=img.shape[:3],\n interpolation=\"nearest\")\n\n mask_data, _ = masking._load_mask_img(mask_img)\n # Set as 0 for the values which are outside of the mask\n img_data[mask_data == 0.] = 0.\n\n cutoff_threshold = check_threshold(\n threshold,\n img_data,\n percentile_func=scoreatpercentile,\n name='threshold',\n )\n\n # Apply threshold\n if two_sided:\n img_data[np.abs(img_data) < cutoff_threshold] = 0.\n else:\n img_data[img_data < cutoff_threshold] = 0.\n\n # Expand to 4D to support both 3D and 4D\n expand_to_4d = img_data.ndim == 3\n if expand_to_4d:\n img_data = img_data[:, :, :, None]\n\n # Perform cluster thresholding, if requested\n if cluster_threshold > 0:\n for i_vol in range(img_data.shape[3]):\n img_data[..., i_vol] = _apply_cluster_size_threshold(\n img_data[..., i_vol],\n cluster_threshold,\n )\n\n if expand_to_4d:\n # Reduce back to 3D\n img_data = img_data[:, :, :, 0]\n\n # Reconstitute img object\n thresholded_img = new_img_like(img, img_data, affine)\n\n return thresholded_img\n\n\ndef math_img(formula, **imgs):\n \"\"\"Interpret a numpy based string formula using niimg in named parameters.\n\n .. versionadded:: 0.2.3\n\n Parameters\n ----------\n formula : :obj:`str`\n The mathematical formula to apply to image internal data. It can use\n numpy imported as 'np'.\n\n imgs : images (:class:`~nibabel.nifti1.Nifti1Image` or file names)\n Keyword arguments corresponding to the variables in the formula as\n Nifti images. All input images should have the same geometry (shape,\n affine).\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Result of the formula as a Nifti image. Note that the dimension of the\n result image can be smaller than the input image. The affine is the\n same as the input image.\n\n See Also\n --------\n nilearn.image.mean_img : To simply compute the mean of multiple images\n\n Examples\n --------\n Let's load an image using nilearn datasets module::\n\n >>> from nilearn import datasets\n >>> anatomical_image = datasets.load_mni152_template()\n\n Now we can use any numpy function on this image::\n\n >>> from nilearn.image import math_img\n >>> log_img = math_img(\"np.log(img)\", img=anatomical_image)\n\n We can also apply mathematical operations on several images::\n\n >>> result_img = math_img(\"img1 + img2\",\n ... 
img1=anatomical_image, img2=log_img)\n\n Notes\n -----\n This function is the Python equivalent of ImCal in SPM or fslmaths\n in FSL.\n\n \"\"\"\n try:\n # Check that input images are valid niimg and have a compatible shape\n # and affine.\n niimgs = []\n for image in imgs.values():\n niimgs.append(check_niimg(image))\n _check_same_fov(*niimgs, raise_error=True)\n except Exception as exc:\n exc.args = ((\"Input images cannot be compared, you provided '{0}',\"\n .format(imgs.values()),) + exc.args)\n raise\n\n # Computing input data as a dictionary of numpy arrays. Keep a reference\n # niimg for building the result as a new niimg.\n niimg = None\n data_dict = {}\n for key, img in imgs.items():\n niimg = check_niimg(img)\n data_dict[key] = _safe_get_data(niimg)\n\n # Add a reference to numpy in the kwargs of eval so that numpy functions\n # can be called from there.\n data_dict['np'] = np\n try:\n result = eval(formula, data_dict)\n except Exception as exc:\n exc.args = ((\"Input formula couldn't be processed, you provided '{0}',\"\n .format(formula),) + exc.args)\n raise\n\n return new_img_like(niimg, result, niimg.affine)\n\n\ndef binarize_img(img, threshold=0, mask_img=None):\n \"\"\"Binarize an image such that its values are either 0 or 1.\n\n .. versionadded:: 0.8.1\n\n Parameters\n ----------\n img : a 3D/4D Niimg-like object\n Image which should be binarized.\n\n threshold : :obj:`float` or :obj:`str`\n If float, we threshold the image based on image intensities meaning\n voxels which have intensities greater than this value will be kept.\n The given value should be within the range of minimum and\n maximum intensity of the input image.\n If string, it should finish with percent sign e.g. \"80%\" and we\n threshold based on the score obtained using this percentile on\n the image data. The voxels which have intensities greater than\n this score will be kept. The given string should be\n within the range of \"0%\" to \"100%\".\n\n mask_img : Niimg-like object, default None, optional\n Mask image applied to mask the input data.\n If None, no masking will be applied.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Binarized version of the given input image. Output dtype is int.\n\n See Also\n --------\n nilearn.image.threshold_img : To simply threshold but not binarize images.\n\n Examples\n --------\n Let's load an image using nilearn datasets module::\n\n >>> from nilearn import datasets\n >>> anatomical_image = datasets.load_mni152_template()\n\n Now we binarize it, generating a pseudo brainmask::\n\n >>> from nilearn.image import binarize_img\n >>> img = binarize_img(anatomical_image)\n\n \"\"\"\n return math_img(\n \"img.astype(bool).astype(int)\",\n img=threshold_img(img, threshold, mask_img=mask_img)\n )\n\n\n@rename_parameters({'sessions': 'runs'}, '0.10.0')\ndef clean_img(imgs, runs=None, detrend=True, standardize=True,\n confounds=None, low_pass=None, high_pass=None, t_r=None,\n ensure_finite=False, mask_img=None):\n \"\"\"Improve SNR on masked fMRI signals.\n\n This function can do several things on the input signals, in\n the following order:\n\n - detrend\n - low- and high-pass filter\n - remove confounds\n - standardize\n\n Low-pass filtering improves specificity.\n\n High-pass filtering should be kept small, to keep some sensitivity.\n\n Filtering is only meaningful on evenly-sampled signals.\n\n According to Lindquist et al. 
(2018), removal of confounds will be done\n orthogonally to temporal filters (low- and/or high-pass filters), if both\n are specified.\n\n .. versionadded:: 0.2.5\n\n Parameters\n ----------\n imgs : Niimg-like object\n 4D image. The signals in the last dimension are filtered (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n runs : :class:`numpy.ndarray`, optional\n Add a run level to the cleaning process. Each run will be\n cleaned independently. Must be a 1D array of n_samples elements.\n\n .. warning::\n\n 'runs' replaces 'sessions' after release 0.10.0.\n Using 'session' will result in an error after release 0.10.0.\n\n Default=``None``.\n\n detrend : :obj:`bool`, optional\n If detrending should be applied on timeseries (before confound removal).\n Default=True.\n\n standardize : :obj:`bool`, optional\n If True, returned signals are set to unit variance. Default=True.\n\n confounds : :class:`numpy.ndarray`, :obj:`str` or :obj:`list` of\n Confounds timeseries. optional\n Shape must be (instant number, confound number), or just (instant number,)\n The number of time instants in signals and confounds must be\n identical (i.e. signals.shape[0] == confounds.shape[0]).\n If a string is provided, it is assumed to be the name of a csv file\n containing signals as columns, with an optional one-line header.\n If a list is provided, all confounds are removed from the input\n signal, as if all were in the same array.\n\n low_pass : :obj:`float`, optional\n Low cutoff frequencies, in Hertz.\n\n high_pass : :obj:`float`, optional\n High cutoff frequencies, in Hertz.\n\n t_r : :obj:`float`, optional\n Repetition time, in second (sampling period). Set to None if not\n specified. Mandatory if used together with `low_pass` or `high_pass`.\n\n ensure_finite : :obj:`bool`, optional\n If True, the non-finite values (NaNs and infs) found in the images\n will be replaced by zeros. Default=False.\n\n mask_img : Niimg-like object, optional\n If provided, signal is only cleaned from voxels inside the mask. If\n mask is provided, it should have same shape and affine as imgs.\n If not provided, all voxels are used.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n Niimg-like object\n Input images, cleaned. Same shape as `imgs`.\n\n Notes\n -----\n Confounds removal is based on a projection on the orthogonal\n of the signal space [:footcite:`friston1994statistical`].\n\n Orthogonalization between temporal filters and confound removal is based on\n suggestions in [:footcite:`Lindquist407676`].\n\n References\n ----------\n .. footbibliography::\n\n See Also\n --------\n nilearn.signal.clean\n\n \"\"\"\n # Avoid circular import\n from .image import new_img_like\n from .. import masking\n\n imgs_ = check_niimg_4d(imgs)\n\n # Check if t_r is set, otherwise propose t_r from imgs header\n if low_pass is not None or high_pass is not None:\n if t_r is None:\n\n # We raise an error, instead of using the header's t_r as this\n # value is considered to be non-reliable\n raise ValueError(\n \"Repetition time (t_r) must be specified for filtering. You \"\n \"specified None. 
imgs header suggest it to be {0}\".format(\n imgs.header.get_zooms()[3]))\n\n # Prepare signal for cleaning\n if mask_img is not None:\n signals = masking.apply_mask(imgs_, mask_img)\n else:\n signals = get_data(imgs_).reshape(-1, imgs_.shape[-1]).T\n\n # Clean signal\n data = signal.clean(\n signals, runs=runs, detrend=detrend, standardize=standardize,\n confounds=confounds, low_pass=low_pass, high_pass=high_pass, t_r=t_r,\n ensure_finite=ensure_finite)\n\n # Put results back into Niimg-like object\n if mask_img is not None:\n imgs_ = masking.unmask(data, mask_img)\n else:\n imgs_ = new_img_like(\n imgs_, data.T.reshape(imgs_.shape), copy_header=True)\n\n return imgs_\n\n\ndef load_img(img, wildcards=True, dtype=None):\n \"\"\"Load a Niimg-like object from filenames or list of filenames.\n\n .. versionadded:: 0.2.5\n\n Parameters\n ----------\n img : Niimg-like object\n If string, consider it as a path to NIfTI image and call `nibabel.load()`\n on it. The '~' symbol is expanded to the user home folder.\n If it is an object, check if affine attribute is present, raise\n `TypeError` otherwise.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n wildcards : :obj:`bool`, optional\n Use `img` as a regular expression to get a list of matching input\n filenames.\n If multiple files match, the returned list is sorted using an ascending\n order.\n If no file matches the regular expression, a `ValueError` exception is\n raised.\n Default=True.\n\n dtype : {dtype, \"auto\"}, optional\n Data type toward which the data should be converted. If \"auto\", the\n data will be converted to int32 if dtype is discrete and float32 if it\n is continuous.\n\n Returns\n -------\n 3D/4D Niimg-like object\n Result can be :class:`~nibabel.nifti1.Nifti1Image` or the input, as-is. It is guaranteed\n that the returned object has an affine attributes and that\n nilearn.image.get_data returns its data.\n\n \"\"\"\n return check_niimg(img, wildcards=wildcards, dtype=dtype)\n\n\ndef largest_connected_component_img(imgs):\n \"\"\"Return the largest connected component of an image or list of images.\n\n .. versionadded:: 0.3.1\n\n Parameters\n ----------\n imgs : Niimg-like object or iterable of Niimg-like objects (3D)\n Image(s) to extract the largest connected component from.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n 3D Niimg-like object or list of\n Image or list of images containing the largest connected component.\n\n Notes\n -----\n **Handling big-endian in given Nifti image**\n This function changes the existing byte-ordering information to new byte\n order, if the dtype in given Nifti image has non-native data type.\n This operation is done internally to avoid big-endian issues with\n scipy ndimage module.\n\n \"\"\"\n from .._utils.ndimage import largest_connected_component\n\n if hasattr(imgs, \"__iter__\") and not isinstance(imgs, str):\n single_img = False\n else:\n single_img = True\n imgs = [imgs]\n\n ret = []\n for img in imgs:\n img = check_niimg_3d(img)\n affine = img.affine\n largest_component = largest_connected_component(_safe_get_data(img))\n ret.append(new_img_like(img, largest_component, affine,\n copy_header=True))\n\n if single_img:\n return ret[0]\n else:\n return ret\n", "path": "nilearn/image/image.py" } ]
[ { "content": "\"\"\"\nPreprocessing functions for images.\n\nSee also nilearn.signal.\n\"\"\"\n# Authors: Philippe Gervais, Alexandre Abraham\n# License: simplified BSD\n\nimport collections.abc\nimport copy\nimport warnings\n\nimport nibabel\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom scipy import ndimage\nfrom scipy.stats import scoreatpercentile\n\nfrom .. import signal\nfrom .._utils import (_repr_niimgs,\n as_ndarray,\n check_niimg,\n check_niimg_3d,\n check_niimg_4d,\n fill_doc)\nfrom .._utils.niimg import _get_data, _safe_get_data\nfrom .._utils.niimg_conversions import _check_same_fov, _index_img\nfrom .._utils.param_validation import check_threshold\nfrom .._utils.helpers import rename_parameters\n\n\ndef get_data(img):\n \"\"\"Get the image data as a :class:`numpy.ndarray`.\n\n Parameters\n ----------\n img : Niimg-like object or iterable of Niimg-like objects\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n :class:`numpy.ndarray`\n 3D or 4D numpy array depending on the shape of `img`. This function\n preserves the type of the image data. If `img` is an in-memory Nifti image\n it returns the image data array itself -- not a copy.\n\n \"\"\"\n img = check_niimg(img)\n return _get_data(img)\n\n\ndef high_variance_confounds(imgs, n_confounds=5, percentile=2.,\n detrend=True, mask_img=None):\n \"\"\" Return confounds signals extracted from input signals with highest\n variance.\n\n Parameters\n ----------\n imgs : Niimg-like object\n 4D image.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n mask_img : Niimg-like object\n If not provided, all voxels are used.\n If provided, confounds are extracted from voxels inside the mask.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n n_confounds : :obj:`int`, optional\n Number of confounds to return. Default=5.\n\n percentile : :obj:`float`, optional\n Highest-variance signals percentile to keep before computing the\n singular value decomposition, 0. <= `percentile` <= 100.\n `mask_img.sum() * percentile / 100` must be greater than `n_confounds`.\n Default=2.\n\n detrend : :obj:`bool`, optional\n If True, detrend signals before processing. Default=True.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Highest variance confounds. Shape: *(number_of_scans, n_confounds)*.\n\n Notes\n ------\n This method is related to what has been published in the literature\n as 'CompCor' (Behzadi NeuroImage 2007).\n\n The implemented algorithm does the following:\n\n - Computes the sum of squares for each signal (no mean removal).\n - Keeps a given percentile of signals with highest variance (percentile).\n - Computes an SVD of the extracted signals.\n - Returns a given number (n_confounds) of signals from the SVD with\n highest singular values.\n\n See also\n --------\n nilearn.signal.high_variance_confounds\n\n \"\"\"\n from .. 
import masking\n\n if mask_img is not None:\n sigs = masking.apply_mask(imgs, mask_img)\n else:\n # Load the data only if it doesn't need to be masked\n imgs = check_niimg_4d(imgs)\n sigs = as_ndarray(get_data(imgs))\n # Not using apply_mask here saves memory in most cases.\n del imgs # help reduce memory consumption\n sigs = np.reshape(sigs, (-1, sigs.shape[-1])).T\n\n return signal.high_variance_confounds(sigs, n_confounds=n_confounds,\n percentile=percentile,\n detrend=detrend)\n\n\ndef _fast_smooth_array(arr):\n \"\"\"Simple smoothing which is less computationally expensive than\n applying a Gaussian filter.\n\n Only the first three dimensions of the array will be smoothed. The\n filter uses [0.2, 1, 0.2] weights in each direction and use a\n normalisation to preserve the local average value.\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n 4D array, with image number as last dimension. 3D arrays are\n also accepted.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Smoothed array.\n\n Notes\n -----\n Rather than calling this function directly, users are encouraged\n to call the high-level function :func:`smooth_img` with\n `fwhm='fast'`.\n\n \"\"\"\n neighbor_weight = 0.2\n # 6 neighbors in 3D if not on an edge\n nb_neighbors = 6\n # This scale ensures that a uniform array stays uniform\n # except on the array edges\n scale = 1 + nb_neighbors * neighbor_weight\n\n # Need to copy because the smoothing is done in multiple statements\n # and there does not seem to be an easy way to do it in place\n smoothed_arr = arr.copy()\n weighted_arr = neighbor_weight * arr\n\n smoothed_arr[:-1] += weighted_arr[1:]\n smoothed_arr[1:] += weighted_arr[:-1]\n smoothed_arr[:, :-1] += weighted_arr[:, 1:]\n smoothed_arr[:, 1:] += weighted_arr[:, :-1]\n smoothed_arr[:, :, :-1] += weighted_arr[:, :, 1:]\n smoothed_arr[:, :, 1:] += weighted_arr[:, :, :-1]\n smoothed_arr /= scale\n\n return smoothed_arr\n\n\n@fill_doc\ndef _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True):\n \"\"\"Smooth images by applying a Gaussian filter.\n\n Apply a Gaussian filter along the three first dimensions of `arr`.\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n 4D array, with image number as last dimension. 3D arrays are also\n accepted.\n\n affine : :class:`numpy.ndarray`\n (4, 4) matrix, giving affine transformation for image. (3, 3) matrices\n are also accepted (only these coefficients are used).\n If `fwhm='fast'`, the affine is not used and can be None.\n %(fwhm)s\n ensure_finite : :obj:`bool`, optional\n If True, replace every non-finite values (like NaNs) by zero before\n filtering. Default=True.\n\n copy : :obj:`bool`, optional\n If True, input array is not modified. True by default: the filtering\n is not performed in-place. Default=True.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Filtered `arr`.\n\n Notes\n -----\n This function is most efficient with arr in C order.\n\n \"\"\"\n # Here, we have to investigate use cases of fwhm. Particularly, if fwhm=0.\n # See issue #1537\n if isinstance(fwhm, (int, float)) and (fwhm == 0.0):\n warnings.warn(\"The parameter 'fwhm' for smoothing is specified \"\n \"as {0}. 
Setting it to None \"\n \"(no smoothing will be performed)\"\n .format(fwhm))\n fwhm = None\n if arr.dtype.kind == 'i':\n if arr.dtype == np.int64:\n arr = arr.astype(np.float64)\n else:\n arr = arr.astype(np.float32) # We don't need crazy precision.\n if copy:\n arr = arr.copy()\n if ensure_finite:\n # SPM tends to put NaNs in the data outside the brain\n arr[np.logical_not(np.isfinite(arr))] = 0\n if isinstance(fwhm, str) and (fwhm == 'fast'):\n arr = _fast_smooth_array(arr)\n elif fwhm is not None:\n fwhm = np.asarray([fwhm]).ravel()\n fwhm = np.asarray([0. if elem is None else elem for elem in fwhm])\n affine = affine[:3, :3] # Keep only the scale part.\n fwhm_over_sigma_ratio = np.sqrt(8 * np.log(2)) # FWHM to sigma.\n vox_size = np.sqrt(np.sum(affine ** 2, axis=0))\n sigma = fwhm / (fwhm_over_sigma_ratio * vox_size)\n for n, s in enumerate(sigma):\n if s > 0.0:\n ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)\n return arr\n\n\n@fill_doc\ndef smooth_img(imgs, fwhm):\n \"\"\"Smooth images by applying a Gaussian filter.\n\n Apply a Gaussian filter along the three first dimensions of `arr`.\n In all cases, non-finite values in input image are replaced by zeros.\n\n Parameters\n ----------\n imgs : Niimg-like object or iterable of Niimg-like objects\n Image(s) to smooth (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n %(fwhm)s\n\n Returns\n -------\n :class:`nibabel.nifti1.Nifti1Image` or list of\n Filtered input image. If `imgs` is an iterable, then `filtered_img` is a\n list.\n\n \"\"\"\n\n # Use hasattr() instead of isinstance to workaround a Python 2.6/2.7 bug\n # See http://bugs.python.org/issue7624\n if hasattr(imgs, \"__iter__\") \\\n and not isinstance(imgs, str):\n single_img = False\n else:\n single_img = True\n imgs = [imgs]\n\n ret = []\n for img in imgs:\n img = check_niimg(img)\n affine = img.affine\n filtered = _smooth_array(get_data(img), affine, fwhm=fwhm,\n ensure_finite=True, copy=True)\n ret.append(new_img_like(img, filtered, affine, copy_header=True))\n\n if single_img:\n return ret[0]\n else:\n return ret\n\n\ndef _crop_img_to(img, slices, copy=True):\n \"\"\"Crops an image to a smaller size.\n\n Crop `img` to size indicated by slices and adjust affine accordingly.\n\n Parameters\n ----------\n img : Niimg-like object\n Image to be cropped. If slices has less entries than `img` has dimensions,\n the slices will be applied to the first `len(slices)` dimensions (See\n http://nilearn.github.io/manipulating_images/input_output.html).\n\n slices : list of slices\n Defines the range of the crop.\n E.g. [slice(20, 200), slice(40, 150), slice(0, 100)] defines a cube.\n\n copy : :obj:`bool`, optional\n Specifies whether cropped data is to be copied or not. 
Default=True.\n\n Returns\n -------\n Niimg-like object\n Cropped version of the input image.\n\n offset : :obj:`list`, optional\n List of tuples representing the number of voxels removed (before, after)\n the cropped volumes, i.e.:\n *[(x1_pre, x1_post), (x2_pre, x2_post), ..., (xN_pre, xN_post)]*\n\n \"\"\"\n img = check_niimg(img)\n\n data = get_data(img)\n affine = img.affine\n\n cropped_data = data[tuple(slices)]\n if copy:\n cropped_data = cropped_data.copy()\n\n linear_part = affine[:3, :3]\n old_origin = affine[:3, 3]\n new_origin_voxel = np.array([s.start for s in slices])\n new_origin = old_origin + linear_part.dot(new_origin_voxel)\n\n new_affine = np.eye(4)\n new_affine[:3, :3] = linear_part\n new_affine[:3, 3] = new_origin\n\n return new_img_like(img, cropped_data, new_affine)\n\n\ndef crop_img(img, rtol=1e-8, copy=True, pad=True, return_offset=False):\n \"\"\"Crops an image as much as possible.\n\n Will crop `img`, removing as many zero entries as possible without\n touching non-zero entries. Will leave one voxel of zero padding\n around the obtained non-zero area in order to avoid sampling issues\n later on.\n\n Parameters\n ----------\n img : Niimg-like object\n Image to be cropped (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n rtol : :obj:`float`, optional\n relative tolerance (with respect to maximal absolute value of the\n image), under which values are considered negligeable and thus\n croppable. Default=1e-8.\n\n copy : :obj:`bool`, optional\n Specifies whether cropped data is copied or not. Default=True.\n\n pad : :obj:`bool`, optional\n Toggles adding 1-voxel of 0s around the border. Default=True.\n\n return_offset : :obj:`bool`, optional\n Specifies whether to return a tuple of the removed padding.\n Default=False.\n\n Returns\n -------\n Niimg-like object or :obj:`tuple`\n Cropped version of the input image and, if `return_offset=True`, a tuple\n of tuples representing the number of voxels removed (before, after) the\n cropped volumes, i.e.:\n *[(x1_pre, x1_post), (x2_pre, x2_post), ..., (xN_pre, xN_post)]*\n\n \"\"\"\n img = check_niimg(img)\n data = get_data(img)\n infinity_norm = max(-data.min(), data.max())\n passes_threshold = np.logical_or(data < -rtol * infinity_norm,\n data > rtol * infinity_norm)\n\n if data.ndim == 4:\n passes_threshold = np.any(passes_threshold, axis=-1)\n coords = np.array(np.where(passes_threshold))\n\n # Sets full range if no data are found along the axis\n if coords.shape[1] == 0:\n start, end = [0, 0, 0], list(data.shape)\n else:\n start = coords.min(axis=1)\n end = coords.max(axis=1) + 1\n\n # pad with one voxel to avoid resampling problems\n if pad:\n start = np.maximum(start - 1, 0)\n end = np.minimum(end + 1, data.shape[:3])\n\n slices = [slice(s, e) for s, e in zip(start, end)][:3]\n cropped_im = _crop_img_to(img, slices, copy=copy)\n return cropped_im if not return_offset else (cropped_im, tuple(slices))\n\n\ndef _pad_array(array, pad_sizes):\n \"\"\"Pad an array with zeros.\n\n Pads an array with zeros as specified in `pad_sizes`.\n\n Parameters\n ----------\n array : :class:`numpy.ndarray`\n Array to pad.\n\n pad_sizes : :obj:`list`\n Padding quantity specified as\n *[x1minpad, x1maxpad, x2minpad,x2maxpad, x3minpad, ...]*.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Padded array.\n\n Raises\n ------\n ValueError\n Inconsistent min/max padding quantities.\n\n \"\"\"\n if len(pad_sizes) % 2 != 0:\n raise ValueError(\"Please specify as many 
max paddings as min\"\n \" paddings. You have specified %d arguments\" %\n len(pad_sizes))\n\n all_paddings = np.zeros([array.ndim, 2], dtype=np.int64)\n all_paddings[:len(pad_sizes) // 2] = np.array(pad_sizes).reshape(-1, 2)\n\n lower_paddings, upper_paddings = all_paddings.T\n new_shape = np.array(array.shape) + upper_paddings + lower_paddings\n\n padded = np.zeros(new_shape, dtype=array.dtype)\n source_slices = [slice(max(-lp, 0), min(s + up, s))\n for lp, up, s in zip(lower_paddings,\n upper_paddings,\n array.shape)]\n target_slices = [slice(max(lp, 0), min(s - up, s))\n for lp, up, s in zip(lower_paddings,\n upper_paddings,\n new_shape)]\n\n padded[tuple(target_slices)] = array[source_slices].copy()\n return padded\n\n\ndef _compute_mean(imgs, target_affine=None,\n target_shape=None, smooth=False):\n from . import resampling\n input_repr = _repr_niimgs(imgs, shorten=True)\n\n imgs = check_niimg(imgs)\n mean_data = _safe_get_data(imgs)\n affine = imgs.affine\n # Free memory ASAP\n del imgs\n if mean_data.ndim not in (3, 4):\n raise ValueError('Computation expects 3D or 4D '\n 'images, but %i dimensions were given (%s)'\n % (mean_data.ndim, input_repr))\n if mean_data.ndim == 4:\n mean_data = mean_data.mean(axis=-1)\n else:\n mean_data = mean_data.copy()\n mean_data = resampling.resample_img(\n nibabel.Nifti1Image(mean_data, affine),\n target_affine=target_affine, target_shape=target_shape,\n copy=False)\n affine = mean_data.affine\n mean_data = get_data(mean_data)\n\n if smooth:\n nan_mask = np.isnan(mean_data)\n mean_data = _smooth_array(mean_data, affine=np.eye(4), fwhm=smooth,\n ensure_finite=True, copy=False)\n mean_data[nan_mask] = np.nan\n\n return mean_data, affine\n\n\ndef mean_img(imgs, target_affine=None, target_shape=None,\n verbose=0, n_jobs=1):\n \"\"\"Compute the mean of the images over time or the 4th dimension.\n\n Note that if list of 4D images are given, the mean of each 4D image is\n computed separately, and the resulting mean is computed after.\n\n Parameters\n ----------\n imgs : Niimg-like object or iterable of Niimg-like objects\n Images to be averaged over time (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n target_affine : :class:`numpy.ndarray`, optional\n If specified, the image is resampled corresponding to this new affine.\n target_affine can be a 3x3 or a 4x4 matrix.\n\n target_shape : :obj:`tuple` or :obj:`list`, optional\n If specified, the image will be resized to match this new shape.\n len(target_shape) must be equal to 3.\n A target_affine has to be specified jointly with target_shape.\n\n verbose : :obj:`int`, optional\n Controls the amount of verbosity: higher numbers give more messages\n (0 means no messages). Default=0.\n\n n_jobs : :obj:`int`, optional\n The number of CPUs to use to do the computation (-1 means\n 'all CPUs'). 
Default=1.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Mean image.\n\n See Also\n --------\n nilearn.image.math_img : For more general operations on images.\n\n \"\"\"\n is_str = isinstance(imgs, str)\n is_iterable = isinstance(imgs, collections.abc.Iterable)\n if is_str or not is_iterable:\n imgs = [imgs, ]\n\n imgs_iter = iter(imgs)\n first_img = check_niimg(next(imgs_iter))\n\n # Compute the first mean to retrieve the reference\n # target_affine and target_shape if_needed\n n_imgs = 1\n running_mean, first_affine = _compute_mean(first_img,\n target_affine=target_affine,\n target_shape=target_shape)\n\n if target_affine is None or target_shape is None:\n target_affine = first_affine\n target_shape = running_mean.shape[:3]\n\n for this_mean in Parallel(n_jobs=n_jobs, verbose=verbose)(\n delayed(_compute_mean)(n, target_affine=target_affine,\n target_shape=target_shape)\n for n in imgs_iter):\n n_imgs += 1\n # _compute_mean returns (mean_img, affine)\n this_mean = this_mean[0]\n running_mean += this_mean\n\n running_mean = running_mean / float(n_imgs)\n return new_img_like(first_img, running_mean, target_affine)\n\n\ndef swap_img_hemispheres(img):\n \"\"\"Performs swapping of hemispheres in the indicated NIfTI image.\n\n Use case: synchronizing ROIs across hemispheres.\n\n Parameters\n ----------\n img : Niimg-like object\n Images to swap (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Hemispherically swapped image.\n\n Notes\n -----\n Assumes that the image is sagitally aligned.\n\n Should be used with caution (confusion might be caused with\n radio/neuro conventions)\n\n Note that this does not require a change of the affine matrix.\n\n \"\"\"\n from .resampling import reorder_img\n\n # Check input is really a path to a nifti file or a nifti object\n img = check_niimg_3d(img)\n\n # get nifti in x-y-z order\n img = reorder_img(img)\n\n # create swapped nifti object\n out_img = new_img_like(img, get_data(img)[::-1], img.affine,\n copy_header=True)\n\n return out_img\n\n\ndef index_img(imgs, index):\n \"\"\"Indexes into a 4D Niimg-like object in the fourth dimension.\n\n Common use cases include extracting a 3D image out of `img` or\n creating a 4D image whose data is a subset of `img` data.\n\n Parameters\n ----------\n imgs : 4D Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n index : Any type compatible with numpy array indexing\n Used for indexing the 4D data array in the fourth dimension.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Indexed image.\n\n See Also\n --------\n nilearn.image.concat_imgs\n nilearn.image.iter_img\n\n Examples\n --------\n First we concatenate two MNI152 images to create a 4D-image::\n\n >>> from nilearn import datasets\n >>> from nilearn.image import concat_imgs, index_img\n >>> joint_mni_image = concat_imgs([datasets.load_mni152_template(),\n ... 
datasets.load_mni152_template()])\n >>> print(joint_mni_image.shape)\n (99, 117, 95, 2)\n\n We can now select one slice from the last dimension of this 4D-image::\n\n >>> single_mni_image = index_img(joint_mni_image, 1)\n >>> print(single_mni_image.shape)\n (99, 117, 95)\n\n We can also select multiple frames using the `slice` constructor::\n\n >>> five_mni_images = concat_imgs([datasets.load_mni152_template()] * 5)\n >>> print(five_mni_images.shape)\n (99, 117, 95, 5)\n\n >>> first_three_images = index_img(five_mni_images,\n ... slice(0, 3))\n >>> print(first_three_images.shape)\n (99, 117, 95, 3)\n\n \"\"\"\n imgs = check_niimg_4d(imgs)\n # duck-type for pandas arrays, and select the 'values' attr\n if hasattr(index, 'values') and hasattr(index, 'iloc'):\n index = index.values.flatten()\n return _index_img(imgs, index)\n\n\ndef iter_img(imgs):\n \"\"\"Iterates over a 4D Niimg-like object in the fourth dimension.\n\n Parameters\n ----------\n imgs : 4D Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n Iterator of 3D :class:`~nibabel.nifti1.Nifti1Image`\n\n See Also\n --------\n nilearn.image.index_img\n\n \"\"\"\n return check_niimg_4d(imgs, return_iterator=True)\n\n\ndef new_img_like(ref_niimg, data, affine=None, copy_header=False):\n \"\"\"Create a new image of the same class as the reference image\n\n Parameters\n ----------\n ref_niimg : Niimg-like object\n Reference image. The new image will be of the same type.\n\n data : :class:`numpy.ndarray`\n Data to be stored in the image.\n\n affine : 4x4 :class:`numpy.ndarray`, optional\n Transformation matrix.\n\n copy_header : :obj:`bool`, optional\n Indicated if the header of the reference image should be used to\n create the new image. Default=False.\n\n Returns\n -------\n Niimg-like object\n A loaded image with the same file type (and, optionally, header)\n as the reference image.\n\n \"\"\"\n # Hand-written loading code to avoid too much memory consumption\n orig_ref_niimg = ref_niimg\n is_str = isinstance(ref_niimg, str)\n has_get_data = hasattr(ref_niimg, 'get_data')\n has_get_fdata = hasattr(ref_niimg, 'get_fdata')\n has_iter = hasattr(ref_niimg, '__iter__')\n has_affine = hasattr(ref_niimg, 'affine')\n if has_iter and not any([is_str, has_get_data, has_get_fdata]):\n ref_niimg = ref_niimg[0]\n is_str = isinstance(ref_niimg, str)\n has_get_data = hasattr(ref_niimg, 'get_data')\n has_get_fdata = hasattr(ref_niimg, 'get_fdata')\n has_affine = hasattr(ref_niimg, 'affine')\n if not ((has_get_data or has_get_fdata) and has_affine):\n if is_str:\n ref_niimg = nibabel.load(ref_niimg)\n else:\n raise TypeError(('The reference image should be a niimg, %r '\n 'was passed') % orig_ref_niimg)\n\n if affine is None:\n affine = ref_niimg.affine\n if data.dtype == bool:\n default_dtype = np.int8\n if isinstance(ref_niimg, nibabel.freesurfer.mghformat.MGHImage):\n default_dtype = np.uint8\n data = as_ndarray(data, dtype=default_dtype)\n header = None\n if copy_header:\n header = copy.deepcopy(ref_niimg.header)\n try:\n 'something' in header\n except TypeError:\n pass\n else:\n if 'scl_slope' in header:\n header['scl_slope'] = 0.\n if 'scl_inter' in header:\n header['scl_inter'] = 0.\n # 'glmax' is removed for Nifti2Image. Modify only if 'glmax' is\n # available in header. 
See issue #1611\n if 'glmax' in header:\n header['glmax'] = 0.\n if 'cal_max' in header:\n header['cal_max'] = np.max(data) if data.size > 0 else 0.\n if 'cal_min' in header:\n header['cal_min'] = np.min(data) if data.size > 0 else 0.\n klass = ref_niimg.__class__\n if klass is nibabel.Nifti1Pair:\n # Nifti1Pair is an internal class, without a to_filename,\n # we shouldn't return it\n klass = nibabel.Nifti1Image\n return klass(data, affine, header=header)\n\n\ndef _apply_cluster_size_threshold(arr, cluster_threshold, copy=True):\n \"\"\"Apply cluster-extent thresholding to an array that has already been\n voxel-wise thresholded.\n\n Parameters\n ----------\n arr : :obj:`numpy.ndarray` of shape (X, Y, Z)\n 3D array that has been thresholded at the voxel level.\n cluster_threshold : :obj:`float`\n Cluster-size threshold, in voxels, to apply to ``arr``.\n copy : :obj:`bool`, optional\n Whether to copy the array before modifying it or not.\n Default is True.\n\n Returns\n -------\n arr : :obj:`numpy.ndarray` of shape (X, Y, Z)\n Cluster-extent thresholded array.\n\n Notes\n -----\n Clusters are defined in a bi-sided manner;\n both negative and positive clusters are evaluated,\n but this is done separately for each sign.\n\n Clusters are defined using 6-connectivity, also known as NN1 (in AFNI) or\n \"faces\" connectivity.\n \"\"\"\n assert arr.ndim == 3\n\n if copy:\n arr = arr.copy()\n\n # Define array for 6-connectivity, aka NN1 or \"faces\"\n conn_mat = np.zeros((3, 3, 3), int)\n conn_mat[:, 1, 1] = 1\n conn_mat[1, :, 1] = 1\n conn_mat[1, 1, :] = 1\n\n for sign in np.sign(arr):\n # Binarize using one-sided cluster-defining threshold\n binarized = ((arr * sign) > 0).astype(int)\n\n # Apply cluster threshold\n label_map = ndimage.measurements.label(binarized, conn_mat)[0]\n clust_ids = sorted(list(np.unique(label_map)[1:]))\n for c_val in clust_ids:\n if np.sum(label_map == c_val) < cluster_threshold:\n arr[label_map == c_val] = 0\n\n return arr\n\n\ndef threshold_img(\n img,\n threshold,\n cluster_threshold=0,\n two_sided=True,\n mask_img=None,\n copy=True,\n):\n \"\"\"Threshold the given input image, mostly statistical or atlas images.\n\n Thresholding can be done based on direct image intensities or selection\n threshold with given percentile.\n\n .. versionchanged:: 0.9.0\n New ``cluster_threshold`` and ``two_sided`` parameters added.\n\n .. versionadded:: 0.2\n\n Parameters\n ----------\n img : a 3D/4D Niimg-like object\n Image containing statistical or atlas maps which should be thresholded.\n\n threshold : :obj:`float` or :obj:`str`\n If float, we threshold the image based on image intensities meaning\n voxels which have intensities greater than this value will be kept.\n The given value should be within the range of minimum and\n maximum intensity of the input image.\n If string, it should finish with percent sign e.g. \"80%\" and we threshold\n based on the score obtained using this percentile on the image data. The\n voxels which have intensities greater than this score will be kept.\n The given string should be within the range of \"0%\" to \"100%\".\n\n cluster_threshold : :obj:`float`, optional\n Cluster size threshold, in voxels. In the returned thresholded map,\n sets of connected voxels (``clusters``) with size smaller\n than this number will be removed. Default=0.\n\n .. versionadded:: 0.9.0\n\n two_sided : :obj:`bool`, optional\n Whether the thresholding should yield both positive and negative\n part of the maps.\n Default=True.\n\n .. 
versionadded:: 0.9.0\n\n mask_img : Niimg-like object, default None, optional\n Mask image applied to mask the input data.\n If None, no masking will be applied.\n\n copy : :obj:`bool`, optional\n If True, input array is not modified. True by default: the filtering\n is not performed in-place. Default=True.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Thresholded image of the given input image.\n\n See also\n --------\n nilearn.glm.threshold_stats_img :\n Threshold a statistical image using the alpha value, optionally with\n false positive control.\n\n \"\"\"\n from . import resampling\n from .. import masking\n\n img = check_niimg(img)\n img_data = _safe_get_data(img, ensure_finite=True, copy_data=copy)\n affine = img.affine\n\n if mask_img is not None:\n mask_img = check_niimg_3d(mask_img)\n if not _check_same_fov(img, mask_img):\n mask_img = resampling.resample_img(mask_img, target_affine=affine,\n target_shape=img.shape[:3],\n interpolation=\"nearest\")\n\n mask_data, _ = masking._load_mask_img(mask_img)\n # Set as 0 for the values which are outside of the mask\n img_data[mask_data == 0.] = 0.\n\n cutoff_threshold = check_threshold(\n threshold,\n img_data,\n percentile_func=scoreatpercentile,\n name='threshold',\n )\n\n # Apply threshold\n if two_sided:\n img_data[np.abs(img_data) < cutoff_threshold] = 0.\n else:\n img_data[img_data < cutoff_threshold] = 0.\n\n # Expand to 4D to support both 3D and 4D\n expand_to_4d = img_data.ndim == 3\n if expand_to_4d:\n img_data = img_data[:, :, :, None]\n\n # Perform cluster thresholding, if requested\n if cluster_threshold > 0:\n for i_vol in range(img_data.shape[3]):\n img_data[..., i_vol] = _apply_cluster_size_threshold(\n img_data[..., i_vol],\n cluster_threshold,\n )\n\n if expand_to_4d:\n # Reduce back to 3D\n img_data = img_data[:, :, :, 0]\n\n # Reconstitute img object\n thresholded_img = new_img_like(img, img_data, affine)\n\n return thresholded_img\n\n\ndef math_img(formula, **imgs):\n \"\"\"Interpret a numpy based string formula using niimg in named parameters.\n\n .. versionadded:: 0.2.3\n\n Parameters\n ----------\n formula : :obj:`str`\n The mathematical formula to apply to image internal data. It can use\n numpy imported as 'np'.\n\n imgs : images (:class:`~nibabel.nifti1.Nifti1Image` or file names)\n Keyword arguments corresponding to the variables in the formula as\n Nifti images. All input images should have the same geometry (shape,\n affine).\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Result of the formula as a Nifti image. Note that the dimension of the\n result image can be smaller than the input image. The affine is the\n same as the input image.\n\n See Also\n --------\n nilearn.image.mean_img : To simply compute the mean of multiple images\n\n Examples\n --------\n Let's load an image using nilearn datasets module::\n\n >>> from nilearn import datasets\n >>> anatomical_image = datasets.load_mni152_template()\n\n Now we can use any numpy function on this image::\n\n >>> from nilearn.image import math_img\n >>> log_img = math_img(\"np.log(img)\", img=anatomical_image)\n\n We can also apply mathematical operations on several images::\n\n >>> result_img = math_img(\"img1 + img2\",\n ... 
img1=anatomical_image, img2=log_img)\n\n Notes\n -----\n This function is the Python equivalent of ImCal in SPM or fslmaths\n in FSL.\n\n \"\"\"\n try:\n # Check that input images are valid niimg and have a compatible shape\n # and affine.\n niimgs = []\n for image in imgs.values():\n niimgs.append(check_niimg(image))\n _check_same_fov(*niimgs, raise_error=True)\n except Exception as exc:\n exc.args = ((\"Input images cannot be compared, you provided '{0}',\"\n .format(imgs.values()),) + exc.args)\n raise\n\n # Computing input data as a dictionary of numpy arrays. Keep a reference\n # niimg for building the result as a new niimg.\n niimg = None\n data_dict = {}\n for key, img in imgs.items():\n niimg = check_niimg(img)\n data_dict[key] = _safe_get_data(niimg)\n\n # Add a reference to numpy in the kwargs of eval so that numpy functions\n # can be called from there.\n data_dict['np'] = np\n try:\n result = eval(formula, data_dict)\n except Exception as exc:\n exc.args = ((\"Input formula couldn't be processed, you provided '{0}',\"\n .format(formula),) + exc.args)\n raise\n\n return new_img_like(niimg, result, niimg.affine)\n\n\ndef binarize_img(img, threshold=0, mask_img=None):\n \"\"\"Binarize an image such that its values are either 0 or 1.\n\n .. versionadded:: 0.8.1\n\n Parameters\n ----------\n img : a 3D/4D Niimg-like object\n Image which should be binarized.\n\n threshold : :obj:`float` or :obj:`str`\n If float, we threshold the image based on image intensities meaning\n voxels which have intensities greater than this value will be kept.\n The given value should be within the range of minimum and\n maximum intensity of the input image.\n If string, it should finish with percent sign e.g. \"80%\" and we\n threshold based on the score obtained using this percentile on\n the image data. The voxels which have intensities greater than\n this score will be kept. The given string should be\n within the range of \"0%\" to \"100%\".\n\n mask_img : Niimg-like object, default None, optional\n Mask image applied to mask the input data.\n If None, no masking will be applied.\n\n Returns\n -------\n :class:`~nibabel.nifti1.Nifti1Image`\n Binarized version of the given input image. Output dtype is int.\n\n See Also\n --------\n nilearn.image.threshold_img : To simply threshold but not binarize images.\n\n Examples\n --------\n Let's load an image using nilearn datasets module::\n\n >>> from nilearn import datasets\n >>> anatomical_image = datasets.load_mni152_template()\n\n Now we binarize it, generating a pseudo brainmask::\n\n >>> from nilearn.image import binarize_img\n >>> img = binarize_img(anatomical_image)\n\n \"\"\"\n return math_img(\n \"img.astype(bool).astype(int)\",\n img=threshold_img(img, threshold, mask_img=mask_img)\n )\n\n\n@rename_parameters({'sessions': 'runs'}, '0.10.0')\ndef clean_img(imgs, runs=None, detrend=True, standardize=True,\n confounds=None, low_pass=None, high_pass=None, t_r=None,\n ensure_finite=False, mask_img=None):\n \"\"\"Improve SNR on masked fMRI signals.\n\n This function can do several things on the input signals, in\n the following order:\n\n - detrend\n - low- and high-pass filter\n - remove confounds\n - standardize\n\n Low-pass filtering improves specificity.\n\n High-pass filtering should be kept small, to keep some sensitivity.\n\n Filtering is only meaningful on evenly-sampled signals.\n\n According to Lindquist et al. 
(2018), removal of confounds will be done\n orthogonally to temporal filters (low- and/or high-pass filters), if both\n are specified.\n\n .. versionadded:: 0.2.5\n\n Parameters\n ----------\n imgs : Niimg-like object\n 4D image. The signals in the last dimension are filtered (see\n http://nilearn.github.io/manipulating_images/input_output.html\n for a detailed description of the valid input types).\n\n runs : :class:`numpy.ndarray`, optional\n Add a run level to the cleaning process. Each run will be\n cleaned independently. Must be a 1D array of n_samples elements.\n\n .. warning::\n\n 'runs' replaces 'sessions' after release 0.10.0.\n Using 'session' will result in an error after release 0.10.0.\n\n Default=``None``.\n\n detrend : :obj:`bool`, optional\n If detrending should be applied on timeseries (before confound removal).\n Default=True.\n\n standardize : :obj:`bool`, optional\n If True, returned signals are set to unit variance. Default=True.\n\n confounds : :class:`numpy.ndarray`, :obj:`str` or :obj:`list` of\n Confounds timeseries. optional\n Shape must be (instant number, confound number), or just (instant number,)\n The number of time instants in signals and confounds must be\n identical (i.e. signals.shape[0] == confounds.shape[0]).\n If a string is provided, it is assumed to be the name of a csv file\n containing signals as columns, with an optional one-line header.\n If a list is provided, all confounds are removed from the input\n signal, as if all were in the same array.\n\n low_pass : :obj:`float`, optional\n Low cutoff frequencies, in Hertz.\n\n high_pass : :obj:`float`, optional\n High cutoff frequencies, in Hertz.\n\n t_r : :obj:`float`, optional\n Repetition time, in second (sampling period). Set to None if not\n specified. Mandatory if used together with `low_pass` or `high_pass`.\n\n ensure_finite : :obj:`bool`, optional\n If True, the non-finite values (NaNs and infs) found in the images\n will be replaced by zeros. Default=False.\n\n mask_img : Niimg-like object, optional\n If provided, signal is only cleaned from voxels inside the mask. If\n mask is provided, it should have same shape and affine as imgs.\n If not provided, all voxels are used.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n Niimg-like object\n Input images, cleaned. Same shape as `imgs`.\n\n Notes\n -----\n Confounds removal is based on a projection on the orthogonal\n of the signal space [:footcite:`friston1994statistical`].\n\n Orthogonalization between temporal filters and confound removal is based on\n suggestions in [:footcite:`Lindquist407676`].\n\n References\n ----------\n .. footbibliography::\n\n See Also\n --------\n nilearn.signal.clean\n\n \"\"\"\n # Avoid circular import\n from .image import new_img_like\n from .. import masking\n\n imgs_ = check_niimg_4d(imgs)\n\n # Check if t_r is set, otherwise propose t_r from imgs header\n if low_pass is not None or high_pass is not None:\n if t_r is None:\n\n # We raise an error, instead of using the header's t_r as this\n # value is considered to be non-reliable\n raise ValueError(\n \"Repetition time (t_r) must be specified for filtering. You \"\n \"specified None. 
imgs header suggest it to be {0}\".format(\n imgs.header.get_zooms()[3]))\n\n # Prepare signal for cleaning\n if mask_img is not None:\n signals = masking.apply_mask(imgs_, mask_img)\n else:\n signals = get_data(imgs_).reshape(-1, imgs_.shape[-1]).T\n\n # Clean signal\n data = signal.clean(\n signals, runs=runs, detrend=detrend, standardize=standardize,\n confounds=confounds, low_pass=low_pass, high_pass=high_pass, t_r=t_r,\n ensure_finite=ensure_finite)\n\n # Put results back into Niimg-like object\n if mask_img is not None:\n imgs_ = masking.unmask(data, mask_img)\n else:\n imgs_ = new_img_like(\n imgs_, data.T.reshape(imgs_.shape), copy_header=True)\n\n return imgs_\n\n\ndef load_img(img, wildcards=True, dtype=None):\n \"\"\"Load a Niimg-like object from filenames or list of filenames.\n\n .. versionadded:: 0.2.5\n\n Parameters\n ----------\n img : Niimg-like object\n If string, consider it as a path to NIfTI image and call `nibabel.load()`\n on it. The '~' symbol is expanded to the user home folder.\n If it is an object, check if affine attribute is present, raise\n `TypeError` otherwise.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n wildcards : :obj:`bool`, optional\n Use `img` as a regular expression to get a list of matching input\n filenames.\n If multiple files match, the returned list is sorted using an ascending\n order.\n If no file matches the regular expression, a `ValueError` exception is\n raised.\n Default=True.\n\n dtype : {dtype, \"auto\"}, optional\n Data type toward which the data should be converted. If \"auto\", the\n data will be converted to int32 if dtype is discrete and float32 if it\n is continuous.\n\n Returns\n -------\n 3D/4D Niimg-like object\n Result can be :class:`~nibabel.nifti1.Nifti1Image` or the input, as-is. It is guaranteed\n that the returned object has an affine attributes and that\n nilearn.image.get_data returns its data.\n\n \"\"\"\n return check_niimg(img, wildcards=wildcards, dtype=dtype)\n\n\ndef largest_connected_component_img(imgs):\n \"\"\"Return the largest connected component of an image or list of images.\n\n .. versionadded:: 0.3.1\n\n Parameters\n ----------\n imgs : Niimg-like object or iterable of Niimg-like objects (3D)\n Image(s) to extract the largest connected component from.\n See http://nilearn.github.io/manipulating_images/input_output.html.\n\n Returns\n -------\n 3D Niimg-like object or list of\n Image or list of images containing the largest connected component.\n\n Notes\n -----\n **Handling big-endian in given Nifti image**\n This function changes the existing byte-ordering information to new byte\n order, if the dtype in given Nifti image has non-native data type.\n This operation is done internally to avoid big-endian issues with\n scipy ndimage module.\n\n \"\"\"\n from .._utils.ndimage import largest_connected_component\n\n if hasattr(imgs, \"__iter__\") and not isinstance(imgs, str):\n single_img = False\n else:\n single_img = True\n imgs = [imgs]\n\n ret = []\n for img in imgs:\n img = check_niimg_3d(img)\n affine = img.affine\n largest_component = largest_connected_component(_safe_get_data(img))\n ret.append(new_img_like(img, largest_component, affine,\n copy_header=True))\n\n if single_img:\n return ret[0]\n else:\n return ret\n", "path": "nilearn/image/image.py" } ]
diff --git a/nilearn/image/image.py b/nilearn/image/image.py index 9d37eef921..05770b4ead 100644 --- a/nilearn/image/image.py +++ b/nilearn/image/image.py @@ -697,7 +697,8 @@ def new_img_like(ref_niimg, data, affine=None, copy_header=False): Returns ------- Niimg-like object - A loaded image with the same type (and header) as the reference image. + A loaded image with the same file type (and, optionally, header) + as the reference image. """ # Hand-written loading code to avoid too much memory consumption
ibis-project__ibis-2157
Interactive mode doesn't work in Jupyter Running the next code in the terminal works as expected, `.count()` returns the number of rows as an `int`, since the option `interactive` is set to `True`: ```python >>> import ibis >>> print(ibis.__version__) 1.3.0+24.gd00a112.dirty >>> ibis.options.interactive = True >>> conn = ibis.sqlite.connect('geography.db') >>> conn.table('countries').count() 252 ``` But running the same exact code in a notebook returns the expression visualization instead: ![ibis_interactive_bug](https://user-images.githubusercontent.com/10058240/77928929-f7720500-72a0-11ea-9ffb-080eb810c10a.png)
[ { "content": "import itertools\nimport os\nimport webbrowser\n\nimport numpy as np\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.config as config\nimport ibis.util as util\n\n# TODO move methods containing ops import to api.py\n\n\nclass Expr:\n \"\"\"Base expression class\"\"\"\n\n def _type_display(self):\n return type(self).__name__\n\n def __init__(self, arg):\n # TODO: all inputs must inherit from a common table API\n self._arg = arg\n\n def __repr__(self):\n if not config.options.interactive:\n return self._repr()\n\n try:\n result = self.execute()\n except com.TranslationError as e:\n output = (\n 'Translation to backend failed\\n'\n 'Error message: {0}\\n'\n 'Expression repr follows:\\n{1}'.format(e.args[0], self._repr())\n )\n return output\n else:\n return repr(result)\n\n def __hash__(self):\n return hash(self._key)\n\n def __bool__(self):\n raise ValueError(\n \"The truth value of an Ibis expression is not \" \"defined\"\n )\n\n __nonzero__ = __bool__\n\n def _repr(self, memo=None):\n from ibis.expr.format import ExprFormatter\n\n return ExprFormatter(self, memo=memo).get_result()\n\n @property\n def _safe_name(self):\n \"\"\"Get the name of an expression `expr`, returning ``None`` if the\n expression has no name.\n\n Returns\n -------\n Optional[str]\n \"\"\"\n try:\n return self.get_name()\n except (com.ExpressionError, AttributeError):\n return None\n\n @property\n def _key(self):\n \"\"\"Key suitable for hashing an expression.\n\n Returns\n -------\n Tuple[Type[Expr], Optional[str], ibis.expr.operations.Node]\n A tuple of hashable objects uniquely identifying this expression.\n \"\"\"\n return type(self), self._safe_name, self.op()\n\n def _repr_png_(self):\n if not ibis.options.graphviz_repr:\n return None\n try:\n import ibis.expr.visualize as viz\n except ImportError:\n return None\n else:\n try:\n return viz.to_graph(self).pipe(format='png')\n except Exception:\n # Something may go wrong, and we can't error in the notebook\n # so fallback to the default text representation.\n return None\n\n def visualize(self, format='svg'):\n \"\"\"Visualize an expression in the browser as an SVG image.\n\n Parameters\n ----------\n format : str, optional\n Defaults to ``'svg'``. Some additional formats are\n ``'jpeg'`` and ``'png'``. These are specified by the ``graphviz``\n Python library.\n\n Notes\n -----\n This method opens a web browser tab showing the image of the expression\n graph created by the code in :module:`ibis.expr.visualize`.\n\n Raises\n ------\n ImportError\n If ``graphviz`` is not installed.\n \"\"\"\n import ibis.expr.visualize as viz\n\n path = viz.draw(viz.to_graph(self), format=format)\n webbrowser.open('file://{}'.format(os.path.abspath(path)))\n\n def pipe(self, f, *args, **kwargs):\n \"\"\"Generic composition function to enable expression pipelining.\n\n Parameters\n ----------\n f : function or (function, arg_name) tuple\n If the expression needs to be passed as anything other than the first\n argument to the function, pass a tuple with the argument name. 
For\n example, (f, 'data') if the function f expects a 'data' keyword\n args : positional arguments\n kwargs : keyword arguments\n\n Examples\n --------\n >>> import ibis\n >>> t = ibis.table([('a', 'int64'), ('b', 'string')], name='t')\n >>> f = lambda a: (a + 1).name('a')\n >>> g = lambda a: (a * 2).name('a')\n >>> result1 = t.a.pipe(f).pipe(g)\n >>> result1 # doctest: +NORMALIZE_WHITESPACE\n ref_0\n UnboundTable[table]\n name: t\n schema:\n a : int64\n b : string\n a = Multiply[int64*]\n left:\n a = Add[int64*]\n left:\n a = Column[int64*] 'a' from table\n ref_0\n right:\n Literal[int8]\n 1\n right:\n Literal[int8]\n 2\n >>> result2 = g(f(t.a)) # equivalent to the above\n >>> result1.equals(result2)\n True\n\n Returns\n -------\n result : result type of passed function\n \"\"\"\n if isinstance(f, tuple):\n f, data_keyword = f\n kwargs = kwargs.copy()\n kwargs[data_keyword] = self\n return f(*args, **kwargs)\n else:\n return f(self, *args, **kwargs)\n\n __call__ = pipe\n\n def op(self):\n return self._arg\n\n @property\n def _factory(self):\n return type(self)\n\n def execute(self, limit='default', params=None, **kwargs):\n \"\"\"\n If this expression is based on physical tables in a database backend,\n execute it against that backend.\n\n Parameters\n ----------\n limit : integer or None, default 'default'\n Pass an integer to effect a specific row limit. limit=None means \"no\n limit\". The default is whatever is in ibis.options.\n\n Returns\n -------\n result : expression-dependent\n Result of compiling expression and executing in backend\n \"\"\"\n from ibis.client import execute\n\n return execute(self, limit=limit, params=params, **kwargs)\n\n def compile(self, limit=None, params=None):\n \"\"\"\n Compile expression to whatever execution target, to verify\n\n Returns\n -------\n compiled : value or list\n query representation or list thereof\n \"\"\"\n from ibis.client import compile\n\n return compile(self, limit=limit, params=params)\n\n def verify(self):\n \"\"\"\n Returns True if expression can be compiled to its attached client\n \"\"\"\n try:\n self.compile()\n except Exception:\n return False\n else:\n return True\n\n def equals(self, other, cache=None):\n if type(self) != type(other):\n return False\n return self._arg.equals(other._arg, cache=cache)\n\n def _root_tables(self):\n return self.op().root_tables()\n\n\nclass ExprList(Expr):\n def _type_display(self):\n return ', '.join(expr._type_display() for expr in self.exprs())\n\n def exprs(self):\n return self.op().exprs\n\n def names(self):\n return [x.get_name() for x in self.exprs()]\n\n def types(self):\n return [x.type() for x in self.exprs()]\n\n def schema(self):\n import ibis.expr.schema as sch\n\n return sch.Schema(self.names(), self.types())\n\n def rename(self, f):\n import ibis.expr.operations as ops\n\n new_exprs = [x.name(f(x.get_name())) for x in self.exprs()]\n return ops.ExpressionList(new_exprs).to_expr()\n\n def prefix(self, value):\n return self.rename(lambda x: value + x)\n\n def suffix(self, value):\n return self.rename(lambda x: x + value)\n\n def concat(self, *others):\n \"\"\"\n Concatenate expression lists\n\n Returns\n -------\n combined : ExprList\n \"\"\"\n import ibis.expr.operations as ops\n\n exprs = list(self.exprs())\n for o in others:\n if not isinstance(o, ExprList):\n raise TypeError(o)\n exprs.extend(o.exprs())\n return ops.ExpressionList(exprs).to_expr()\n\n\n# ---------------------------------------------------------------------\n# Helper / factory functions\n\n\nclass 
ValueExpr(Expr):\n\n \"\"\"\n Base class for a data generating expression having a fixed and known type,\n either a single value (scalar)\n \"\"\"\n\n def __init__(self, arg, dtype, name=None):\n super().__init__(arg)\n self._name = name\n self._dtype = dtype\n\n def equals(self, other, cache=None):\n return (\n isinstance(other, ValueExpr)\n and self._name == other._name\n and self._dtype == other._dtype\n and super().equals(other, cache=cache)\n )\n\n def has_name(self):\n if self._name is not None:\n return True\n return self.op().has_resolved_name()\n\n def get_name(self):\n if self._name is not None:\n # This value has been explicitly named\n return self._name\n\n # In some but not all cases we can get a name from the node that\n # produces the value\n return self.op().resolve_name()\n\n def name(self, name):\n return self._factory(self._arg, name=name)\n\n def type(self):\n return self._dtype\n\n @property\n def _factory(self):\n def factory(arg, name=None):\n return type(self)(arg, dtype=self.type(), name=name)\n\n return factory\n\n\nclass ScalarExpr(ValueExpr):\n def _type_display(self):\n return str(self.type())\n\n\nclass ColumnExpr(ValueExpr):\n def _type_display(self):\n return '{}*'.format(self.type())\n\n def parent(self):\n return self._arg\n\n def to_projection(self):\n \"\"\"\n Promote this column expression to a table projection\n \"\"\"\n roots = self._root_tables()\n if len(roots) > 1:\n raise com.RelationError(\n 'Cannot convert array expression '\n 'involving multiple base table references '\n 'to a projection'\n )\n\n table = TableExpr(roots[0])\n return table.projection([self])\n\n\nclass AnalyticExpr(Expr):\n @property\n def _factory(self):\n def factory(arg):\n return type(self)(arg)\n\n return factory\n\n def _type_display(self):\n return str(self.type())\n\n def type(self):\n return 'analytic'\n\n\nclass TableExpr(Expr):\n @property\n def _factory(self):\n def factory(arg):\n return TableExpr(arg)\n\n return factory\n\n def _type_display(self):\n return 'table'\n\n def _is_valid(self, exprs):\n try:\n self._assert_valid(util.promote_list(exprs))\n except com.RelationError:\n return False\n else:\n return True\n\n def _assert_valid(self, exprs):\n from ibis.expr.analysis import ExprValidator\n\n ExprValidator([self]).validate_all(exprs)\n\n def __contains__(self, name):\n return name in self.schema()\n\n def __getitem__(self, what):\n if isinstance(what, (str, int)):\n return self.get_column(what)\n\n if isinstance(what, slice):\n step = what.step\n if step is not None and step != 1:\n raise ValueError('Slice step can only be 1')\n start = what.start or 0\n stop = what.stop\n\n if stop is None or stop < 0:\n raise ValueError('End index must be a positive number')\n\n if start < 0:\n raise ValueError('Start index must be a positive number')\n\n return self.limit(stop - start, offset=start)\n\n what = bind_expr(self, what)\n\n if isinstance(what, AnalyticExpr):\n what = what._table_getitem()\n\n if isinstance(what, (list, tuple, TableExpr)):\n # Projection case\n return self.projection(what)\n elif isinstance(what, BooleanColumn):\n # Boolean predicate\n return self.filter([what])\n elif isinstance(what, ColumnExpr):\n # Projection convenience\n return self.projection(what)\n else:\n raise NotImplementedError(\n 'Selection rows or columns with {} objects is not '\n 'supported'.format(type(what).__name__)\n )\n\n def __len__(self):\n raise com.ExpressionError('Use .count() instead')\n\n def __setstate__(self, instance_dictionary):\n self.__dict__ = 
instance_dictionary\n\n def __getattr__(self, key):\n try:\n schema = self.schema()\n except com.IbisError:\n raise AttributeError(key)\n\n if key not in schema:\n raise AttributeError(key)\n\n try:\n return self.get_column(key)\n except com.IbisTypeError:\n raise AttributeError(key)\n\n def __dir__(self):\n attrs = dir(type(self))\n if self._is_materialized():\n attrs = frozenset(attrs + self.schema().names)\n return sorted(attrs)\n\n def _resolve(self, exprs):\n exprs = util.promote_list(exprs)\n return list(map(self._ensure_expr, exprs))\n\n def _ensure_expr(self, expr):\n if isinstance(expr, str):\n return self[expr]\n elif isinstance(expr, (int, np.integer)):\n return self[self.schema().name_at_position(expr)]\n elif not isinstance(expr, Expr):\n return expr(self)\n else:\n return expr\n\n def _get_type(self, name):\n return self._arg.get_type(name)\n\n def get_columns(self, iterable):\n \"\"\"\n Get multiple columns from the table\n\n Examples\n --------\n >>> import ibis\n >>> table = ibis.table(\n ... [\n ... ('a', 'int64'),\n ... ('b', 'string'),\n ... ('c', 'timestamp'),\n ... ('d', 'float'),\n ... ],\n ... name='t'\n ... )\n >>> a, b, c = table.get_columns(['a', 'b', 'c'])\n\n Returns\n -------\n columns : list of column/array expressions\n \"\"\"\n return [self.get_column(x) for x in iterable]\n\n def get_column(self, name):\n \"\"\"\n Get a reference to a single column from the table\n\n Returns\n -------\n column : array expression\n \"\"\"\n import ibis.expr.operations as ops\n\n ref = ops.TableColumn(name, self)\n return ref.to_expr()\n\n @property\n def columns(self):\n return self.schema().names\n\n def schema(self):\n \"\"\"\n Get the schema for this table (if one is known)\n\n Returns\n -------\n schema : Schema\n \"\"\"\n if not self._is_materialized():\n raise com.IbisError('Table operation is not yet materialized')\n return self.op().schema\n\n def _is_materialized(self):\n # The operation produces a known schema\n return self.op().has_schema()\n\n def group_by(self, by=None, **additional_grouping_expressions):\n \"\"\"\n Create an intermediate grouped table expression, pending some group\n operation to be applied with it.\n\n Examples\n --------\n >>> import ibis\n >>> pairs = [('a', 'int32'), ('b', 'timestamp'), ('c', 'double')]\n >>> t = ibis.table(pairs)\n >>> b1, b2 = t.a, t.b\n >>> result = t.group_by([b1, b2]).aggregate(sum_of_c=t.c.sum())\n\n Notes\n -----\n group_by and groupby are equivalent, with `groupby` being provided for\n ease-of-use for pandas users.\n\n Returns\n -------\n grouped_expr : GroupedTableExpr\n \"\"\"\n from ibis.expr.groupby import GroupedTableExpr\n\n return GroupedTableExpr(self, by, **additional_grouping_expressions)\n\n groupby = group_by\n\n\n# -----------------------------------------------------------------------------\n# Declare all typed ValueExprs. 
This is what the user will actually interact\n# with: an instance of each is well-typed and includes all valid methods\n# defined for each type.\n\n\nclass AnyValue(ValueExpr):\n pass # noqa: E701,E302\n\n\nclass AnyScalar(ScalarExpr, AnyValue):\n pass # noqa: E701,E302\n\n\nclass AnyColumn(ColumnExpr, AnyValue):\n pass # noqa: E701,E302\n\n\nclass NullValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass NullScalar(AnyScalar, NullValue):\n pass # noqa: E701,E302\n\n\nclass NullColumn(AnyColumn, NullValue):\n pass # noqa: E701,E302\n\n\nclass NumericValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass NumericScalar(AnyScalar, NumericValue):\n pass # noqa: E701,E302\n\n\nclass NumericColumn(AnyColumn, NumericValue):\n pass # noqa: E701,E302\n\n\nclass BooleanValue(NumericValue):\n pass # noqa: E701,E302\n\n\nclass BooleanScalar(NumericScalar, BooleanValue):\n pass # noqa: E701,E302\n\n\nclass BooleanColumn(NumericColumn, BooleanValue):\n pass # noqa: E701,E302\n\n\nclass IntegerValue(NumericValue):\n pass # noqa: E701,E302\n\n\nclass IntegerScalar(NumericScalar, IntegerValue):\n pass # noqa: E701,E302\n\n\nclass IntegerColumn(NumericColumn, IntegerValue):\n pass # noqa: E701,E302\n\n\nclass FloatingValue(NumericValue):\n pass # noqa: E701,E302\n\n\nclass FloatingScalar(NumericScalar, FloatingValue):\n pass # noqa: E701,E302\n\n\nclass FloatingColumn(NumericColumn, FloatingValue):\n pass # noqa: E701,E302\n\n\nclass DecimalValue(NumericValue):\n pass # noqa: E701,E302\n\n\nclass DecimalScalar(NumericScalar, DecimalValue):\n pass # noqa: E701,E302\n\n\nclass DecimalColumn(NumericColumn, DecimalValue):\n pass # noqa: E701,E302\n\n\nclass StringValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass StringScalar(AnyScalar, StringValue):\n pass # noqa: E701,E302\n\n\nclass StringColumn(AnyColumn, StringValue):\n pass # noqa: E701,E302\n\n\nclass BinaryValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass BinaryScalar(AnyScalar, BinaryValue):\n pass # noqa: E701,E302\n\n\nclass BinaryColumn(AnyColumn, BinaryValue):\n pass # noqa: E701,E302\n\n\nclass TemporalValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass TemporalScalar(AnyScalar, TemporalValue):\n pass # noqa: E701,E302\n\n\nclass TemporalColumn(AnyColumn, TemporalValue):\n pass # noqa: E701,E302\n\n\nclass TimeValue(TemporalValue):\n pass # noqa: E701,E302\n\n\nclass TimeScalar(TemporalScalar, TimeValue):\n pass # noqa: E701,E302\n\n\nclass TimeColumn(TemporalColumn, TimeValue):\n pass # noqa: E701,E302\n\n\nclass DateValue(TemporalValue):\n pass # noqa: E701,E302\n\n\nclass DateScalar(TemporalScalar, DateValue):\n pass # noqa: E701,E302\n\n\nclass DateColumn(TemporalColumn, DateValue):\n pass # noqa: E701,E302\n\n\nclass TimestampValue(TemporalValue):\n pass # noqa: E701,E302\n\n\nclass TimestampScalar(TemporalScalar, TimestampValue):\n pass # noqa: E701,E302\n\n\nclass TimestampColumn(TemporalColumn, TimestampValue):\n pass # noqa: E701,E302\n\n\nclass CategoryValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass CategoryScalar(AnyScalar, CategoryValue):\n pass # noqa: E701,E302\n\n\nclass CategoryColumn(AnyColumn, CategoryValue):\n pass # noqa: E701,E302\n\n\nclass EnumValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass EnumScalar(AnyScalar, EnumValue):\n pass # noqa: E701,E302\n\n\nclass EnumColumn(AnyColumn, EnumValue):\n pass # noqa: E701,E302\n\n\nclass ArrayValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass ArrayScalar(AnyScalar, ArrayValue):\n pass # noqa: E701,E302\n\n\nclass ArrayColumn(AnyColumn, ArrayValue):\n pass # noqa: 
E701,E302\n\n\nclass SetValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass SetScalar(AnyScalar, SetValue):\n pass # noqa: E701,E302\n\n\nclass SetColumn(AnyColumn, SetValue):\n pass # noqa: E701,E302\n\n\nclass MapValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass MapScalar(AnyScalar, MapValue):\n pass # noqa: E701,E302\n\n\nclass MapColumn(AnyColumn, MapValue):\n pass # noqa: E701,E302\n\n\nclass JSONValue(StringValue):\n pass # noqa: E701,E302\n\n\nclass JSONScalar(StringScalar, JSONValue):\n pass # noqa: E701,E302\n\n\nclass JSONColumn(StringColumn, JSONValue):\n pass # noqa: E701,E302\n\n\nclass JSONBValue(BinaryValue):\n pass # noqa: E701,E302\n\n\nclass JSONBScalar(BinaryScalar, JSONBValue):\n pass # noqa: E701,E302\n\n\nclass JSONBColumn(BinaryColumn, JSONBValue):\n pass # noqa: E701,E302\n\n\nclass StructValue(AnyValue):\n def __dir__(self):\n return sorted(\n frozenset(itertools.chain(dir(type(self)), self.type().names))\n )\n\n\nclass StructScalar(AnyScalar, StructValue):\n pass # noqa: E701,E302\n\n\nclass StructColumn(AnyColumn, StructValue):\n pass # noqa: E701,E302\n\n\nclass IntervalValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass IntervalScalar(AnyScalar, IntervalValue):\n pass # noqa: E701,E302\n\n\nclass IntervalColumn(AnyColumn, IntervalValue):\n pass # noqa: E701,E302\n\n\nclass GeoSpatialValue(NumericValue):\n pass # noqa: E701,E302\n\n\nclass GeoSpatialScalar(NumericScalar, GeoSpatialValue):\n pass # noqa: E701,E302,E501\n\n\nclass GeoSpatialColumn(NumericColumn, GeoSpatialValue):\n pass # noqa: E701,E302,E501\n\n\nclass PointValue(GeoSpatialValue):\n pass # noqa: E701,E302\n\n\nclass PointScalar(GeoSpatialScalar, PointValue):\n pass # noqa: E701,E302\n\n\nclass PointColumn(GeoSpatialColumn, PointValue):\n pass # noqa: E701,E302\n\n\nclass LineStringValue(GeoSpatialValue):\n pass # noqa: E701,E302\n\n\nclass LineStringScalar(GeoSpatialScalar, LineStringValue):\n pass # noqa: E701,E302,E501\n\n\nclass LineStringColumn(GeoSpatialColumn, LineStringValue):\n pass # noqa: E701,E302,E501\n\n\nclass PolygonValue(GeoSpatialValue):\n pass # noqa: E701,E302\n\n\nclass PolygonScalar(GeoSpatialScalar, PolygonValue):\n pass # noqa: E701,E302\n\n\nclass PolygonColumn(GeoSpatialColumn, PolygonValue):\n pass # noqa: E701,E302\n\n\nclass MultiLineStringValue(GeoSpatialValue):\n pass # noqa: E701,E302\n\n\nclass MultiLineStringScalar(\n GeoSpatialScalar, MultiLineStringValue\n): # noqa: E302\n pass # noqa: E701\n\n\nclass MultiLineStringColumn(\n GeoSpatialColumn, MultiLineStringValue\n): # noqa: E302\n pass # noqa: E701\n\n\nclass MultiPointValue(GeoSpatialValue):\n pass # noqa: E701,E302\n\n\nclass MultiPointScalar(GeoSpatialScalar, MultiPointValue): # noqa: E302\n pass # noqa: E701\n\n\nclass MultiPointColumn(GeoSpatialColumn, MultiPointValue): # noqa: E302\n pass # noqa: E701\n\n\nclass MultiPolygonValue(GeoSpatialValue):\n pass # noqa: E701,E302\n\n\nclass MultiPolygonScalar(GeoSpatialScalar, MultiPolygonValue): # noqa: E302\n pass # noqa: E701\n\n\nclass MultiPolygonColumn(GeoSpatialColumn, MultiPolygonValue): # noqa: E302\n pass # noqa: E701\n\n\nclass UUIDValue(StringValue):\n pass # noqa: E701,E302\n\n\nclass UUIDScalar(StringScalar, UUIDValue):\n pass # noqa: E701,E302\n\n\nclass UUIDColumn(StringColumn, UUIDValue):\n pass # noqa: E701,E302\n\n\nclass ListExpr(ColumnExpr, AnyValue):\n @property\n def values(self):\n return self.op().values\n\n def __iter__(self):\n return iter(self.values)\n\n def __getitem__(self, key):\n return self.values[key]\n\n def __add__(self, 
other):\n other_values = tuple(getattr(other, 'values', other))\n return type(self.op())(self.values + other_values).to_expr()\n\n def __radd__(self, other):\n other_values = tuple(getattr(other, 'values', other))\n return type(self.op())(other_values + self.values).to_expr()\n\n def __bool__(self):\n return bool(self.values)\n\n __nonzero__ = __bool__\n\n def __len__(self):\n return len(self.values)\n\n\nclass TopKExpr(AnalyticExpr):\n def type(self):\n return 'topk'\n\n def _table_getitem(self):\n return self.to_filter()\n\n def to_filter(self):\n # TODO: move to api.py\n import ibis.expr.operations as ops\n\n return ops.SummaryFilter(self).to_expr()\n\n def to_aggregation(\n self, metric_name=None, parent_table=None, backup_metric_name=None\n ):\n \"\"\"\n Convert the TopK operation to a table aggregation\n \"\"\"\n op = self.op()\n\n arg_table = find_base_table(op.arg)\n\n by = op.by\n if not isinstance(by, Expr):\n by = by(arg_table)\n by_table = arg_table\n else:\n by_table = find_base_table(op.by)\n\n if metric_name is None:\n if by.get_name() == op.arg.get_name():\n by = by.name(backup_metric_name)\n else:\n by = by.name(metric_name)\n\n if arg_table.equals(by_table):\n agg = arg_table.aggregate(by, by=[op.arg])\n elif parent_table is not None:\n agg = parent_table.aggregate(by, by=[op.arg])\n else:\n raise com.IbisError(\n 'Cross-table TopK; must provide a parent ' 'joined table'\n )\n\n return agg.sort_by([(by.get_name(), False)]).limit(op.k)\n\n\nclass SortExpr(Expr):\n def _type_display(self):\n return 'array-sort'\n\n def get_name(self):\n return self.op().resolve_name()\n\n\nclass DayOfWeek(Expr):\n def index(self):\n \"\"\"Get the index of the day of the week.\n\n Returns\n -------\n IntegerValue\n The index of the day of the week. Ibis follows pandas conventions,\n where **Monday = 0 and Sunday = 6**.\n \"\"\"\n import ibis.expr.operations as ops\n\n return ops.DayOfWeekIndex(self.op().arg).to_expr()\n\n def full_name(self):\n \"\"\"Get the name of the day of the week.\n\n Returns\n -------\n StringValue\n The name of the day of the week\n \"\"\"\n import ibis.expr.operations as ops\n\n return ops.DayOfWeekName(self.op().arg).to_expr()\n\n\ndef bind_expr(table, expr):\n if isinstance(expr, (list, tuple)):\n return [bind_expr(table, x) for x in expr]\n\n return table._ensure_expr(expr)\n\n\n# TODO: move to analysis\ndef find_base_table(expr):\n if isinstance(expr, TableExpr):\n return expr\n\n for arg in expr.op().flat_args():\n if isinstance(arg, Expr):\n r = find_base_table(arg)\n if isinstance(r, TableExpr):\n return r\n\n\n_NULL = None\n\n\ndef null():\n \"\"\"Create a NULL/NA scalar\"\"\"\n import ibis.expr.operations as ops\n\n global _NULL\n if _NULL is None:\n _NULL = ops.NullLiteral().to_expr()\n\n return _NULL\n\n\ndef literal(value, type=None):\n \"\"\"Create a scalar expression from a Python value.\n\n Parameters\n ----------\n value : some Python basic type\n A Python value\n type : ibis type or string, optional\n An instance of :class:`ibis.expr.datatypes.DataType` or a string\n indicating the ibis type of `value`. 
This parameter should only be used\n in cases where ibis's type inference isn't sufficient for discovering\n the type of `value`.\n\n Returns\n -------\n literal_value : Literal\n An expression representing a literal value\n\n Examples\n --------\n >>> import ibis\n >>> x = ibis.literal(42)\n >>> x.type()\n int8\n >>> y = ibis.literal(42, type='double')\n >>> y.type()\n float64\n >>> ibis.literal('foobar', type='int64') # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n TypeError: Value 'foobar' cannot be safely coerced to int64\n \"\"\"\n import ibis.expr.datatypes as dt\n import ibis.expr.operations as ops\n\n if hasattr(value, 'op') and isinstance(value.op(), ops.Literal):\n return value\n\n try:\n inferred_dtype = dt.infer(value)\n except com.InputTypeError:\n has_inferred = False\n else:\n has_inferred = True\n\n if type is None:\n has_explicit = False\n else:\n has_explicit = True\n explicit_dtype = dt.dtype(type)\n\n if has_explicit and has_inferred:\n try:\n # ensure type correctness: check that the inferred dtype is\n # implicitly castable to the explicitly given dtype and value\n dtype = inferred_dtype.cast(explicit_dtype, value=value)\n except com.IbisTypeError:\n raise TypeError(\n 'Value {!r} cannot be safely coerced to {}'.format(value, type)\n )\n elif has_explicit:\n dtype = explicit_dtype\n elif has_inferred:\n dtype = inferred_dtype\n else:\n raise TypeError(\n 'The datatype of value {!r} cannot be inferred, try '\n 'passing it explicitly with the `type` keyword.'.format(value)\n )\n\n if dtype is dt.null:\n return null().cast(dtype)\n else:\n return ops.Literal(value, dtype=dtype).to_expr()\n\n\ndef sequence(values):\n \"\"\"\n Wrap a list of Python values as an Ibis sequence type\n\n Parameters\n ----------\n values : list\n Should all be None or the same type\n\n Returns\n -------\n seq : Sequence\n \"\"\"\n import ibis.expr.operations as ops\n\n return ops.ValueList(values).to_expr()\n\n\ndef as_value_expr(val):\n import pandas as pd\n\n if not isinstance(val, Expr):\n if isinstance(val, (tuple, list)):\n val = sequence(val)\n elif isinstance(val, pd.Series):\n val = sequence(list(val))\n else:\n val = literal(val)\n\n return val\n\n\ndef param(type):\n \"\"\"Create a parameter of a particular type to be defined just before\n execution.\n\n Parameters\n ----------\n type : dt.DataType\n The type of the unbound parameter, e.g., double, int64, date, etc.\n\n Returns\n -------\n ScalarExpr\n\n Examples\n --------\n >>> import ibis\n >>> import ibis.expr.datatypes as dt\n >>> start = ibis.param(dt.date)\n >>> end = ibis.param(dt.date)\n >>> schema = [('timestamp_col', 'timestamp'), ('value', 'double')]\n >>> t = ibis.table(schema)\n >>> predicates = [t.timestamp_col >= start, t.timestamp_col <= end]\n >>> expr = t.filter(predicates).value.sum()\n \"\"\"\n import ibis.expr.datatypes as dt\n import ibis.expr.operations as ops\n\n return ops.ScalarParameter(dt.dtype(type)).to_expr()\n\n\nclass UnnamedMarker:\n pass\n\n\nunnamed = UnnamedMarker()\n", "path": "ibis/expr/types.py" } ]
[ { "content": "import itertools\nimport os\nimport webbrowser\n\nimport numpy as np\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.config as config\nimport ibis.util as util\n\n# TODO move methods containing ops import to api.py\n\n\nclass Expr:\n \"\"\"Base expression class\"\"\"\n\n def _type_display(self):\n return type(self).__name__\n\n def __init__(self, arg):\n # TODO: all inputs must inherit from a common table API\n self._arg = arg\n\n def __repr__(self):\n if not config.options.interactive:\n return self._repr()\n\n try:\n result = self.execute()\n except com.TranslationError as e:\n output = (\n 'Translation to backend failed\\n'\n 'Error message: {0}\\n'\n 'Expression repr follows:\\n{1}'.format(e.args[0], self._repr())\n )\n return output\n else:\n return repr(result)\n\n def __hash__(self):\n return hash(self._key)\n\n def __bool__(self):\n raise ValueError(\n \"The truth value of an Ibis expression is not \" \"defined\"\n )\n\n __nonzero__ = __bool__\n\n def _repr(self, memo=None):\n from ibis.expr.format import ExprFormatter\n\n return ExprFormatter(self, memo=memo).get_result()\n\n @property\n def _safe_name(self):\n \"\"\"Get the name of an expression `expr`, returning ``None`` if the\n expression has no name.\n\n Returns\n -------\n Optional[str]\n \"\"\"\n try:\n return self.get_name()\n except (com.ExpressionError, AttributeError):\n return None\n\n @property\n def _key(self):\n \"\"\"Key suitable for hashing an expression.\n\n Returns\n -------\n Tuple[Type[Expr], Optional[str], ibis.expr.operations.Node]\n A tuple of hashable objects uniquely identifying this expression.\n \"\"\"\n return type(self), self._safe_name, self.op()\n\n def _repr_png_(self):\n if config.options.interactive or not ibis.options.graphviz_repr:\n return None\n try:\n import ibis.expr.visualize as viz\n except ImportError:\n return None\n else:\n try:\n return viz.to_graph(self).pipe(format='png')\n except Exception:\n # Something may go wrong, and we can't error in the notebook\n # so fallback to the default text representation.\n return None\n\n def visualize(self, format='svg'):\n \"\"\"Visualize an expression in the browser as an SVG image.\n\n Parameters\n ----------\n format : str, optional\n Defaults to ``'svg'``. Some additional formats are\n ``'jpeg'`` and ``'png'``. These are specified by the ``graphviz``\n Python library.\n\n Notes\n -----\n This method opens a web browser tab showing the image of the expression\n graph created by the code in :module:`ibis.expr.visualize`.\n\n Raises\n ------\n ImportError\n If ``graphviz`` is not installed.\n \"\"\"\n import ibis.expr.visualize as viz\n\n path = viz.draw(viz.to_graph(self), format=format)\n webbrowser.open('file://{}'.format(os.path.abspath(path)))\n\n def pipe(self, f, *args, **kwargs):\n \"\"\"Generic composition function to enable expression pipelining.\n\n Parameters\n ----------\n f : function or (function, arg_name) tuple\n If the expression needs to be passed as anything other than the first\n argument to the function, pass a tuple with the argument name. 
For\n example, (f, 'data') if the function f expects a 'data' keyword\n args : positional arguments\n kwargs : keyword arguments\n\n Examples\n --------\n >>> import ibis\n >>> t = ibis.table([('a', 'int64'), ('b', 'string')], name='t')\n >>> f = lambda a: (a + 1).name('a')\n >>> g = lambda a: (a * 2).name('a')\n >>> result1 = t.a.pipe(f).pipe(g)\n >>> result1 # doctest: +NORMALIZE_WHITESPACE\n ref_0\n UnboundTable[table]\n name: t\n schema:\n a : int64\n b : string\n a = Multiply[int64*]\n left:\n a = Add[int64*]\n left:\n a = Column[int64*] 'a' from table\n ref_0\n right:\n Literal[int8]\n 1\n right:\n Literal[int8]\n 2\n >>> result2 = g(f(t.a)) # equivalent to the above\n >>> result1.equals(result2)\n True\n\n Returns\n -------\n result : result type of passed function\n \"\"\"\n if isinstance(f, tuple):\n f, data_keyword = f\n kwargs = kwargs.copy()\n kwargs[data_keyword] = self\n return f(*args, **kwargs)\n else:\n return f(self, *args, **kwargs)\n\n __call__ = pipe\n\n def op(self):\n return self._arg\n\n @property\n def _factory(self):\n return type(self)\n\n def execute(self, limit='default', params=None, **kwargs):\n \"\"\"\n If this expression is based on physical tables in a database backend,\n execute it against that backend.\n\n Parameters\n ----------\n limit : integer or None, default 'default'\n Pass an integer to effect a specific row limit. limit=None means \"no\n limit\". The default is whatever is in ibis.options.\n\n Returns\n -------\n result : expression-dependent\n Result of compiling expression and executing in backend\n \"\"\"\n from ibis.client import execute\n\n return execute(self, limit=limit, params=params, **kwargs)\n\n def compile(self, limit=None, params=None):\n \"\"\"\n Compile expression to whatever execution target, to verify\n\n Returns\n -------\n compiled : value or list\n query representation or list thereof\n \"\"\"\n from ibis.client import compile\n\n return compile(self, limit=limit, params=params)\n\n def verify(self):\n \"\"\"\n Returns True if expression can be compiled to its attached client\n \"\"\"\n try:\n self.compile()\n except Exception:\n return False\n else:\n return True\n\n def equals(self, other, cache=None):\n if type(self) != type(other):\n return False\n return self._arg.equals(other._arg, cache=cache)\n\n def _root_tables(self):\n return self.op().root_tables()\n\n\nclass ExprList(Expr):\n def _type_display(self):\n return ', '.join(expr._type_display() for expr in self.exprs())\n\n def exprs(self):\n return self.op().exprs\n\n def names(self):\n return [x.get_name() for x in self.exprs()]\n\n def types(self):\n return [x.type() for x in self.exprs()]\n\n def schema(self):\n import ibis.expr.schema as sch\n\n return sch.Schema(self.names(), self.types())\n\n def rename(self, f):\n import ibis.expr.operations as ops\n\n new_exprs = [x.name(f(x.get_name())) for x in self.exprs()]\n return ops.ExpressionList(new_exprs).to_expr()\n\n def prefix(self, value):\n return self.rename(lambda x: value + x)\n\n def suffix(self, value):\n return self.rename(lambda x: x + value)\n\n def concat(self, *others):\n \"\"\"\n Concatenate expression lists\n\n Returns\n -------\n combined : ExprList\n \"\"\"\n import ibis.expr.operations as ops\n\n exprs = list(self.exprs())\n for o in others:\n if not isinstance(o, ExprList):\n raise TypeError(o)\n exprs.extend(o.exprs())\n return ops.ExpressionList(exprs).to_expr()\n\n\n# ---------------------------------------------------------------------\n# Helper / factory functions\n\n\nclass 
ValueExpr(Expr):\n\n \"\"\"\n Base class for a data generating expression having a fixed and known type,\n either a single value (scalar)\n \"\"\"\n\n def __init__(self, arg, dtype, name=None):\n super().__init__(arg)\n self._name = name\n self._dtype = dtype\n\n def equals(self, other, cache=None):\n return (\n isinstance(other, ValueExpr)\n and self._name == other._name\n and self._dtype == other._dtype\n and super().equals(other, cache=cache)\n )\n\n def has_name(self):\n if self._name is not None:\n return True\n return self.op().has_resolved_name()\n\n def get_name(self):\n if self._name is not None:\n # This value has been explicitly named\n return self._name\n\n # In some but not all cases we can get a name from the node that\n # produces the value\n return self.op().resolve_name()\n\n def name(self, name):\n return self._factory(self._arg, name=name)\n\n def type(self):\n return self._dtype\n\n @property\n def _factory(self):\n def factory(arg, name=None):\n return type(self)(arg, dtype=self.type(), name=name)\n\n return factory\n\n\nclass ScalarExpr(ValueExpr):\n def _type_display(self):\n return str(self.type())\n\n\nclass ColumnExpr(ValueExpr):\n def _type_display(self):\n return '{}*'.format(self.type())\n\n def parent(self):\n return self._arg\n\n def to_projection(self):\n \"\"\"\n Promote this column expression to a table projection\n \"\"\"\n roots = self._root_tables()\n if len(roots) > 1:\n raise com.RelationError(\n 'Cannot convert array expression '\n 'involving multiple base table references '\n 'to a projection'\n )\n\n table = TableExpr(roots[0])\n return table.projection([self])\n\n\nclass AnalyticExpr(Expr):\n @property\n def _factory(self):\n def factory(arg):\n return type(self)(arg)\n\n return factory\n\n def _type_display(self):\n return str(self.type())\n\n def type(self):\n return 'analytic'\n\n\nclass TableExpr(Expr):\n @property\n def _factory(self):\n def factory(arg):\n return TableExpr(arg)\n\n return factory\n\n def _type_display(self):\n return 'table'\n\n def _is_valid(self, exprs):\n try:\n self._assert_valid(util.promote_list(exprs))\n except com.RelationError:\n return False\n else:\n return True\n\n def _assert_valid(self, exprs):\n from ibis.expr.analysis import ExprValidator\n\n ExprValidator([self]).validate_all(exprs)\n\n def __contains__(self, name):\n return name in self.schema()\n\n def __getitem__(self, what):\n if isinstance(what, (str, int)):\n return self.get_column(what)\n\n if isinstance(what, slice):\n step = what.step\n if step is not None and step != 1:\n raise ValueError('Slice step can only be 1')\n start = what.start or 0\n stop = what.stop\n\n if stop is None or stop < 0:\n raise ValueError('End index must be a positive number')\n\n if start < 0:\n raise ValueError('Start index must be a positive number')\n\n return self.limit(stop - start, offset=start)\n\n what = bind_expr(self, what)\n\n if isinstance(what, AnalyticExpr):\n what = what._table_getitem()\n\n if isinstance(what, (list, tuple, TableExpr)):\n # Projection case\n return self.projection(what)\n elif isinstance(what, BooleanColumn):\n # Boolean predicate\n return self.filter([what])\n elif isinstance(what, ColumnExpr):\n # Projection convenience\n return self.projection(what)\n else:\n raise NotImplementedError(\n 'Selection rows or columns with {} objects is not '\n 'supported'.format(type(what).__name__)\n )\n\n def __len__(self):\n raise com.ExpressionError('Use .count() instead')\n\n def __setstate__(self, instance_dictionary):\n self.__dict__ = 
instance_dictionary\n\n def __getattr__(self, key):\n try:\n schema = self.schema()\n except com.IbisError:\n raise AttributeError(key)\n\n if key not in schema:\n raise AttributeError(key)\n\n try:\n return self.get_column(key)\n except com.IbisTypeError:\n raise AttributeError(key)\n\n def __dir__(self):\n attrs = dir(type(self))\n if self._is_materialized():\n attrs = frozenset(attrs + self.schema().names)\n return sorted(attrs)\n\n def _resolve(self, exprs):\n exprs = util.promote_list(exprs)\n return list(map(self._ensure_expr, exprs))\n\n def _ensure_expr(self, expr):\n if isinstance(expr, str):\n return self[expr]\n elif isinstance(expr, (int, np.integer)):\n return self[self.schema().name_at_position(expr)]\n elif not isinstance(expr, Expr):\n return expr(self)\n else:\n return expr\n\n def _get_type(self, name):\n return self._arg.get_type(name)\n\n def get_columns(self, iterable):\n \"\"\"\n Get multiple columns from the table\n\n Examples\n --------\n >>> import ibis\n >>> table = ibis.table(\n ... [\n ... ('a', 'int64'),\n ... ('b', 'string'),\n ... ('c', 'timestamp'),\n ... ('d', 'float'),\n ... ],\n ... name='t'\n ... )\n >>> a, b, c = table.get_columns(['a', 'b', 'c'])\n\n Returns\n -------\n columns : list of column/array expressions\n \"\"\"\n return [self.get_column(x) for x in iterable]\n\n def get_column(self, name):\n \"\"\"\n Get a reference to a single column from the table\n\n Returns\n -------\n column : array expression\n \"\"\"\n import ibis.expr.operations as ops\n\n ref = ops.TableColumn(name, self)\n return ref.to_expr()\n\n @property\n def columns(self):\n return self.schema().names\n\n def schema(self):\n \"\"\"\n Get the schema for this table (if one is known)\n\n Returns\n -------\n schema : Schema\n \"\"\"\n if not self._is_materialized():\n raise com.IbisError('Table operation is not yet materialized')\n return self.op().schema\n\n def _is_materialized(self):\n # The operation produces a known schema\n return self.op().has_schema()\n\n def group_by(self, by=None, **additional_grouping_expressions):\n \"\"\"\n Create an intermediate grouped table expression, pending some group\n operation to be applied with it.\n\n Examples\n --------\n >>> import ibis\n >>> pairs = [('a', 'int32'), ('b', 'timestamp'), ('c', 'double')]\n >>> t = ibis.table(pairs)\n >>> b1, b2 = t.a, t.b\n >>> result = t.group_by([b1, b2]).aggregate(sum_of_c=t.c.sum())\n\n Notes\n -----\n group_by and groupby are equivalent, with `groupby` being provided for\n ease-of-use for pandas users.\n\n Returns\n -------\n grouped_expr : GroupedTableExpr\n \"\"\"\n from ibis.expr.groupby import GroupedTableExpr\n\n return GroupedTableExpr(self, by, **additional_grouping_expressions)\n\n groupby = group_by\n\n\n# -----------------------------------------------------------------------------\n# Declare all typed ValueExprs. 
This is what the user will actually interact\n# with: an instance of each is well-typed and includes all valid methods\n# defined for each type.\n\n\nclass AnyValue(ValueExpr):\n pass # noqa: E701,E302\n\n\nclass AnyScalar(ScalarExpr, AnyValue):\n pass # noqa: E701,E302\n\n\nclass AnyColumn(ColumnExpr, AnyValue):\n pass # noqa: E701,E302\n\n\nclass NullValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass NullScalar(AnyScalar, NullValue):\n pass # noqa: E701,E302\n\n\nclass NullColumn(AnyColumn, NullValue):\n pass # noqa: E701,E302\n\n\nclass NumericValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass NumericScalar(AnyScalar, NumericValue):\n pass # noqa: E701,E302\n\n\nclass NumericColumn(AnyColumn, NumericValue):\n pass # noqa: E701,E302\n\n\nclass BooleanValue(NumericValue):\n pass # noqa: E701,E302\n\n\nclass BooleanScalar(NumericScalar, BooleanValue):\n pass # noqa: E701,E302\n\n\nclass BooleanColumn(NumericColumn, BooleanValue):\n pass # noqa: E701,E302\n\n\nclass IntegerValue(NumericValue):\n pass # noqa: E701,E302\n\n\nclass IntegerScalar(NumericScalar, IntegerValue):\n pass # noqa: E701,E302\n\n\nclass IntegerColumn(NumericColumn, IntegerValue):\n pass # noqa: E701,E302\n\n\nclass FloatingValue(NumericValue):\n pass # noqa: E701,E302\n\n\nclass FloatingScalar(NumericScalar, FloatingValue):\n pass # noqa: E701,E302\n\n\nclass FloatingColumn(NumericColumn, FloatingValue):\n pass # noqa: E701,E302\n\n\nclass DecimalValue(NumericValue):\n pass # noqa: E701,E302\n\n\nclass DecimalScalar(NumericScalar, DecimalValue):\n pass # noqa: E701,E302\n\n\nclass DecimalColumn(NumericColumn, DecimalValue):\n pass # noqa: E701,E302\n\n\nclass StringValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass StringScalar(AnyScalar, StringValue):\n pass # noqa: E701,E302\n\n\nclass StringColumn(AnyColumn, StringValue):\n pass # noqa: E701,E302\n\n\nclass BinaryValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass BinaryScalar(AnyScalar, BinaryValue):\n pass # noqa: E701,E302\n\n\nclass BinaryColumn(AnyColumn, BinaryValue):\n pass # noqa: E701,E302\n\n\nclass TemporalValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass TemporalScalar(AnyScalar, TemporalValue):\n pass # noqa: E701,E302\n\n\nclass TemporalColumn(AnyColumn, TemporalValue):\n pass # noqa: E701,E302\n\n\nclass TimeValue(TemporalValue):\n pass # noqa: E701,E302\n\n\nclass TimeScalar(TemporalScalar, TimeValue):\n pass # noqa: E701,E302\n\n\nclass TimeColumn(TemporalColumn, TimeValue):\n pass # noqa: E701,E302\n\n\nclass DateValue(TemporalValue):\n pass # noqa: E701,E302\n\n\nclass DateScalar(TemporalScalar, DateValue):\n pass # noqa: E701,E302\n\n\nclass DateColumn(TemporalColumn, DateValue):\n pass # noqa: E701,E302\n\n\nclass TimestampValue(TemporalValue):\n pass # noqa: E701,E302\n\n\nclass TimestampScalar(TemporalScalar, TimestampValue):\n pass # noqa: E701,E302\n\n\nclass TimestampColumn(TemporalColumn, TimestampValue):\n pass # noqa: E701,E302\n\n\nclass CategoryValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass CategoryScalar(AnyScalar, CategoryValue):\n pass # noqa: E701,E302\n\n\nclass CategoryColumn(AnyColumn, CategoryValue):\n pass # noqa: E701,E302\n\n\nclass EnumValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass EnumScalar(AnyScalar, EnumValue):\n pass # noqa: E701,E302\n\n\nclass EnumColumn(AnyColumn, EnumValue):\n pass # noqa: E701,E302\n\n\nclass ArrayValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass ArrayScalar(AnyScalar, ArrayValue):\n pass # noqa: E701,E302\n\n\nclass ArrayColumn(AnyColumn, ArrayValue):\n pass # noqa: 
E701,E302\n\n\nclass SetValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass SetScalar(AnyScalar, SetValue):\n pass # noqa: E701,E302\n\n\nclass SetColumn(AnyColumn, SetValue):\n pass # noqa: E701,E302\n\n\nclass MapValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass MapScalar(AnyScalar, MapValue):\n pass # noqa: E701,E302\n\n\nclass MapColumn(AnyColumn, MapValue):\n pass # noqa: E701,E302\n\n\nclass JSONValue(StringValue):\n pass # noqa: E701,E302\n\n\nclass JSONScalar(StringScalar, JSONValue):\n pass # noqa: E701,E302\n\n\nclass JSONColumn(StringColumn, JSONValue):\n pass # noqa: E701,E302\n\n\nclass JSONBValue(BinaryValue):\n pass # noqa: E701,E302\n\n\nclass JSONBScalar(BinaryScalar, JSONBValue):\n pass # noqa: E701,E302\n\n\nclass JSONBColumn(BinaryColumn, JSONBValue):\n pass # noqa: E701,E302\n\n\nclass StructValue(AnyValue):\n def __dir__(self):\n return sorted(\n frozenset(itertools.chain(dir(type(self)), self.type().names))\n )\n\n\nclass StructScalar(AnyScalar, StructValue):\n pass # noqa: E701,E302\n\n\nclass StructColumn(AnyColumn, StructValue):\n pass # noqa: E701,E302\n\n\nclass IntervalValue(AnyValue):\n pass # noqa: E701,E302\n\n\nclass IntervalScalar(AnyScalar, IntervalValue):\n pass # noqa: E701,E302\n\n\nclass IntervalColumn(AnyColumn, IntervalValue):\n pass # noqa: E701,E302\n\n\nclass GeoSpatialValue(NumericValue):\n pass # noqa: E701,E302\n\n\nclass GeoSpatialScalar(NumericScalar, GeoSpatialValue):\n pass # noqa: E701,E302,E501\n\n\nclass GeoSpatialColumn(NumericColumn, GeoSpatialValue):\n pass # noqa: E701,E302,E501\n\n\nclass PointValue(GeoSpatialValue):\n pass # noqa: E701,E302\n\n\nclass PointScalar(GeoSpatialScalar, PointValue):\n pass # noqa: E701,E302\n\n\nclass PointColumn(GeoSpatialColumn, PointValue):\n pass # noqa: E701,E302\n\n\nclass LineStringValue(GeoSpatialValue):\n pass # noqa: E701,E302\n\n\nclass LineStringScalar(GeoSpatialScalar, LineStringValue):\n pass # noqa: E701,E302,E501\n\n\nclass LineStringColumn(GeoSpatialColumn, LineStringValue):\n pass # noqa: E701,E302,E501\n\n\nclass PolygonValue(GeoSpatialValue):\n pass # noqa: E701,E302\n\n\nclass PolygonScalar(GeoSpatialScalar, PolygonValue):\n pass # noqa: E701,E302\n\n\nclass PolygonColumn(GeoSpatialColumn, PolygonValue):\n pass # noqa: E701,E302\n\n\nclass MultiLineStringValue(GeoSpatialValue):\n pass # noqa: E701,E302\n\n\nclass MultiLineStringScalar(\n GeoSpatialScalar, MultiLineStringValue\n): # noqa: E302\n pass # noqa: E701\n\n\nclass MultiLineStringColumn(\n GeoSpatialColumn, MultiLineStringValue\n): # noqa: E302\n pass # noqa: E701\n\n\nclass MultiPointValue(GeoSpatialValue):\n pass # noqa: E701,E302\n\n\nclass MultiPointScalar(GeoSpatialScalar, MultiPointValue): # noqa: E302\n pass # noqa: E701\n\n\nclass MultiPointColumn(GeoSpatialColumn, MultiPointValue): # noqa: E302\n pass # noqa: E701\n\n\nclass MultiPolygonValue(GeoSpatialValue):\n pass # noqa: E701,E302\n\n\nclass MultiPolygonScalar(GeoSpatialScalar, MultiPolygonValue): # noqa: E302\n pass # noqa: E701\n\n\nclass MultiPolygonColumn(GeoSpatialColumn, MultiPolygonValue): # noqa: E302\n pass # noqa: E701\n\n\nclass UUIDValue(StringValue):\n pass # noqa: E701,E302\n\n\nclass UUIDScalar(StringScalar, UUIDValue):\n pass # noqa: E701,E302\n\n\nclass UUIDColumn(StringColumn, UUIDValue):\n pass # noqa: E701,E302\n\n\nclass ListExpr(ColumnExpr, AnyValue):\n @property\n def values(self):\n return self.op().values\n\n def __iter__(self):\n return iter(self.values)\n\n def __getitem__(self, key):\n return self.values[key]\n\n def __add__(self, 
other):\n other_values = tuple(getattr(other, 'values', other))\n return type(self.op())(self.values + other_values).to_expr()\n\n def __radd__(self, other):\n other_values = tuple(getattr(other, 'values', other))\n return type(self.op())(other_values + self.values).to_expr()\n\n def __bool__(self):\n return bool(self.values)\n\n __nonzero__ = __bool__\n\n def __len__(self):\n return len(self.values)\n\n\nclass TopKExpr(AnalyticExpr):\n def type(self):\n return 'topk'\n\n def _table_getitem(self):\n return self.to_filter()\n\n def to_filter(self):\n # TODO: move to api.py\n import ibis.expr.operations as ops\n\n return ops.SummaryFilter(self).to_expr()\n\n def to_aggregation(\n self, metric_name=None, parent_table=None, backup_metric_name=None\n ):\n \"\"\"\n Convert the TopK operation to a table aggregation\n \"\"\"\n op = self.op()\n\n arg_table = find_base_table(op.arg)\n\n by = op.by\n if not isinstance(by, Expr):\n by = by(arg_table)\n by_table = arg_table\n else:\n by_table = find_base_table(op.by)\n\n if metric_name is None:\n if by.get_name() == op.arg.get_name():\n by = by.name(backup_metric_name)\n else:\n by = by.name(metric_name)\n\n if arg_table.equals(by_table):\n agg = arg_table.aggregate(by, by=[op.arg])\n elif parent_table is not None:\n agg = parent_table.aggregate(by, by=[op.arg])\n else:\n raise com.IbisError(\n 'Cross-table TopK; must provide a parent ' 'joined table'\n )\n\n return agg.sort_by([(by.get_name(), False)]).limit(op.k)\n\n\nclass SortExpr(Expr):\n def _type_display(self):\n return 'array-sort'\n\n def get_name(self):\n return self.op().resolve_name()\n\n\nclass DayOfWeek(Expr):\n def index(self):\n \"\"\"Get the index of the day of the week.\n\n Returns\n -------\n IntegerValue\n The index of the day of the week. Ibis follows pandas conventions,\n where **Monday = 0 and Sunday = 6**.\n \"\"\"\n import ibis.expr.operations as ops\n\n return ops.DayOfWeekIndex(self.op().arg).to_expr()\n\n def full_name(self):\n \"\"\"Get the name of the day of the week.\n\n Returns\n -------\n StringValue\n The name of the day of the week\n \"\"\"\n import ibis.expr.operations as ops\n\n return ops.DayOfWeekName(self.op().arg).to_expr()\n\n\ndef bind_expr(table, expr):\n if isinstance(expr, (list, tuple)):\n return [bind_expr(table, x) for x in expr]\n\n return table._ensure_expr(expr)\n\n\n# TODO: move to analysis\ndef find_base_table(expr):\n if isinstance(expr, TableExpr):\n return expr\n\n for arg in expr.op().flat_args():\n if isinstance(arg, Expr):\n r = find_base_table(arg)\n if isinstance(r, TableExpr):\n return r\n\n\n_NULL = None\n\n\ndef null():\n \"\"\"Create a NULL/NA scalar\"\"\"\n import ibis.expr.operations as ops\n\n global _NULL\n if _NULL is None:\n _NULL = ops.NullLiteral().to_expr()\n\n return _NULL\n\n\ndef literal(value, type=None):\n \"\"\"Create a scalar expression from a Python value.\n\n Parameters\n ----------\n value : some Python basic type\n A Python value\n type : ibis type or string, optional\n An instance of :class:`ibis.expr.datatypes.DataType` or a string\n indicating the ibis type of `value`. 
This parameter should only be used\n in cases where ibis's type inference isn't sufficient for discovering\n the type of `value`.\n\n Returns\n -------\n literal_value : Literal\n An expression representing a literal value\n\n Examples\n --------\n >>> import ibis\n >>> x = ibis.literal(42)\n >>> x.type()\n int8\n >>> y = ibis.literal(42, type='double')\n >>> y.type()\n float64\n >>> ibis.literal('foobar', type='int64') # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n TypeError: Value 'foobar' cannot be safely coerced to int64\n \"\"\"\n import ibis.expr.datatypes as dt\n import ibis.expr.operations as ops\n\n if hasattr(value, 'op') and isinstance(value.op(), ops.Literal):\n return value\n\n try:\n inferred_dtype = dt.infer(value)\n except com.InputTypeError:\n has_inferred = False\n else:\n has_inferred = True\n\n if type is None:\n has_explicit = False\n else:\n has_explicit = True\n explicit_dtype = dt.dtype(type)\n\n if has_explicit and has_inferred:\n try:\n # ensure type correctness: check that the inferred dtype is\n # implicitly castable to the explicitly given dtype and value\n dtype = inferred_dtype.cast(explicit_dtype, value=value)\n except com.IbisTypeError:\n raise TypeError(\n 'Value {!r} cannot be safely coerced to {}'.format(value, type)\n )\n elif has_explicit:\n dtype = explicit_dtype\n elif has_inferred:\n dtype = inferred_dtype\n else:\n raise TypeError(\n 'The datatype of value {!r} cannot be inferred, try '\n 'passing it explicitly with the `type` keyword.'.format(value)\n )\n\n if dtype is dt.null:\n return null().cast(dtype)\n else:\n return ops.Literal(value, dtype=dtype).to_expr()\n\n\ndef sequence(values):\n \"\"\"\n Wrap a list of Python values as an Ibis sequence type\n\n Parameters\n ----------\n values : list\n Should all be None or the same type\n\n Returns\n -------\n seq : Sequence\n \"\"\"\n import ibis.expr.operations as ops\n\n return ops.ValueList(values).to_expr()\n\n\ndef as_value_expr(val):\n import pandas as pd\n\n if not isinstance(val, Expr):\n if isinstance(val, (tuple, list)):\n val = sequence(val)\n elif isinstance(val, pd.Series):\n val = sequence(list(val))\n else:\n val = literal(val)\n\n return val\n\n\ndef param(type):\n \"\"\"Create a parameter of a particular type to be defined just before\n execution.\n\n Parameters\n ----------\n type : dt.DataType\n The type of the unbound parameter, e.g., double, int64, date, etc.\n\n Returns\n -------\n ScalarExpr\n\n Examples\n --------\n >>> import ibis\n >>> import ibis.expr.datatypes as dt\n >>> start = ibis.param(dt.date)\n >>> end = ibis.param(dt.date)\n >>> schema = [('timestamp_col', 'timestamp'), ('value', 'double')]\n >>> t = ibis.table(schema)\n >>> predicates = [t.timestamp_col >= start, t.timestamp_col <= end]\n >>> expr = t.filter(predicates).value.sum()\n \"\"\"\n import ibis.expr.datatypes as dt\n import ibis.expr.operations as ops\n\n return ops.ScalarParameter(dt.dtype(type)).to_expr()\n\n\nclass UnnamedMarker:\n pass\n\n\nunnamed = UnnamedMarker()\n", "path": "ibis/expr/types.py" } ]
diff --git a/docs/source/release/index.rst b/docs/source/release/index.rst index 514d7d16560d..7809c575b829 100644 --- a/docs/source/release/index.rst +++ b/docs/source/release/index.rst @@ -12,6 +12,7 @@ Release Notes These release notes are for versions of ibis **1.0 and later**. Release notes for pre-1.0 versions of ibis can be found at :doc:`release-pre-1.0` +* :bug:`2157` Fix interactive mode returning a expression instead of the value when used in Jupyter * :feature:`2093` IsNan implementation for OmniSciDB * :feature:`2094` [OmnisciDB] Support add_columns and drop_columns for OmnisciDB table * :bug:`2127` Fix PySpark error when doing alias after selection diff --git a/ibis/expr/tests/test_interactive.py b/ibis/expr/tests/test_interactive.py index 97e18d5fece9..aad7214d7469 100644 --- a/ibis/expr/tests/test_interactive.py +++ b/ibis/expr/tests/test_interactive.py @@ -14,6 +14,8 @@ import unittest +import pytest + import ibis.config as config from ibis.expr.tests.mocks import MockConnection @@ -30,6 +32,22 @@ def test_interactive_execute_on_repr(self): assert len(self.con.executed_queries) > 0 + def test_repr_png_is_none_in_interactive(self): + table = self.con.table('functional_alltypes') + + with config.option_context('interactive', True): + assert table._repr_png_() is None + + def test_repr_png_is_not_none_in_not_interactive(self): + pytest.importorskip('ibis.expr.visualize') + + table = self.con.table('functional_alltypes') + + with config.option_context( + 'interactive', False + ), config.option_context('graphviz_repr', True): + assert table._repr_png_() is not None + def test_default_limit(self): table = self.con.table('functional_alltypes') diff --git a/ibis/expr/types.py b/ibis/expr/types.py index 1abcae398843..915e696abf83 100644 --- a/ibis/expr/types.py +++ b/ibis/expr/types.py @@ -79,7 +79,7 @@ def _key(self): return type(self), self._safe_name, self.op() def _repr_png_(self): - if not ibis.options.graphviz_repr: + if config.options.interactive or not ibis.options.graphviz_repr: return None try: import ibis.expr.visualize as viz
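For context on the `_repr_png_` change in the ibis diff above: a minimal sketch (not part of the patch) of how the two option flags interact after the fix. It assumes a connection object `con` exposing a `functional_alltypes` table, mirroring the mock connection used in the new tests.

```python
import ibis.config as config

# Assumption: `con` is an ibis client with a 'functional_alltypes' table,
# analogous to the MockConnection used in the tests added by the patch.
table = con.table('functional_alltypes')

# In interactive mode the PNG repr is suppressed, so Jupyter falls back to
# the textual repr that actually executes the expression and shows values.
with config.option_context('interactive', True):
    assert table._repr_png_() is None

# Outside interactive mode, graphviz_repr controls whether a PNG is emitted
# (it may still be None if graphviz / ibis.expr.visualize is unavailable).
with config.option_context('interactive', False), config.option_context('graphviz_repr', True):
    png = table._repr_png_()
```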
docker__docker-py-3257
Breaks with requests 2.32.0: Not supported URL scheme http+docker With requests 2.32.0 (released about an hour ago as I write this), the docker library as called by [tox-docker](https://github.com/tox-dev/tox-docker) fails with the following exception: ``` Traceback (most recent call last): File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/adapters.py", line 532, in send conn = self._get_connection(request, verify, proxies=proxies, cert=cert) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/adapters.py", line 400, in _get_connection conn = self.poolmanager.connection_from_host( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/urllib3/poolmanager.py", line 304, in connection_from_host return self.connection_from_context(request_context) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/urllib3/poolmanager.py", line 326, in connection_from_context raise URLSchemeUnknown(scheme) urllib3.exceptions.URLSchemeUnknown: Not supported URL scheme http+docker During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/client.py", line 214, in _retrieve_server_version return self.version(api_version=False)["ApiVersion"] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/daemon.py", line 181, in version return self._result(self._get(url), json=True) ^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/utils/decorators.py", line 46, in inner return f(self, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/client.py", line 237, in _get return self.get(url, **self._set_request_timeout(kwargs)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/sessions.py", line 602, in get return self.request("GET", url, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/sessions.py", line 589, in request resp = self.send(prep, **send_kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/sessions.py", line 703, in send r = adapter.send(request, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/requests/adapters.py", line 534, in send raise InvalidURL(e, request=request) requests.exceptions.InvalidURL: Not supported URL scheme http+docker During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox/session/cmd/run/single.py", line 48, in _evaluate code, outcomes = run_commands(tox_env, no_test) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox/session/cmd/run/single.py", line 79, in run_commands MANAGER.tox_before_run_commands(tox_env) File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox/plugin/manager.py", line 88, in tox_before_run_commands self.manager.hook.tox_before_run_commands(tox_env=tox_env) File 
"/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/pluggy/_hooks.py", line 513, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/pluggy/_callers.py", line 139, in _multicall raise exception.with_traceback(exception.__traceback__) File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/pluggy/_callers.py", line 103, in _multicall res = hook_impl.function(*args) ^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox_docker/tox4/plugin.py", line 73, in tox_before_run_commands docker_build_or_pull(container_config, log) File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox_docker/plugin.py", line 57, in docker_build_or_pull docker_pull(container_config, log) File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/tox_docker/plugin.py", line 65, in docker_pull docker = docker_module.from_env(version="auto") ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/client.py", line 96, in from_env return cls( ^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/client.py", line 45, in __init__ self.api = APIClient(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/client.py", line 197, in __init__ self._version = self._retrieve_server_version() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/eagle/dvl/venvs/gafaelfawr/lib/python3.12/site-packages/docker/api/client.py", line 221, in _retrieve_server_version raise DockerException( docker.errors.DockerException: Error while fetching server API version: Not supported URL scheme http+docker ``` Reverting to requests 2.31.0 without any other changes fixes the problem.
[ { "content": "import requests.adapters\n\n\nclass BaseHTTPAdapter(requests.adapters.HTTPAdapter):\n def close(self):\n super().close()\n if hasattr(self, 'pools'):\n self.pools.clear()\n", "path": "docker/transport/basehttpadapter.py" } ]
[ { "content": "import requests.adapters\n\n\nclass BaseHTTPAdapter(requests.adapters.HTTPAdapter):\n def close(self):\n super().close()\n if hasattr(self, 'pools'):\n self.pools.clear()\n\n # Fix for requests 2.32.2+:\n # https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05\n def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):\n return self.get_connection(request.url, proxies)\n", "path": "docker/transport/basehttpadapter.py" } ]
diff --git a/docker/transport/basehttpadapter.py b/docker/transport/basehttpadapter.py index dfbb193b9..2301b6b07 100644 --- a/docker/transport/basehttpadapter.py +++ b/docker/transport/basehttpadapter.py @@ -6,3 +6,8 @@ def close(self): super().close() if hasattr(self, 'pools'): self.pools.clear() + + # Fix for requests 2.32.2+: + # https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05 + def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None): + return self.get_connection(request.url, proxies)
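A quick sanity check for the adapter change above (a sketch, not part of the patch) that verifies API-version negotiation works again over the `http+docker` transport once the override is in place. It assumes a reachable local Docker daemon and the `docker` and `requests` packages installed.

```python
import requests
import docker

print("requests", requests.__version__)  # 2.32.x is the series that triggered the error

# With get_connection_with_tls_context delegating back to get_connection,
# version negotiation no longer raises "Not supported URL scheme http+docker".
client = docker.from_env(version="auto")
print("docker API", client.version()["ApiVersion"])
```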
kornia__kornia-579
Backslash not showing in equation for Tversky Loss ## 📚 Documentation The backslashes in the denominator of the Tversky loss equation are not rendered correctly with MathJax. As shown in this screenshot, it only inserts a little space between P and G and thus does not render the equation correctly. ![tversky_doc](https://user-images.githubusercontent.com/7403236/83774582-28e2c480-a686-11ea-8fd3-dbc509719885.png)
[ { "content": "from typing import Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom kornia.utils import one_hot\n\n# based on:\n# https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py\n\n\ndef tversky_loss(input: torch.Tensor, target: torch.Tensor,\n alpha: float, beta: float, eps: float = 1e-8) -> torch.Tensor:\n r\"\"\"Function that computes Tversky loss.\n\n See :class:`~kornia.losses.TverskyLoss` for details.\n \"\"\"\n if not torch.is_tensor(input):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\"\n .format(type(input)))\n\n if not len(input.shape) == 4:\n raise ValueError(\"Invalid input shape, we expect BxNxHxW. Got: {}\"\n .format(input.shape))\n\n if not input.shape[-2:] == target.shape[-2:]:\n raise ValueError(\"input and target shapes must be the same. Got: {} and {}\"\n .format(input.shape, input.shape))\n\n if not input.device == target.device:\n raise ValueError(\n \"input and target must be in the same device. Got: {} and {}\" .format(\n input.device, target.device))\n\n # compute softmax over the classes axis\n input_soft: torch.Tensor = F.softmax(input, dim=1)\n\n # create the labels one hot tensor\n target_one_hot: torch.Tensor = one_hot(\n target, num_classes=input.shape[1],\n device=input.device, dtype=input.dtype)\n\n # compute the actual dice score\n dims = (1, 2, 3)\n intersection = torch.sum(input_soft * target_one_hot, dims)\n fps = torch.sum(input_soft * (-target_one_hot + 1.), dims)\n fns = torch.sum((-input_soft + 1.) * target_one_hot, dims)\n\n numerator = intersection\n denominator = intersection + alpha * fps + beta * fns\n tversky_loss = numerator / (denominator + eps)\n return torch.mean(-tversky_loss + 1.)\n\n\nclass TverskyLoss(nn.Module):\n r\"\"\"Criterion that computes Tversky Coeficient loss.\n\n According to [1], we compute the Tversky Coefficient as follows:\n\n .. math::\n\n \\text{S}(P, G, \\alpha; \\beta) =\n \\frac{|PG|}{|PG| + \\alpha |P \\ G| + \\beta |G \\ P|}\n\n where:\n - :math:`P` and :math:`G` are the predicted and ground truth binary\n labels.\n - :math:`\\alpha` and :math:`\\beta` control the magnitude of the\n penalties for FPs and FNs, respectively.\n\n Notes:\n - :math:`\\alpha = \\beta = 0.5` => dice coeff\n - :math:`\\alpha = \\beta = 1` => tanimoto coeff\n - :math:`\\alpha + \\beta = 1` => F beta coeff\n\n Shape:\n - Input: :math:`(N, C, H, W)` where C = number of classes.\n - Target: :math:`(N, H, W)` where each value is\n :math:`0 ≤ targets[i] ≤ C−1`.\n\n Examples:\n >>> N = 5 # num_classes\n >>> loss = kornia.losses.TverskyLoss(alpha=0.5, beta=0.5)\n >>> input = torch.randn(1, N, 3, 5, requires_grad=True)\n >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)\n >>> output = loss(input, target)\n >>> output.backward()\n\n References:\n [1]: https://arxiv.org/abs/1706.05721\n \"\"\"\n\n def __init__(self, alpha: float, beta: float, eps: float = 1e-8) -> None:\n super(TverskyLoss, self).__init__()\n self.alpha: float = alpha\n self.beta: float = beta\n self.eps: float = eps\n\n def forward( # type: ignore\n self,\n input: torch.Tensor,\n target: torch.Tensor) -> torch.Tensor:\n return tversky_loss(input, target, self.alpha, self.beta, self.eps)\n", "path": "kornia/losses/tversky.py" } ]
[ { "content": "from typing import Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom kornia.utils import one_hot\n\n# based on:\n# https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py\n\n\ndef tversky_loss(input: torch.Tensor, target: torch.Tensor,\n alpha: float, beta: float, eps: float = 1e-8) -> torch.Tensor:\n r\"\"\"Function that computes Tversky loss.\n\n See :class:`~kornia.losses.TverskyLoss` for details.\n \"\"\"\n if not torch.is_tensor(input):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\"\n .format(type(input)))\n\n if not len(input.shape) == 4:\n raise ValueError(\"Invalid input shape, we expect BxNxHxW. Got: {}\"\n .format(input.shape))\n\n if not input.shape[-2:] == target.shape[-2:]:\n raise ValueError(\"input and target shapes must be the same. Got: {} and {}\"\n .format(input.shape, input.shape))\n\n if not input.device == target.device:\n raise ValueError(\n \"input and target must be in the same device. Got: {} and {}\" .format(\n input.device, target.device))\n\n # compute softmax over the classes axis\n input_soft: torch.Tensor = F.softmax(input, dim=1)\n\n # create the labels one hot tensor\n target_one_hot: torch.Tensor = one_hot(\n target, num_classes=input.shape[1],\n device=input.device, dtype=input.dtype)\n\n # compute the actual dice score\n dims = (1, 2, 3)\n intersection = torch.sum(input_soft * target_one_hot, dims)\n fps = torch.sum(input_soft * (-target_one_hot + 1.), dims)\n fns = torch.sum((-input_soft + 1.) * target_one_hot, dims)\n\n numerator = intersection\n denominator = intersection + alpha * fps + beta * fns\n tversky_loss = numerator / (denominator + eps)\n return torch.mean(-tversky_loss + 1.)\n\n\nclass TverskyLoss(nn.Module):\n r\"\"\"Criterion that computes Tversky Coeficient loss.\n\n According to [1], we compute the Tversky Coefficient as follows:\n\n .. math::\n\n \\text{S}(P, G, \\alpha; \\beta) =\n \\frac{|PG|}{|PG| + \\alpha |P \\setminus G| + \\beta |G \\setminus P|}\n\n where:\n - :math:`P` and :math:`G` are the predicted and ground truth binary\n labels.\n - :math:`\\alpha` and :math:`\\beta` control the magnitude of the\n penalties for FPs and FNs, respectively.\n\n Notes:\n - :math:`\\alpha = \\beta = 0.5` => dice coeff\n - :math:`\\alpha = \\beta = 1` => tanimoto coeff\n - :math:`\\alpha + \\beta = 1` => F beta coeff\n\n Shape:\n - Input: :math:`(N, C, H, W)` where C = number of classes.\n - Target: :math:`(N, H, W)` where each value is\n :math:`0 ≤ targets[i] ≤ C−1`.\n\n Examples:\n >>> N = 5 # num_classes\n >>> loss = kornia.losses.TverskyLoss(alpha=0.5, beta=0.5)\n >>> input = torch.randn(1, N, 3, 5, requires_grad=True)\n >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)\n >>> output = loss(input, target)\n >>> output.backward()\n\n References:\n [1]: https://arxiv.org/abs/1706.05721\n \"\"\"\n\n def __init__(self, alpha: float, beta: float, eps: float = 1e-8) -> None:\n super(TverskyLoss, self).__init__()\n self.alpha: float = alpha\n self.beta: float = beta\n self.eps: float = eps\n\n def forward( # type: ignore\n self,\n input: torch.Tensor,\n target: torch.Tensor) -> torch.Tensor:\n return tversky_loss(input, target, self.alpha, self.beta, self.eps)\n", "path": "kornia/losses/tversky.py" } ]
diff --git a/kornia/losses/tversky.py b/kornia/losses/tversky.py index 969bd1d82f..089a7faa2a 100644 --- a/kornia/losses/tversky.py +++ b/kornia/losses/tversky.py @@ -61,7 +61,7 @@ class TverskyLoss(nn.Module): .. math:: \text{S}(P, G, \alpha; \beta) = - \frac{|PG|}{|PG| + \alpha |P \ G| + \beta |G \ P|} + \frac{|PG|}{|PG| + \alpha |P \setminus G| + \beta |G \setminus P|} where: - :math:`P` and :math:`G` are the predicted and ground truth binary
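For reference, the corrected docstring equation from the kornia patch above, written out as standalone LaTeX (Sphinx wraps it in a `.. math::` directive); `\setminus` renders the set difference that the bare backslash previously swallowed:

```latex
\text{S}(P, G, \alpha; \beta) =
    \frac{|PG|}{|PG| + \alpha |P \setminus G| + \beta |G \setminus P|}
```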
bridgecrewio__checkov-5468
[CKV_DOCKER_7] Erroneous failed check when --platform flag is used in multistage Dockerfile **Describe the issue** In the `CKV_DOCKER_7` check, when the `--platform` flag is used in a multistage Dockerfile, the check erroneously fails for images that were created within the current Dockerfile and therefore require no version pinning. **Examples** The Dockerfile below passes the check ``` FROM golang:1.20-bullseye AS base FROM base AS build ``` but as soon as a `--platform` flag is added, the check fails: ``` FROM --platform=$BUILDPLATFORM golang:1.20-bullseye AS base FROM base AS build ``` **Version (please complete the following information):** - Checkov Version 2.4.4
[ { "content": "from __future__ import annotations\n\nimport re\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\nMULTI_STAGE_PATTERN = re.compile(r\"(\\S+)\\s+as\\s+(\\S+)\", re.IGNORECASE)\n\n\nclass ReferenceLatestTag(BaseDockerfileCheck):\n def __init__(self) -> None:\n name = \"Ensure the base image uses a non latest version tag\"\n id = \"CKV_DOCKER_7\"\n supported_instructions = (\"FROM\",)\n categories = (CheckCategories.CONVENTION,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n stages = []\n\n for content in conf:\n base_image = content[\"value\"]\n if \" as \" in base_image.lower():\n # do an initial lookup before using the regex\n multi_stage = re.match(MULTI_STAGE_PATTERN, base_image)\n if multi_stage:\n base_image = multi_stage[1]\n stages.append(multi_stage[2])\n\n if \":\" not in base_image and base_image not in stages and base_image != \"scratch\":\n return CheckResult.FAILED, [content]\n elif base_image.endswith(\":latest\"):\n return CheckResult.FAILED, [content]\n return CheckResult.PASSED, [content]\n\n\ncheck = ReferenceLatestTag()\n", "path": "checkov/dockerfile/checks/ReferenceLatestTag.py" } ]
[ { "content": "from __future__ import annotations\n\nimport re\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\nMULTI_STAGE_PATTERN = re.compile(r\"(?:--platform=\\S+\\s+)?(\\S+)\\s+as\\s+(\\S+)\", re.IGNORECASE)\n\n\nclass ReferenceLatestTag(BaseDockerfileCheck):\n def __init__(self) -> None:\n name = \"Ensure the base image uses a non latest version tag\"\n id = \"CKV_DOCKER_7\"\n supported_instructions = (\"FROM\",)\n categories = (CheckCategories.CONVENTION,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n stages = []\n\n for content in conf:\n base_image = content[\"value\"]\n if \" as \" in base_image.lower():\n # do an initial lookup before using the regex\n multi_stage = re.match(MULTI_STAGE_PATTERN, base_image)\n if multi_stage:\n base_image = multi_stage[1]\n stages.append(multi_stage[2])\n\n if \":\" not in base_image and base_image not in stages and base_image != \"scratch\":\n return CheckResult.FAILED, [content]\n elif base_image.endswith(\":latest\"):\n return CheckResult.FAILED, [content]\n return CheckResult.PASSED, [content]\n\n\ncheck = ReferenceLatestTag()\n", "path": "checkov/dockerfile/checks/ReferenceLatestTag.py" } ]
diff --git a/checkov/dockerfile/checks/ReferenceLatestTag.py b/checkov/dockerfile/checks/ReferenceLatestTag.py index 8bcfed571e..ec0dc6629a 100644 --- a/checkov/dockerfile/checks/ReferenceLatestTag.py +++ b/checkov/dockerfile/checks/ReferenceLatestTag.py @@ -9,7 +9,7 @@ if TYPE_CHECKING: from dockerfile_parse.parser import _Instruction -MULTI_STAGE_PATTERN = re.compile(r"(\S+)\s+as\s+(\S+)", re.IGNORECASE) +MULTI_STAGE_PATTERN = re.compile(r"(?:--platform=\S+\s+)?(\S+)\s+as\s+(\S+)", re.IGNORECASE) class ReferenceLatestTag(BaseDockerfileCheck): diff --git a/tests/dockerfile/checks/example_ReferenceLatestTag/success_multi_stage_platform/Dockerfile b/tests/dockerfile/checks/example_ReferenceLatestTag/success_multi_stage_platform/Dockerfile new file mode 100644 index 0000000000..764f7f2eb6 --- /dev/null +++ b/tests/dockerfile/checks/example_ReferenceLatestTag/success_multi_stage_platform/Dockerfile @@ -0,0 +1,5 @@ +FROM --platform=$BUILDPLATFORM python:3.11-slim AS build +COPY test.sh /test.sh + +FROM build as run +LABEL maintainer=checkov diff --git a/tests/dockerfile/checks/test_ReferenceLatestTag.py b/tests/dockerfile/checks/test_ReferenceLatestTag.py index 2c406d36d8..c51b55fb75 100644 --- a/tests/dockerfile/checks/test_ReferenceLatestTag.py +++ b/tests/dockerfile/checks/test_ReferenceLatestTag.py @@ -20,7 +20,8 @@ def test(self): "/success_multi_stage/Dockerfile.FROM", "/success_multi_stage_capital/Dockerfile.FROM", "/success_scratch/Dockerfile.FROM", - "/success_multi_stage_scratch/Dockerfile.FROM" + "/success_multi_stage_scratch/Dockerfile.FROM", + "/success_multi_stage_platform/Dockerfile.FROM", } failing_resources = { @@ -31,8 +32,8 @@ def test(self): passed_check_resources = set([c.resource for c in report.passed_checks]) failed_check_resources = set([c.resource for c in report.failed_checks]) - self.assertEqual(summary["passed"], 5) - self.assertEqual(summary["failed"], 2) + self.assertEqual(summary["passed"], len(passing_resources)) + self.assertEqual(summary["failed"], len(failing_resources)) self.assertEqual(summary["skipped"], 0) self.assertEqual(summary["parsing_errors"], 0)
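A small sketch (not part of the patch) of how the updated `MULTI_STAGE_PATTERN` behaves on the two `FROM` values from the issue; with the optional `--platform` group, group 1 is the base image and group 2 is the stage alias in both cases, so `base` is recorded as a known stage either way.

```python
import re

# Regex taken verbatim from the patched ReferenceLatestTag check.
MULTI_STAGE_PATTERN = re.compile(r"(?:--platform=\S+\s+)?(\S+)\s+as\s+(\S+)", re.IGNORECASE)

for value in (
    "golang:1.20-bullseye AS base",                            # without --platform
    "--platform=$BUILDPLATFORM golang:1.20-bullseye AS base",  # with --platform
):
    match = MULTI_STAGE_PATTERN.match(value)
    print(match.group(1), match.group(2))  # prints: golang:1.20-bullseye base (twice)
```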
ibis-project__ibis-3261
bug: correlated subquery with join generates incorrect SQL This Ibis expression (using the TPC-H database; a simplified version of TPC-H query 2) appears to be valid, but does not generate correct SQL: ``` part = con.table("part") supplier = con.table("supplier") partsupp = con.table("partsupp") q = part.join(partsupp, part.P_PARTKEY == partsupp.PS_PARTKEY) q = q[ part.P_PARTKEY, partsupp.PS_SUPPLYCOST, ] subq = partsupp.join(supplier, supplier.S_SUPPKEY == partsupp.PS_SUPPKEY) subq = subq.materialize() subq = subq[subq.PS_PARTKEY == q.P_PARTKEY] q = q[q.PS_SUPPLYCOST == subq.PS_SUPPLYCOST.min()] ``` ### Expected Result ```sql SELECT t0."P_PARTKEY", t0."PS_SUPPLYCOST" FROM base.part AS t1 JOIN base.partsupp AS t2 ON t1."P_PARTKEY" = t2."PS_PARTKEY") AS t0 WHERE t0."PS_SUPPLYCOST" = (SELECT min(t3."PS_SUPPLYCOST") AS MIN FROM base.partsupp AS t2 JOIN base.supplier AS t6 ON t6."S_SUPPKEY" = t2."PS_SUPPKEY") AS t3 WHERE t3."PS_PARTKEY" = "P_PARTKEY" ``` ### Actual Result The generated SQL is: ```sql SELECT t0."P_PARTKEY", t0."PS_SUPPLYCOST" FROM (SELECT t1."P_PARTKEY" AS "P_PARTKEY", t2."PS_SUPPLYCOST" AS "PS_SUPPLYCOST" FROM base.part AS t1 JOIN base.partsupp AS t2 ON t1."P_PARTKEY" = t2."PS_PARTKEY") AS t0 WHERE t0."PS_SUPPLYCOST" = (SELECT min(t3."PS_SUPPLYCOST") AS MIN FROM (SELECT [...] FROM base.partsupp AS t2 JOIN base.supplier AS t6 ON t6."S_SUPPKEY" = t2."PS_SUPPKEY") AS t3, (SELECT t1."P_PARTKEY" AS "P_PARTKEY", t2."PS_SUPPLYCOST" AS "PS_SUPPLYCOST" FROM base.part AS t1 JOIN base.partsupp AS t2 ON t1."P_PARTKEY" = t2."PS_PARTKEY") WHERE t3."PS_PARTKEY" = "P_PARTKEY") ``` The subquery above does an extra join with a copy of the outer query, whereas it should just reference the existing value in the outer query.
[ { "content": "import operator\nfrom typing import Any, Dict\n\nimport sqlalchemy as sa\nimport sqlalchemy.sql as sql\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.analysis as L\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.types as ir\nimport ibis.expr.window as W\n\nfrom .database import AlchemyTable\nfrom .geospatial import geospatial_supported\n\n\ndef variance_reduction(func_name):\n suffix = {'sample': 'samp', 'pop': 'pop'}\n\n def variance_compiler(t, expr):\n arg, how, where = expr.op().args\n\n if arg.type().equals(dt.boolean):\n arg = arg.cast('int32')\n\n func = getattr(\n sa.func, '{}_{}'.format(func_name, suffix.get(how, 'samp'))\n )\n\n if where is not None:\n arg = where.ifelse(arg, None)\n return func(t.translate(arg))\n\n return variance_compiler\n\n\ndef infix_op(infix_sym):\n def formatter(t, expr):\n op = expr.op()\n left, right = op.args\n\n left_arg = t.translate(left)\n right_arg = t.translate(right)\n return left_arg.op(infix_sym)(right_arg)\n\n return formatter\n\n\ndef fixed_arity(sa_func, arity):\n if isinstance(sa_func, str):\n sa_func = getattr(sa.func, sa_func)\n\n def formatter(t, expr):\n if arity != len(expr.op().args):\n raise com.IbisError('incorrect number of args')\n\n return _varargs_call(sa_func, t, expr)\n\n return formatter\n\n\ndef varargs(sa_func):\n def formatter(t, expr):\n op = expr.op()\n trans_args = [t.translate(arg) for arg in op.arg]\n return sa_func(*trans_args)\n\n return formatter\n\n\ndef _varargs_call(sa_func, t, expr):\n op = expr.op()\n trans_args = [t.translate(arg) for arg in op.args]\n return sa_func(*trans_args)\n\n\ndef get_sqla_table(ctx, table):\n if ctx.has_ref(table):\n ctx_level = ctx\n sa_table = ctx_level.get_ref(table)\n while sa_table is None and ctx_level.parent is not ctx_level:\n ctx_level = ctx_level.parent\n sa_table = ctx_level.get_ref(table)\n else:\n op = table.op()\n if isinstance(op, AlchemyTable):\n sa_table = op.sqla_table\n else:\n sa_table = ctx.get_compiled_expr(table)\n\n return sa_table\n\n\ndef _table_column(t, expr):\n op = expr.op()\n ctx = t.context\n table = op.table\n\n sa_table = get_sqla_table(ctx, table)\n out_expr = getattr(sa_table.c, op.name)\n\n # If the column does not originate from the table set in the current SELECT\n # context, we should format as a subquery\n if t.permit_subquery and ctx.is_foreign_expr(table):\n return sa.select([out_expr])\n\n return out_expr\n\n\ndef _table_array_view(t, expr):\n ctx = t.context\n table = ctx.get_compiled_expr(expr.op().table)\n return table\n\n\ndef _exists_subquery(t, expr):\n from .query_builder import AlchemyCompiler\n\n op = expr.op()\n ctx = t.context\n\n filtered = op.foreign_table.filter(op.predicates).projection(\n [ir.literal(1).name(ir.unnamed)]\n )\n\n sub_ctx = ctx.subcontext()\n clause = AlchemyCompiler.to_sql(filtered, sub_ctx, exists=True)\n\n if isinstance(op, ops.NotExistsSubquery):\n clause = sa.not_(clause)\n\n return clause\n\n\ndef _cast(t, expr):\n op = expr.op()\n arg, target_type = op.args\n sa_arg = t.translate(arg)\n sa_type = t.get_sqla_type(target_type)\n\n if isinstance(arg, ir.CategoryValue) and target_type == 'int32':\n return sa_arg\n else:\n return sa.cast(sa_arg, sa_type)\n\n\ndef _contains(t, expr):\n op = expr.op()\n\n left, right = (t.translate(arg) for arg in op.args)\n\n return left.in_(right)\n\n\ndef _not_contains(t, expr):\n return sa.not_(_contains(t, expr))\n\n\ndef reduction(sa_func):\n def formatter(t, expr):\n op = expr.op()\n *args, 
where = op.args\n\n return _reduction_format(t, sa_func, where, *args)\n\n return formatter\n\n\ndef _reduction_format(t, sa_func, where, arg, *args):\n if where is not None:\n arg = t.translate(where.ifelse(arg, ibis.NA))\n else:\n arg = t.translate(arg)\n\n return sa_func(arg, *map(t.translate, args))\n\n\ndef _literal(t, expr):\n dtype = expr.type()\n value = expr.op().value\n\n if isinstance(dtype, dt.Set):\n return list(map(sa.literal, value))\n\n return sa.literal(value)\n\n\ndef _value_list(t, expr):\n return [t.translate(x) for x in expr.op().values]\n\n\ndef _is_null(t, expr):\n arg = t.translate(expr.op().args[0])\n return arg.is_(sa.null())\n\n\ndef _not_null(t, expr):\n arg = t.translate(expr.op().args[0])\n return arg.isnot(sa.null())\n\n\ndef _round(t, expr):\n op = expr.op()\n arg, digits = op.args\n sa_arg = t.translate(arg)\n\n f = sa.func.round\n\n if digits is not None:\n sa_digits = t.translate(digits)\n return f(sa_arg, sa_digits)\n else:\n return f(sa_arg)\n\n\ndef _floor_divide(t, expr):\n left, right = map(t.translate, expr.op().args)\n return sa.func.floor(left / right)\n\n\ndef _count_distinct(t, expr):\n arg, where = expr.op().args\n\n if where is not None:\n sa_arg = t.translate(where.ifelse(arg, None))\n else:\n sa_arg = t.translate(arg)\n\n return sa.func.count(sa_arg.distinct())\n\n\ndef _simple_case(t, expr):\n op = expr.op()\n\n cases = [op.base == case for case in op.cases]\n return _translate_case(t, cases, op.results, op.default)\n\n\ndef _searched_case(t, expr):\n op = expr.op()\n return _translate_case(t, op.cases, op.results, op.default)\n\n\ndef _translate_case(t, cases, results, default):\n case_args = [t.translate(arg) for arg in cases]\n result_args = [t.translate(arg) for arg in results]\n\n whens = zip(case_args, result_args)\n default = t.translate(default)\n\n return sa.case(list(whens), else_=default)\n\n\ndef _negate(t, expr):\n op = expr.op()\n (arg,) = map(t.translate, op.args)\n return sa.not_(arg) if isinstance(expr, ir.BooleanValue) else -arg\n\n\ndef unary(sa_func):\n return fixed_arity(sa_func, 1)\n\n\ndef _string_like(t, expr):\n arg, pattern, escape = expr.op().args\n result = t.translate(arg).like(t.translate(pattern), escape=escape)\n return result\n\n\ndef _startswith(t, expr):\n arg, start = expr.op().args\n return t.translate(arg).startswith(t.translate(start))\n\n\ndef _endswith(t, expr):\n arg, start = expr.op().args\n return t.translate(arg).endswith(t.translate(start))\n\n\n_cumulative_to_reduction = {\n ops.CumulativeSum: ops.Sum,\n ops.CumulativeMin: ops.Min,\n ops.CumulativeMax: ops.Max,\n ops.CumulativeMean: ops.Mean,\n ops.CumulativeAny: ops.Any,\n ops.CumulativeAll: ops.All,\n}\n\n\ndef _cumulative_to_window(translator, expr, window):\n win = W.cumulative_window()\n win = win.group_by(window._group_by).order_by(window._order_by)\n\n op = expr.op()\n\n klass = _cumulative_to_reduction[type(op)]\n new_op = klass(*op.args)\n new_expr = expr._factory(new_op, name=expr._name)\n\n if type(new_op) in translator._rewrites:\n new_expr = translator._rewrites[type(new_op)](new_expr)\n\n return L.windowize_function(new_expr, win)\n\n\ndef _window(t, expr):\n op = expr.op()\n\n arg, window = op.args\n reduction = t.translate(arg)\n\n window_op = arg.op()\n\n _require_order_by = (\n ops.DenseRank,\n ops.MinRank,\n ops.NTile,\n ops.PercentRank,\n )\n\n if isinstance(window_op, ops.CumulativeOp):\n arg = _cumulative_to_window(t, arg, window)\n return t.translate(arg)\n\n if window.max_lookback is not None:\n raise 
NotImplementedError(\n 'Rows with max lookback is not implemented '\n 'for SQLAlchemy-based backends.'\n )\n\n # Some analytic functions need to have the expression of interest in\n # the ORDER BY part of the window clause\n if isinstance(window_op, _require_order_by) and not window._order_by:\n order_by = t.translate(window_op.args[0])\n else:\n order_by = list(map(t.translate, window._order_by))\n\n partition_by = list(map(t.translate, window._group_by))\n\n frame_clause_not_allowed = (\n ops.Lag,\n ops.Lead,\n ops.DenseRank,\n ops.MinRank,\n ops.NTile,\n ops.PercentRank,\n ops.RowNumber,\n )\n\n how = {'range': 'range_'}.get(window.how, window.how)\n preceding = window.preceding\n additional_params = (\n {}\n if isinstance(window_op, frame_clause_not_allowed)\n else {\n how: (\n -preceding if preceding is not None else preceding,\n window.following,\n )\n }\n )\n result = reduction.over(\n partition_by=partition_by, order_by=order_by, **additional_params\n )\n\n if isinstance(\n window_op, (ops.RowNumber, ops.DenseRank, ops.MinRank, ops.NTile)\n ):\n return result - 1\n else:\n return result\n\n\ndef _lag(t, expr):\n arg, offset, default = expr.op().args\n if default is not None:\n raise NotImplementedError()\n\n sa_arg = t.translate(arg)\n sa_offset = t.translate(offset) if offset is not None else 1\n return sa.func.lag(sa_arg, sa_offset)\n\n\ndef _lead(t, expr):\n arg, offset, default = expr.op().args\n if default is not None:\n raise NotImplementedError()\n sa_arg = t.translate(arg)\n sa_offset = t.translate(offset) if offset is not None else 1\n return sa.func.lead(sa_arg, sa_offset)\n\n\ndef _ntile(t, expr):\n op = expr.op()\n args = op.args\n arg, buckets = map(t.translate, args)\n return sa.func.ntile(buckets)\n\n\ndef _sort_key(t, expr):\n # We need to define this for window functions that have an order by\n by, ascending = expr.op().args\n sort_direction = sa.asc if ascending else sa.desc\n return sort_direction(t.translate(by))\n\n\nsqlalchemy_operation_registry: Dict[Any, Any] = {\n ops.And: fixed_arity(sql.and_, 2),\n ops.Or: fixed_arity(sql.or_, 2),\n ops.Not: unary(sa.not_),\n ops.Abs: unary(sa.func.abs),\n ops.Cast: _cast,\n ops.Coalesce: varargs(sa.func.coalesce),\n ops.NullIf: fixed_arity(sa.func.nullif, 2),\n ops.Contains: _contains,\n ops.NotContains: _not_contains,\n ops.Count: reduction(sa.func.count),\n ops.Sum: reduction(sa.func.sum),\n ops.Mean: reduction(sa.func.avg),\n ops.Min: reduction(sa.func.min),\n ops.Max: reduction(sa.func.max),\n ops.CountDistinct: _count_distinct,\n ops.GroupConcat: reduction(sa.func.group_concat),\n ops.Between: fixed_arity(sa.between, 3),\n ops.IsNull: _is_null,\n ops.NotNull: _not_null,\n ops.Negate: _negate,\n ops.Round: _round,\n ops.TypeOf: unary(sa.func.typeof),\n ops.Literal: _literal,\n ops.ValueList: _value_list,\n ops.NullLiteral: lambda *args: sa.null(),\n ops.SimpleCase: _simple_case,\n ops.SearchedCase: _searched_case,\n ops.TableColumn: _table_column,\n ops.TableArrayView: _table_array_view,\n ops.ExistsSubquery: _exists_subquery,\n ops.NotExistsSubquery: _exists_subquery,\n # miscellaneous varargs\n ops.Least: varargs(sa.func.least),\n ops.Greatest: varargs(sa.func.greatest),\n # string\n ops.LPad: fixed_arity(sa.func.lpad, 3),\n ops.RPad: fixed_arity(sa.func.rpad, 3),\n ops.Strip: unary(sa.func.trim),\n ops.LStrip: unary(sa.func.ltrim),\n ops.RStrip: unary(sa.func.rtrim),\n ops.Repeat: fixed_arity(sa.func.repeat, 2),\n ops.Reverse: unary(sa.func.reverse),\n ops.StrRight: fixed_arity(sa.func.right, 2),\n ops.Lowercase: 
unary(sa.func.lower),\n ops.Uppercase: unary(sa.func.upper),\n ops.StringAscii: unary(sa.func.ascii),\n ops.StringLength: unary(sa.func.length),\n ops.StringReplace: fixed_arity(sa.func.replace, 3),\n ops.StringSQLLike: _string_like,\n ops.StartsWith: _startswith,\n ops.EndsWith: _endswith,\n # math\n ops.Ln: unary(sa.func.ln),\n ops.Exp: unary(sa.func.exp),\n ops.Sign: unary(sa.func.sign),\n ops.Sqrt: unary(sa.func.sqrt),\n ops.Ceil: unary(sa.func.ceil),\n ops.Floor: unary(sa.func.floor),\n ops.Power: fixed_arity(sa.func.pow, 2),\n ops.FloorDivide: _floor_divide,\n # other\n ops.SortKey: _sort_key,\n}\n\n\n# TODO: unit tests for each of these\n_binary_ops = {\n # Binary arithmetic\n ops.Add: operator.add,\n ops.Subtract: operator.sub,\n ops.Multiply: operator.mul,\n # XXX `ops.Divide` is overwritten in `translator.py` with a custom\n # function `_true_divide`, but for some reason both are required\n ops.Divide: operator.truediv,\n ops.Modulus: operator.mod,\n # Comparisons\n ops.Equals: operator.eq,\n ops.NotEquals: operator.ne,\n ops.Less: operator.lt,\n ops.LessEqual: operator.le,\n ops.Greater: operator.gt,\n ops.GreaterEqual: operator.ge,\n ops.IdenticalTo: lambda x, y: x.op('IS NOT DISTINCT FROM')(y),\n # Boolean comparisons\n # TODO\n}\n\n\nsqlalchemy_window_functions_registry = {\n ops.Lag: _lag,\n ops.Lead: _lead,\n ops.NTile: _ntile,\n ops.FirstValue: unary(sa.func.first_value),\n ops.LastValue: unary(sa.func.last_value),\n ops.RowNumber: fixed_arity(lambda: sa.func.row_number(), 0),\n ops.DenseRank: unary(lambda arg: sa.func.dense_rank()),\n ops.MinRank: unary(lambda arg: sa.func.rank()),\n ops.PercentRank: unary(lambda arg: sa.func.percent_rank()),\n ops.WindowOp: _window,\n ops.CumulativeOp: _window,\n ops.CumulativeMax: unary(sa.func.max),\n ops.CumulativeMin: unary(sa.func.min),\n ops.CumulativeSum: unary(sa.func.sum),\n ops.CumulativeMean: unary(sa.func.avg),\n}\n\nif geospatial_supported:\n _geospatial_functions = {\n ops.GeoArea: unary(sa.func.ST_Area),\n ops.GeoAsBinary: unary(sa.func.ST_AsBinary),\n ops.GeoAsEWKB: unary(sa.func.ST_AsEWKB),\n ops.GeoAsEWKT: unary(sa.func.ST_AsEWKT),\n ops.GeoAsText: unary(sa.func.ST_AsText),\n ops.GeoAzimuth: fixed_arity(sa.func.ST_Azimuth, 2),\n ops.GeoBuffer: fixed_arity(sa.func.ST_Buffer, 2),\n ops.GeoCentroid: unary(sa.func.ST_Centroid),\n ops.GeoContains: fixed_arity(sa.func.ST_Contains, 2),\n ops.GeoContainsProperly: fixed_arity(sa.func.ST_Contains, 2),\n ops.GeoCovers: fixed_arity(sa.func.ST_Covers, 2),\n ops.GeoCoveredBy: fixed_arity(sa.func.ST_CoveredBy, 2),\n ops.GeoCrosses: fixed_arity(sa.func.ST_Crosses, 2),\n ops.GeoDFullyWithin: fixed_arity(sa.func.ST_DFullyWithin, 3),\n ops.GeoDifference: fixed_arity(sa.func.ST_Difference, 2),\n ops.GeoDisjoint: fixed_arity(sa.func.ST_Disjoint, 2),\n ops.GeoDistance: fixed_arity(sa.func.ST_Distance, 2),\n ops.GeoDWithin: fixed_arity(sa.func.ST_DWithin, 3),\n ops.GeoEndPoint: unary(sa.func.ST_EndPoint),\n ops.GeoEnvelope: unary(sa.func.ST_Envelope),\n ops.GeoEquals: fixed_arity(sa.func.ST_Equals, 2),\n ops.GeoGeometryN: fixed_arity(sa.func.ST_GeometryN, 2),\n ops.GeoGeometryType: unary(sa.func.ST_GeometryType),\n ops.GeoIntersection: fixed_arity(sa.func.ST_Intersection, 2),\n ops.GeoIntersects: fixed_arity(sa.func.ST_Intersects, 2),\n ops.GeoIsValid: unary(sa.func.ST_IsValid),\n ops.GeoLineLocatePoint: fixed_arity(sa.func.ST_LineLocatePoint, 2),\n ops.GeoLineMerge: unary(sa.func.ST_LineMerge),\n ops.GeoLineSubstring: fixed_arity(sa.func.ST_LineSubstring, 3),\n ops.GeoLength: 
unary(sa.func.ST_Length),\n ops.GeoNPoints: unary(sa.func.ST_NPoints),\n ops.GeoOrderingEquals: fixed_arity(sa.func.ST_OrderingEquals, 2),\n ops.GeoOverlaps: fixed_arity(sa.func.ST_Overlaps, 2),\n ops.GeoPerimeter: unary(sa.func.ST_Perimeter),\n ops.GeoSimplify: fixed_arity(sa.func.ST_Simplify, 3),\n ops.GeoSRID: unary(sa.func.ST_SRID),\n ops.GeoSetSRID: fixed_arity(sa.func.ST_SetSRID, 2),\n ops.GeoStartPoint: unary(sa.func.ST_StartPoint),\n ops.GeoTouches: fixed_arity(sa.func.ST_Touches, 2),\n ops.GeoTransform: fixed_arity(sa.func.ST_Transform, 2),\n ops.GeoUnaryUnion: unary(sa.func.ST_Union),\n ops.GeoUnion: fixed_arity(sa.func.ST_Union, 2),\n ops.GeoWithin: fixed_arity(sa.func.ST_Within, 2),\n ops.GeoX: unary(sa.func.ST_X),\n ops.GeoY: unary(sa.func.ST_Y),\n # Missing Geospatial ops:\n # ST_AsGML\n # ST_AsGeoJSON\n # ST_AsKML\n # ST_AsRaster\n # ST_AsSVG\n # ST_AsTWKB\n # ST_Distance_Sphere\n # ST_Dump\n # ST_DumpPoints\n # ST_GeogFromText\n # ST_GeomFromEWKB\n # ST_GeomFromEWKT\n # ST_GeomFromText\n }\n\n sqlalchemy_operation_registry.update(_geospatial_functions)\n\n\nfor _k, _v in _binary_ops.items():\n sqlalchemy_operation_registry[_k] = fixed_arity(_v, 2)\n", "path": "ibis/backends/base/sql/alchemy/registry.py" } ]
[ { "content": "import operator\nfrom typing import Any, Dict\n\nimport sqlalchemy as sa\nimport sqlalchemy.sql as sql\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.analysis as L\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.types as ir\nimport ibis.expr.window as W\n\nfrom .database import AlchemyTable\nfrom .geospatial import geospatial_supported\n\n\ndef variance_reduction(func_name):\n suffix = {'sample': 'samp', 'pop': 'pop'}\n\n def variance_compiler(t, expr):\n arg, how, where = expr.op().args\n\n if arg.type().equals(dt.boolean):\n arg = arg.cast('int32')\n\n func = getattr(\n sa.func, '{}_{}'.format(func_name, suffix.get(how, 'samp'))\n )\n\n if where is not None:\n arg = where.ifelse(arg, None)\n return func(t.translate(arg))\n\n return variance_compiler\n\n\ndef infix_op(infix_sym):\n def formatter(t, expr):\n op = expr.op()\n left, right = op.args\n\n left_arg = t.translate(left)\n right_arg = t.translate(right)\n return left_arg.op(infix_sym)(right_arg)\n\n return formatter\n\n\ndef fixed_arity(sa_func, arity):\n if isinstance(sa_func, str):\n sa_func = getattr(sa.func, sa_func)\n\n def formatter(t, expr):\n if arity != len(expr.op().args):\n raise com.IbisError('incorrect number of args')\n\n return _varargs_call(sa_func, t, expr)\n\n return formatter\n\n\ndef varargs(sa_func):\n def formatter(t, expr):\n op = expr.op()\n trans_args = [t.translate(arg) for arg in op.arg]\n return sa_func(*trans_args)\n\n return formatter\n\n\ndef _varargs_call(sa_func, t, expr):\n op = expr.op()\n trans_args = [t.translate(arg) for arg in op.args]\n return sa_func(*trans_args)\n\n\ndef get_sqla_table(ctx, table):\n if ctx.has_ref(table, parent_contexts=True):\n ctx_level = ctx\n sa_table = ctx_level.get_ref(table)\n while sa_table is None and ctx_level.parent is not ctx_level:\n ctx_level = ctx_level.parent\n sa_table = ctx_level.get_ref(table)\n else:\n op = table.op()\n if isinstance(op, AlchemyTable):\n sa_table = op.sqla_table\n else:\n sa_table = ctx.get_compiled_expr(table)\n\n return sa_table\n\n\ndef _table_column(t, expr):\n op = expr.op()\n ctx = t.context\n table = op.table\n\n sa_table = get_sqla_table(ctx, table)\n out_expr = getattr(sa_table.c, op.name)\n\n # If the column does not originate from the table set in the current SELECT\n # context, we should format as a subquery\n if t.permit_subquery and ctx.is_foreign_expr(table):\n return sa.select([out_expr])\n\n return out_expr\n\n\ndef _table_array_view(t, expr):\n ctx = t.context\n table = ctx.get_compiled_expr(expr.op().table)\n return table\n\n\ndef _exists_subquery(t, expr):\n from .query_builder import AlchemyCompiler\n\n op = expr.op()\n ctx = t.context\n\n filtered = op.foreign_table.filter(op.predicates).projection(\n [ir.literal(1).name(ir.unnamed)]\n )\n\n sub_ctx = ctx.subcontext()\n clause = AlchemyCompiler.to_sql(filtered, sub_ctx, exists=True)\n\n if isinstance(op, ops.NotExistsSubquery):\n clause = sa.not_(clause)\n\n return clause\n\n\ndef _cast(t, expr):\n op = expr.op()\n arg, target_type = op.args\n sa_arg = t.translate(arg)\n sa_type = t.get_sqla_type(target_type)\n\n if isinstance(arg, ir.CategoryValue) and target_type == 'int32':\n return sa_arg\n else:\n return sa.cast(sa_arg, sa_type)\n\n\ndef _contains(t, expr):\n op = expr.op()\n\n left, right = (t.translate(arg) for arg in op.args)\n\n return left.in_(right)\n\n\ndef _not_contains(t, expr):\n return sa.not_(_contains(t, expr))\n\n\ndef reduction(sa_func):\n def formatter(t, expr):\n 
op = expr.op()\n *args, where = op.args\n\n return _reduction_format(t, sa_func, where, *args)\n\n return formatter\n\n\ndef _reduction_format(t, sa_func, where, arg, *args):\n if where is not None:\n arg = t.translate(where.ifelse(arg, ibis.NA))\n else:\n arg = t.translate(arg)\n\n return sa_func(arg, *map(t.translate, args))\n\n\ndef _literal(t, expr):\n dtype = expr.type()\n value = expr.op().value\n\n if isinstance(dtype, dt.Set):\n return list(map(sa.literal, value))\n\n return sa.literal(value)\n\n\ndef _value_list(t, expr):\n return [t.translate(x) for x in expr.op().values]\n\n\ndef _is_null(t, expr):\n arg = t.translate(expr.op().args[0])\n return arg.is_(sa.null())\n\n\ndef _not_null(t, expr):\n arg = t.translate(expr.op().args[0])\n return arg.isnot(sa.null())\n\n\ndef _round(t, expr):\n op = expr.op()\n arg, digits = op.args\n sa_arg = t.translate(arg)\n\n f = sa.func.round\n\n if digits is not None:\n sa_digits = t.translate(digits)\n return f(sa_arg, sa_digits)\n else:\n return f(sa_arg)\n\n\ndef _floor_divide(t, expr):\n left, right = map(t.translate, expr.op().args)\n return sa.func.floor(left / right)\n\n\ndef _count_distinct(t, expr):\n arg, where = expr.op().args\n\n if where is not None:\n sa_arg = t.translate(where.ifelse(arg, None))\n else:\n sa_arg = t.translate(arg)\n\n return sa.func.count(sa_arg.distinct())\n\n\ndef _simple_case(t, expr):\n op = expr.op()\n\n cases = [op.base == case for case in op.cases]\n return _translate_case(t, cases, op.results, op.default)\n\n\ndef _searched_case(t, expr):\n op = expr.op()\n return _translate_case(t, op.cases, op.results, op.default)\n\n\ndef _translate_case(t, cases, results, default):\n case_args = [t.translate(arg) for arg in cases]\n result_args = [t.translate(arg) for arg in results]\n\n whens = zip(case_args, result_args)\n default = t.translate(default)\n\n return sa.case(list(whens), else_=default)\n\n\ndef _negate(t, expr):\n op = expr.op()\n (arg,) = map(t.translate, op.args)\n return sa.not_(arg) if isinstance(expr, ir.BooleanValue) else -arg\n\n\ndef unary(sa_func):\n return fixed_arity(sa_func, 1)\n\n\ndef _string_like(t, expr):\n arg, pattern, escape = expr.op().args\n result = t.translate(arg).like(t.translate(pattern), escape=escape)\n return result\n\n\ndef _startswith(t, expr):\n arg, start = expr.op().args\n return t.translate(arg).startswith(t.translate(start))\n\n\ndef _endswith(t, expr):\n arg, start = expr.op().args\n return t.translate(arg).endswith(t.translate(start))\n\n\n_cumulative_to_reduction = {\n ops.CumulativeSum: ops.Sum,\n ops.CumulativeMin: ops.Min,\n ops.CumulativeMax: ops.Max,\n ops.CumulativeMean: ops.Mean,\n ops.CumulativeAny: ops.Any,\n ops.CumulativeAll: ops.All,\n}\n\n\ndef _cumulative_to_window(translator, expr, window):\n win = W.cumulative_window()\n win = win.group_by(window._group_by).order_by(window._order_by)\n\n op = expr.op()\n\n klass = _cumulative_to_reduction[type(op)]\n new_op = klass(*op.args)\n new_expr = expr._factory(new_op, name=expr._name)\n\n if type(new_op) in translator._rewrites:\n new_expr = translator._rewrites[type(new_op)](new_expr)\n\n return L.windowize_function(new_expr, win)\n\n\ndef _window(t, expr):\n op = expr.op()\n\n arg, window = op.args\n reduction = t.translate(arg)\n\n window_op = arg.op()\n\n _require_order_by = (\n ops.DenseRank,\n ops.MinRank,\n ops.NTile,\n ops.PercentRank,\n )\n\n if isinstance(window_op, ops.CumulativeOp):\n arg = _cumulative_to_window(t, arg, window)\n return t.translate(arg)\n\n if window.max_lookback is not None:\n 
raise NotImplementedError(\n 'Rows with max lookback is not implemented '\n 'for SQLAlchemy-based backends.'\n )\n\n # Some analytic functions need to have the expression of interest in\n # the ORDER BY part of the window clause\n if isinstance(window_op, _require_order_by) and not window._order_by:\n order_by = t.translate(window_op.args[0])\n else:\n order_by = list(map(t.translate, window._order_by))\n\n partition_by = list(map(t.translate, window._group_by))\n\n frame_clause_not_allowed = (\n ops.Lag,\n ops.Lead,\n ops.DenseRank,\n ops.MinRank,\n ops.NTile,\n ops.PercentRank,\n ops.RowNumber,\n )\n\n how = {'range': 'range_'}.get(window.how, window.how)\n preceding = window.preceding\n additional_params = (\n {}\n if isinstance(window_op, frame_clause_not_allowed)\n else {\n how: (\n -preceding if preceding is not None else preceding,\n window.following,\n )\n }\n )\n result = reduction.over(\n partition_by=partition_by, order_by=order_by, **additional_params\n )\n\n if isinstance(\n window_op, (ops.RowNumber, ops.DenseRank, ops.MinRank, ops.NTile)\n ):\n return result - 1\n else:\n return result\n\n\ndef _lag(t, expr):\n arg, offset, default = expr.op().args\n if default is not None:\n raise NotImplementedError()\n\n sa_arg = t.translate(arg)\n sa_offset = t.translate(offset) if offset is not None else 1\n return sa.func.lag(sa_arg, sa_offset)\n\n\ndef _lead(t, expr):\n arg, offset, default = expr.op().args\n if default is not None:\n raise NotImplementedError()\n sa_arg = t.translate(arg)\n sa_offset = t.translate(offset) if offset is not None else 1\n return sa.func.lead(sa_arg, sa_offset)\n\n\ndef _ntile(t, expr):\n op = expr.op()\n args = op.args\n arg, buckets = map(t.translate, args)\n return sa.func.ntile(buckets)\n\n\ndef _sort_key(t, expr):\n # We need to define this for window functions that have an order by\n by, ascending = expr.op().args\n sort_direction = sa.asc if ascending else sa.desc\n return sort_direction(t.translate(by))\n\n\nsqlalchemy_operation_registry: Dict[Any, Any] = {\n ops.And: fixed_arity(sql.and_, 2),\n ops.Or: fixed_arity(sql.or_, 2),\n ops.Not: unary(sa.not_),\n ops.Abs: unary(sa.func.abs),\n ops.Cast: _cast,\n ops.Coalesce: varargs(sa.func.coalesce),\n ops.NullIf: fixed_arity(sa.func.nullif, 2),\n ops.Contains: _contains,\n ops.NotContains: _not_contains,\n ops.Count: reduction(sa.func.count),\n ops.Sum: reduction(sa.func.sum),\n ops.Mean: reduction(sa.func.avg),\n ops.Min: reduction(sa.func.min),\n ops.Max: reduction(sa.func.max),\n ops.CountDistinct: _count_distinct,\n ops.GroupConcat: reduction(sa.func.group_concat),\n ops.Between: fixed_arity(sa.between, 3),\n ops.IsNull: _is_null,\n ops.NotNull: _not_null,\n ops.Negate: _negate,\n ops.Round: _round,\n ops.TypeOf: unary(sa.func.typeof),\n ops.Literal: _literal,\n ops.ValueList: _value_list,\n ops.NullLiteral: lambda *args: sa.null(),\n ops.SimpleCase: _simple_case,\n ops.SearchedCase: _searched_case,\n ops.TableColumn: _table_column,\n ops.TableArrayView: _table_array_view,\n ops.ExistsSubquery: _exists_subquery,\n ops.NotExistsSubquery: _exists_subquery,\n # miscellaneous varargs\n ops.Least: varargs(sa.func.least),\n ops.Greatest: varargs(sa.func.greatest),\n # string\n ops.LPad: fixed_arity(sa.func.lpad, 3),\n ops.RPad: fixed_arity(sa.func.rpad, 3),\n ops.Strip: unary(sa.func.trim),\n ops.LStrip: unary(sa.func.ltrim),\n ops.RStrip: unary(sa.func.rtrim),\n ops.Repeat: fixed_arity(sa.func.repeat, 2),\n ops.Reverse: unary(sa.func.reverse),\n ops.StrRight: fixed_arity(sa.func.right, 2),\n 
ops.Lowercase: unary(sa.func.lower),\n ops.Uppercase: unary(sa.func.upper),\n ops.StringAscii: unary(sa.func.ascii),\n ops.StringLength: unary(sa.func.length),\n ops.StringReplace: fixed_arity(sa.func.replace, 3),\n ops.StringSQLLike: _string_like,\n ops.StartsWith: _startswith,\n ops.EndsWith: _endswith,\n # math\n ops.Ln: unary(sa.func.ln),\n ops.Exp: unary(sa.func.exp),\n ops.Sign: unary(sa.func.sign),\n ops.Sqrt: unary(sa.func.sqrt),\n ops.Ceil: unary(sa.func.ceil),\n ops.Floor: unary(sa.func.floor),\n ops.Power: fixed_arity(sa.func.pow, 2),\n ops.FloorDivide: _floor_divide,\n # other\n ops.SortKey: _sort_key,\n}\n\n\n# TODO: unit tests for each of these\n_binary_ops = {\n # Binary arithmetic\n ops.Add: operator.add,\n ops.Subtract: operator.sub,\n ops.Multiply: operator.mul,\n # XXX `ops.Divide` is overwritten in `translator.py` with a custom\n # function `_true_divide`, but for some reason both are required\n ops.Divide: operator.truediv,\n ops.Modulus: operator.mod,\n # Comparisons\n ops.Equals: operator.eq,\n ops.NotEquals: operator.ne,\n ops.Less: operator.lt,\n ops.LessEqual: operator.le,\n ops.Greater: operator.gt,\n ops.GreaterEqual: operator.ge,\n ops.IdenticalTo: lambda x, y: x.op('IS NOT DISTINCT FROM')(y),\n # Boolean comparisons\n # TODO\n}\n\n\nsqlalchemy_window_functions_registry = {\n ops.Lag: _lag,\n ops.Lead: _lead,\n ops.NTile: _ntile,\n ops.FirstValue: unary(sa.func.first_value),\n ops.LastValue: unary(sa.func.last_value),\n ops.RowNumber: fixed_arity(lambda: sa.func.row_number(), 0),\n ops.DenseRank: unary(lambda arg: sa.func.dense_rank()),\n ops.MinRank: unary(lambda arg: sa.func.rank()),\n ops.PercentRank: unary(lambda arg: sa.func.percent_rank()),\n ops.WindowOp: _window,\n ops.CumulativeOp: _window,\n ops.CumulativeMax: unary(sa.func.max),\n ops.CumulativeMin: unary(sa.func.min),\n ops.CumulativeSum: unary(sa.func.sum),\n ops.CumulativeMean: unary(sa.func.avg),\n}\n\nif geospatial_supported:\n _geospatial_functions = {\n ops.GeoArea: unary(sa.func.ST_Area),\n ops.GeoAsBinary: unary(sa.func.ST_AsBinary),\n ops.GeoAsEWKB: unary(sa.func.ST_AsEWKB),\n ops.GeoAsEWKT: unary(sa.func.ST_AsEWKT),\n ops.GeoAsText: unary(sa.func.ST_AsText),\n ops.GeoAzimuth: fixed_arity(sa.func.ST_Azimuth, 2),\n ops.GeoBuffer: fixed_arity(sa.func.ST_Buffer, 2),\n ops.GeoCentroid: unary(sa.func.ST_Centroid),\n ops.GeoContains: fixed_arity(sa.func.ST_Contains, 2),\n ops.GeoContainsProperly: fixed_arity(sa.func.ST_Contains, 2),\n ops.GeoCovers: fixed_arity(sa.func.ST_Covers, 2),\n ops.GeoCoveredBy: fixed_arity(sa.func.ST_CoveredBy, 2),\n ops.GeoCrosses: fixed_arity(sa.func.ST_Crosses, 2),\n ops.GeoDFullyWithin: fixed_arity(sa.func.ST_DFullyWithin, 3),\n ops.GeoDifference: fixed_arity(sa.func.ST_Difference, 2),\n ops.GeoDisjoint: fixed_arity(sa.func.ST_Disjoint, 2),\n ops.GeoDistance: fixed_arity(sa.func.ST_Distance, 2),\n ops.GeoDWithin: fixed_arity(sa.func.ST_DWithin, 3),\n ops.GeoEndPoint: unary(sa.func.ST_EndPoint),\n ops.GeoEnvelope: unary(sa.func.ST_Envelope),\n ops.GeoEquals: fixed_arity(sa.func.ST_Equals, 2),\n ops.GeoGeometryN: fixed_arity(sa.func.ST_GeometryN, 2),\n ops.GeoGeometryType: unary(sa.func.ST_GeometryType),\n ops.GeoIntersection: fixed_arity(sa.func.ST_Intersection, 2),\n ops.GeoIntersects: fixed_arity(sa.func.ST_Intersects, 2),\n ops.GeoIsValid: unary(sa.func.ST_IsValid),\n ops.GeoLineLocatePoint: fixed_arity(sa.func.ST_LineLocatePoint, 2),\n ops.GeoLineMerge: unary(sa.func.ST_LineMerge),\n ops.GeoLineSubstring: fixed_arity(sa.func.ST_LineSubstring, 3),\n ops.GeoLength: 
unary(sa.func.ST_Length),\n ops.GeoNPoints: unary(sa.func.ST_NPoints),\n ops.GeoOrderingEquals: fixed_arity(sa.func.ST_OrderingEquals, 2),\n ops.GeoOverlaps: fixed_arity(sa.func.ST_Overlaps, 2),\n ops.GeoPerimeter: unary(sa.func.ST_Perimeter),\n ops.GeoSimplify: fixed_arity(sa.func.ST_Simplify, 3),\n ops.GeoSRID: unary(sa.func.ST_SRID),\n ops.GeoSetSRID: fixed_arity(sa.func.ST_SetSRID, 2),\n ops.GeoStartPoint: unary(sa.func.ST_StartPoint),\n ops.GeoTouches: fixed_arity(sa.func.ST_Touches, 2),\n ops.GeoTransform: fixed_arity(sa.func.ST_Transform, 2),\n ops.GeoUnaryUnion: unary(sa.func.ST_Union),\n ops.GeoUnion: fixed_arity(sa.func.ST_Union, 2),\n ops.GeoWithin: fixed_arity(sa.func.ST_Within, 2),\n ops.GeoX: unary(sa.func.ST_X),\n ops.GeoY: unary(sa.func.ST_Y),\n # Missing Geospatial ops:\n # ST_AsGML\n # ST_AsGeoJSON\n # ST_AsKML\n # ST_AsRaster\n # ST_AsSVG\n # ST_AsTWKB\n # ST_Distance_Sphere\n # ST_Dump\n # ST_DumpPoints\n # ST_GeogFromText\n # ST_GeomFromEWKB\n # ST_GeomFromEWKT\n # ST_GeomFromText\n }\n\n sqlalchemy_operation_registry.update(_geospatial_functions)\n\n\nfor _k, _v in _binary_ops.items():\n sqlalchemy_operation_registry[_k] = fixed_arity(_v, 2)\n", "path": "ibis/backends/base/sql/alchemy/registry.py" } ]
diff --git a/ibis/backends/base/sql/alchemy/registry.py b/ibis/backends/base/sql/alchemy/registry.py index f3d653f2bd1c..d32de4c3bd54 100644 --- a/ibis/backends/base/sql/alchemy/registry.py +++ b/ibis/backends/base/sql/alchemy/registry.py @@ -77,7 +77,7 @@ def _varargs_call(sa_func, t, expr): def get_sqla_table(ctx, table): - if ctx.has_ref(table): + if ctx.has_ref(table, parent_contexts=True): ctx_level = ctx sa_table = ctx_level.get_ref(table) while sa_table is None and ctx_level.parent is not ctx_level: diff --git a/ibis/tests/sql/test_sqlalchemy.py b/ibis/tests/sql/test_sqlalchemy.py index 8ce295fdc584..e03a393ed00e 100644 --- a/ibis/tests/sql/test_sqlalchemy.py +++ b/ibis/tests/sql/test_sqlalchemy.py @@ -622,6 +622,87 @@ def test_sort_aggregation_translation_failure(self): self._compare_sqla(expr, ex) + def test_where_correlated_subquery_with_join(self): + # GH3163 + # ibis code + part = ibis.table([("p_partkey", "int64")], name="part") + partsupp = ibis.table( + [ + ("ps_partkey", "int64"), + ("ps_supplycost", "float64"), + ("ps_suppkey", "int64"), + ], + name="partsupp", + ) + supplier = ibis.table([("s_suppkey", "int64")], name="supplier") + + q = part.join(partsupp, part.p_partkey == partsupp.ps_partkey) + q = q[ + part.p_partkey, + partsupp.ps_supplycost, + ] + subq = partsupp.join( + supplier, supplier.s_suppkey == partsupp.ps_suppkey + ) + subq = subq.projection([partsupp.ps_partkey, partsupp.ps_supplycost]) + subq = subq[subq.ps_partkey == q.p_partkey] + + expr = q[q.ps_supplycost == subq.ps_supplycost.min()] + + # sqlalchemy code + part = sa.table("part", sa.column("p_partkey")) + supplier = sa.table("supplier", sa.column("s_suppkey")) + partsupp = sa.table( + "partsupp", + sa.column("ps_partkey"), + sa.column("ps_supplycost"), + sa.column("ps_suppkey"), + ) + + part_t1 = part.alias("t1") + partsupp_t2 = partsupp.alias("t2") + + t0 = ( + sa.select([part_t1.c.p_partkey, partsupp_t2.c.ps_supplycost]) + .select_from( + part_t1.join( + partsupp_t2, + onclause=part_t1.c.p_partkey == partsupp_t2.c.ps_partkey, + ) + ) + .alias("t0") + ) + + partsupp_t2 = partsupp.alias("t2") + supplier_t5 = supplier.alias("t5") + t3 = ( + sa.select([partsupp_t2.c.ps_partkey, partsupp_t2.c.ps_supplycost]) + .select_from( + partsupp_t2.join( + supplier_t5, + onclause=supplier_t5.c.s_suppkey + == partsupp_t2.c.ps_suppkey, + ) + ) + .alias("t3") + ) + + ex = ( + sa.select([t0.c.p_partkey, t0.c.ps_supplycost]) + .select_from(t0) + .where( + t0.c.ps_supplycost + == ( + sa.select([sa.func.min(t3.c.ps_supplycost).label("min")]) + .select_from(t3) + .where(t3.c.ps_partkey == t0.c.p_partkey) + .as_scalar() + ) + ) + ) + + self._compare_sqla(expr, ex) + def _compare_sqla(self, expr, sqla): context = AlchemyContext(compiler=AlchemyCompiler) result_sqla = AlchemyCompiler.to_sql(expr, context)
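A minimal sketch of how to inspect the SQLAlchemy output for the correlated-subquery expression, mirroring the expression and the compile helper used in the new test; the import path for `AlchemyContext`/`AlchemyCompiler` is an assumption and may differ between ibis versions.

```python
import ibis
# Assumed import path; the test suite obtains these names from the
# ibis.backends.base.sql.alchemy package.
from ibis.backends.base.sql.alchemy import AlchemyCompiler, AlchemyContext

part = ibis.table([("p_partkey", "int64")], name="part")
partsupp = ibis.table(
    [("ps_partkey", "int64"), ("ps_supplycost", "float64"), ("ps_suppkey", "int64")],
    name="partsupp",
)
supplier = ibis.table([("s_suppkey", "int64")], name="supplier")

q = part.join(partsupp, part.p_partkey == partsupp.ps_partkey)
q = q[part.p_partkey, partsupp.ps_supplycost]

subq = partsupp.join(supplier, supplier.s_suppkey == partsupp.ps_suppkey)
subq = subq.projection([partsupp.ps_partkey, partsupp.ps_supplycost])
subq = subq[subq.ps_partkey == q.p_partkey]

expr = q[q.ps_supplycost == subq.ps_supplycost.min()]

# With has_ref(table, parent_contexts=True), the subquery now references the
# outer query's column instead of re-joining a copy of the outer query.
context = AlchemyContext(compiler=AlchemyCompiler)
print(AlchemyCompiler.to_sql(expr, context))
```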
enthought__chaco-900
Removing a data source makes `DataView` objects inconsistent

**Problem Description**

Removing a data source from a `DataView` instance leaves it internally inconsistent.

**Reproduction Steps:**

This snippet demonstrates the inconsistency:

```python
import numpy as np
from chaco.api import DataView
from chaco.grid_data_source import GridDataSource

source = GridDataSource(xdata=np.array([1, 2, 4]), ydata=np.array([7, 8, 9]))
component = DataView()

print(f"Initial output: {component.map_screen(np.array([0, 1]))}")
component.range2d.add(source)  # (1)
component.map_screen(np.array([0, 1]))  # (2)
component.range2d.remove(source)  # (3)
print(f"Final output: {component.map_screen(np.array([0, 1]))}")
```

Output:
```
Initial output: [[0. 0.]]
Final output: [inf inf]
```

Initially, without any data sources, the output of `component.map_screen` is `[[0, 0]]`. When a data source is added and then removed, the bounds on `component.value_mapper.range` are refreshed to (-inf, inf). However, `component.value_mapper._cache_valid` isn't reverted from `True` to `False`. (It's set to `True` as a consequence of (2) in the snippet above.) As a result, subsequent calls to `component.map_screen` produce `inf`s. These `inf`s in turn have the potential to turn into `nan`s, raising exceptions in unexpected places downstream.

**Expected behavior:**

I *think* the output should be `[[0., 0.]]` again (or, more precisely, `component.value_mapper._cache_valid` should revert to `False` when `component.value_mapper.range.refresh()` is called). However, I'm not sure, so I'm opening this issue for discussion.

**OS, Python version:** macOS Catalina 10.15.7, Python 3.6.13
[ { "content": "# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX\n# All rights reserved.\n#\n# This software is provided without warranty under the terms of the BSD\n# license included in LICENSE.txt and may be redistributed only under\n# the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n#\n# Thanks for using Enthought open source!\n\n\"\"\"\nDefines the DataRange1D class.\n\"\"\"\n\n\n# Major library imports\nfrom math import ceil, floor, log\n\nfrom numpy import compress, errstate, inf, isinf, isnan, ndarray\n\n# Enthought library imports\nfrom traits.api import (\n Bool, CFloat, Constant, Enum, Float, Property, Callable, Union)\n\n# Local relative imports\nfrom .base import arg_find_runs\nfrom .base_data_range import BaseDataRange\nfrom .ticks import heckbert_interval\n\n\nclass DataRange1D(BaseDataRange):\n \"\"\"Represents a 1-D data range.\"\"\"\n\n #: The actual value of the lower bound of this range (overrides\n #: AbstractDataRange). To set it, use **low_setting**.\n low = Property\n #: The actual value of the upper bound of this range (overrides\n #: AbstractDataRange). To set it, use **high_setting**.\n high = Property\n\n #: Property for the lower bound of this range (overrides AbstractDataRange).\n #:\n #: * 'auto': The lower bound is automatically set at or below the minimum\n #: of the data.\n #: * 'track': The lower bound tracks the upper bound by **tracking_amount**.\n #: * CFloat: An explicit value for the lower bound\n low_setting = Property(Union(Constant(\"auto\"), Constant(\"track\"), CFloat))\n #: Property for the upper bound of this range (overrides AbstractDataRange).\n #:\n #: * 'auto': The upper bound is automatically set at or above the maximum\n #: of the data.\n #: * 'track': The upper bound tracks the lower bound by **tracking_amount**.\n #: * CFloat: An explicit value for the upper bound\n high_setting = Property(Union(Constant(\"auto\"), Constant(\"track\"), CFloat))\n\n #: Do \"auto\" bounds imply an exact fit to the data? If False,\n #: they pad a little bit of margin on either side.\n tight_bounds = Bool(True)\n\n #: A user supplied function returning the proper bounding interval.\n #: bounds_func takes (data_low, data_high, margin, tight_bounds)\n #: and returns (low, high)\n bounds_func = Callable\n\n #: The amount of margin to place on either side of the data, expressed as\n #: a percentage of the full data width\n margin = Float(0.05)\n\n #: The minimum percentage difference between low and high. That is,\n #: (high-low) >= epsilon * low.\n #: Used to be 1.0e-20 but chaco cannot plot at such a precision!\n epsilon = CFloat(1.0e-10)\n\n #: When either **high** or **low** tracks the other, track by this amount.\n default_tracking_amount = CFloat(20.0)\n\n #: The current tracking amount. This value changes with zooming.\n tracking_amount = default_tracking_amount\n\n #: Default tracking state. 
This value is used when self.reset() is called.\n #:\n #: * 'auto': Both bounds reset to 'auto'.\n #: * 'high_track': The high bound resets to 'track', and the low bound\n #: resets to 'auto'.\n #: * 'low_track': The low bound resets to 'track', and the high bound\n #: resets to 'auto'.\n default_state = Enum(\"auto\", \"high_track\", \"low_track\")\n\n #: FIXME: this attribute is not used anywhere, is it safe to remove it?\n #: Is this range dependent upon another range?\n fit_to_subset = Bool(False)\n\n # ------------------------------------------------------------------------\n # Private traits\n # ------------------------------------------------------------------------\n\n # The \"_setting\" attributes correspond to what the user has \"set\"; the\n # \"_value\" attributes are the actual numerical values for the given\n # setting.\n\n # The user-specified low setting.\n _low_setting = Union(Enum(\"auto\", \"track\"), CFloat)\n # The actual numerical value for the low setting.\n _low_value = CFloat(-inf)\n # The user-specified high setting.\n _high_setting = Union(Enum(\"auto\", \"track\"), CFloat)\n # The actual numerical value for the high setting.\n _high_value = CFloat(inf)\n\n # ------------------------------------------------------------------------\n # AbstractRange interface\n # ------------------------------------------------------------------------\n\n def clip_data(self, data):\n \"\"\"Returns a list of data values that are within the range.\n\n Implements AbstractDataRange.\n \"\"\"\n return compress(self.mask_data(data), data)\n\n def mask_data(self, data):\n \"\"\"Returns a mask array, indicating whether values in the given array\n are inside the range.\n\n Implements AbstractDataRange.\n \"\"\"\n with errstate(invalid=\"ignore\"):\n # Running under context because the data array may contain NaNs.\n # These are strictly invalid for comparison and Numpy would emit\n # a warning. 
Since we are happy with the default behavior (NaNs\n # become \"False\" in the mask), we silence the warning.\n mask = (data.view(ndarray) >= self._low_value) & (\n data.view(ndarray) <= self._high_value\n )\n return mask\n\n def bound_data(self, data):\n \"\"\"Returns a tuple of indices for the start and end of the first run\n of *data* that falls within the range.\n\n Implements AbstractDataRange.\n \"\"\"\n mask = self.mask_data(data)\n runs = arg_find_runs(mask, \"flat\")\n # Since runs of \"0\" are also considered runs, we have to cycle through\n # until we find the first run of \"1\"s.\n for run in runs:\n if mask[run[0]] == 1:\n # arg_find_runs returns 1 past the end\n return run[0], run[1] - 1\n return (0, 0)\n\n def set_bounds(self, low, high):\n \"\"\"Sets all the bounds of the range simultaneously.\n\n Implements AbstractDataRange.\n \"\"\"\n if low == \"track\":\n # Set the high setting first\n result_high = self._do_set_high_setting(high, fire_event=False)\n result_low = self._do_set_low_setting(low, fire_event=False)\n result = result_low or result_high\n else:\n # Either set low first or order doesn't matter\n result_low = self._do_set_low_setting(low, fire_event=False)\n result_high = self._do_set_high_setting(high, fire_event=False)\n result = result_high or result_low\n if result:\n self.updated = result\n\n def scale_tracking_amount(self, multiplier):\n \"\"\"Sets the **tracking_amount** to a new value, scaled by *multiplier*.\"\"\"\n self.tracking_amount = self.tracking_amount * multiplier\n self._do_track()\n\n def set_tracking_amount(self, amount):\n \"\"\"Sets the **tracking_amount** to a new value, *amount*.\"\"\"\n self.tracking_amount = amount\n self._do_track()\n\n def set_default_tracking_amount(self, amount):\n \"\"\"Sets the **default_tracking_amount** to a new value, *amount*.\"\"\"\n self.default_tracking_amount = amount\n\n # ------------------------------------------------------------------------\n # Public methods\n # ------------------------------------------------------------------------\n\n def reset(self):\n \"\"\"Resets the bounds of this range, based on **default_state**.\"\"\"\n # need to maintain 'track' setting\n if self.default_state == \"auto\":\n self._high_setting = \"auto\"\n self._low_setting = \"auto\"\n elif self.default_state == \"low_track\":\n self._high_setting = \"auto\"\n self._low_setting = \"track\"\n elif self.default_state == \"high_track\":\n self._high_setting = \"track\"\n self._low_setting = \"auto\"\n self._refresh_bounds()\n self.tracking_amount = self.default_tracking_amount\n\n def refresh(self, event=None):\n \"\"\"If any of the bounds is 'auto', this method refreshes the actual\n low and high values from the set of the view filters' data sources.\n \"\"\"\n if (\"auto\" in (self._low_setting, self._high_setting)) or (\n \"track\" in (self._low_setting, self._high_setting)\n ):\n # If the user has hard-coded bounds, then refresh() doesn't do\n # anything.\n self._refresh_bounds()\n else:\n return\n\n # ------------------------------------------------------------------------\n # Private methods (getters and setters)\n # ------------------------------------------------------------------------\n\n def _get_low(self):\n return float(self._low_value)\n\n def _set_low(self, val):\n return self._set_low_setting(val)\n\n def _get_low_setting(self):\n return self._low_setting\n\n def _do_set_low_setting(self, val, fire_event=True):\n \"\"\"\n Returns\n -------\n If fire_event is False and the change would have fired an event, 
returns\n the tuple of the new low and high values. Otherwise returns None. In\n particular, if fire_event is True, it always returns None.\n \"\"\"\n new_values = None\n if self._low_setting != val:\n\n # Save the new setting.\n self._low_setting = val\n\n # If val is 'auto' or 'track', get the corresponding numerical\n # value.\n if val == \"auto\":\n if len(self.sources) > 0:\n val = min(\n [source.get_bounds()[0] for source in self.sources]\n )\n else:\n val = -inf\n elif val == \"track\":\n if len(self.sources) > 0 or self._high_setting != \"auto\":\n val = self._high_value - self.tracking_amount\n else:\n val = -inf\n\n # val is now a numerical value. If it is the same as the current\n # value, there is nothing to do.\n if self._low_value != val:\n self._low_value = val\n if self._high_setting == \"track\":\n self._high_value = val + self.tracking_amount\n if fire_event:\n self.updated = (self._low_value, self._high_value)\n else:\n new_values = (self._low_value, self._high_value)\n\n return new_values\n\n def _set_low_setting(self, val):\n self._do_set_low_setting(val, True)\n\n def _get_high(self):\n return float(self._high_value)\n\n def _set_high(self, val):\n return self._set_high_setting(val)\n\n def _get_high_setting(self):\n return self._high_setting\n\n def _do_set_high_setting(self, val, fire_event=True):\n \"\"\"\n Returns\n -------\n If fire_event is False and the change would have fired an event, returns\n the tuple of the new low and high values. Otherwise returns None. In\n particular, if fire_event is True, it always returns None.\n \"\"\"\n new_values = None\n if self._high_setting != val:\n\n # Save the new setting.\n self._high_setting = val\n\n # If val is 'auto' or 'track', get the corresponding numerical\n # value.\n if val == \"auto\":\n if len(self.sources) > 0:\n val = max(\n [source.get_bounds()[1] for source in self.sources]\n )\n else:\n val = inf\n elif val == \"track\":\n if len(self.sources) > 0 or self._low_setting != \"auto\":\n val = self._low_value + self.tracking_amount\n else:\n val = inf\n\n # val is now a numerical value. 
If it is the same as the current\n # value, there is nothing to do.\n if self._high_value != val:\n self._high_value = val\n if self._low_setting == \"track\":\n self._low_value = val - self.tracking_amount\n if fire_event:\n self.updated = (self._low_value, self._high_value)\n else:\n new_values = (self._low_value, self._high_value)\n\n return new_values\n\n def _set_high_setting(self, val):\n self._do_set_high_setting(val, True)\n\n def _refresh_bounds(self):\n null_bounds = False\n if len(self.sources) == 0:\n null_bounds = True\n else:\n bounds_list = [\n source.get_bounds()\n for source in self.sources\n if source.get_size() > 0\n ]\n\n if len(bounds_list) == 0:\n null_bounds = True\n\n if null_bounds:\n # If we have no sources and our settings are \"auto\", then reset our\n # bounds to infinity; otherwise, set the _value to the corresponding\n # setting.\n if self._low_setting in (\"auto\", \"track\"):\n self._low_value = -inf\n else:\n self._low_value = self._low_setting\n if self._high_setting in (\"auto\", \"track\"):\n self._high_value = inf\n else:\n self._high_value = self._high_setting\n return\n else:\n mins, maxes = zip(*bounds_list)\n\n low_start, high_start = calc_bounds(\n self._low_setting,\n self._high_setting,\n mins,\n maxes,\n self.epsilon,\n self.tight_bounds,\n margin=self.margin,\n track_amount=self.tracking_amount,\n bounds_func=self.bounds_func,\n )\n\n if (self._low_value != low_start) or (self._high_value != high_start):\n self._low_value = low_start\n self._high_value = high_start\n self.updated = (self._low_value, self._high_value)\n\n def _do_track(self):\n changed = False\n if self._low_setting == \"track\":\n new_value = self._high_value - self.tracking_amount\n if self._low_value != new_value:\n self._low_value = new_value\n changed = True\n elif self._high_setting == \"track\":\n new_value = self._low_value + self.tracking_amount\n if self._high_value != new_value:\n self._high_value = new_value\n changed = True\n if changed:\n self.updated = (self._low_value, self._high_value)\n\n # ------------------------------------------------------------------------\n # Event handlers\n # ------------------------------------------------------------------------\n\n def _sources_items_changed(self, event):\n self.refresh()\n for source in event.removed:\n source.observe(self.refresh, \"data_changed\", remove=True)\n for source in event.added:\n source.observe(self.refresh, \"data_changed\")\n\n def _sources_changed(self, old, new):\n self.refresh()\n for source in old:\n source.observe(self.refresh, \"data_changed\", remove=True)\n for source in new:\n source.observe(self.refresh, \"data_changed\")\n\n # ------------------------------------------------------------------------\n # Serialization interface\n # ------------------------------------------------------------------------\n\n def _post_load(self):\n self._sources_changed(None, self.sources)\n\n\n# method to calculate bounds for a given 1-dimensional set of data\ndef calc_bounds(\n low_set,\n high_set,\n mins,\n maxes,\n epsilon,\n tight_bounds,\n margin=0.08,\n track_amount=0,\n bounds_func=None,\n):\n \"\"\"Calculates bounds for a given 1-D set of data.\n\n Parameters\n ----------\n low_set : 'auto', 'track', or number\n Current low setting\n high_set : 'auto', 'track', or number\n Current high setting\n mins : list of numbers\n Potential minima.\n maxes : list\n Potential maxima.\n epsilon : number\n Minimum percentage difference between bounds\n tight_bounds : Boolean\n Do 'auto' bounds imply an exact fit 
to the data? If False, they pad a\n little bit of margin on either side.\n margin : float (default=0.08)\n The margin, expressed as a percentage of total data width, to place\n on either side of the data if tight_bounds is False.\n track_amount : number\n The amount by which a 'track' bound tracks another bound.\n bounds_func : Callable\n A callable which can override the bounds calculation.\n\n Returns\n -------\n (min, max) for the new bounds. If either of the calculated bounds is NaN,\n returns (0,0).\n\n Description\n -----------\n Setting both *low_set* and *high_set* to 'track' is an invalid state;\n the method copes by setting *high_set* to 'auto', and proceeding.\n \"\"\"\n\n if (low_set == \"track\") and (high_set == \"track\"):\n high_set = \"auto\"\n\n if low_set == \"auto\":\n real_min = min(mins)\n elif low_set == \"track\":\n # real_max hasn't been set yet\n pass\n else:\n real_min = low_set\n\n if high_set == \"auto\":\n real_max = max(maxes)\n elif high_set == \"track\":\n # real_min has been set now\n real_max = real_min + track_amount\n else:\n real_max = high_set\n\n # Go back and set real_min if we need to\n if low_set == \"track\":\n real_min = real_max - track_amount\n\n # If we're all NaNs, just return a 0,1 range\n if isnan(real_max) or isnan(real_min):\n return 0, 0\n\n if (\n not isinf(real_min)\n and not isinf(real_max)\n and (abs(real_max - real_min) <= abs(epsilon * real_min))\n ):\n # If we get here, then real_min and real_max are (for all\n # intents and purposes) identical, and so we just base\n # everything off of real_min.\n # Note: we have to use <= and not strict < because otherwise\n # we won't catch the cases when real_min == 0.0.\n if abs(real_min) > 1.0:\n # Round up to the next power of ten that encloses these\n log_val = log(abs(real_min), 10)\n if real_min >= 0:\n real_min = pow(10, floor(log_val))\n real_max = pow(10, ceil(log_val))\n else:\n real_min = -pow(10, ceil(log_val))\n real_max = -pow(10, floor(log_val))\n else:\n # If the user has a constant value less than 1, then these\n # are the bounds we use.\n if real_min > 0.0:\n real_max = 2 * real_min\n real_min = 0.0\n elif real_min == 0.0:\n real_min = -1.0\n real_max = 1.0\n else:\n real_min = 2 * real_min\n real_max = 0.0\n\n # Now test if the bounds leave some room around the data, unless\n # tight_bounds==True or unless another function to compute the bound\n # is provided.\n if bounds_func is not None:\n return bounds_func(real_min, real_max, margin, tight_bounds)\n elif not tight_bounds:\n low, high, d = heckbert_interval(real_min, real_max)\n # 2nd run of heckbert_interval necessary? Will be if bounds are\n # too tights (ie within the margin).\n rerun = False\n if abs(low - real_min) / (high - low) < margin:\n modified_min = real_min - (high - low) * margin\n rerun = True\n else:\n modified_min = real_min\n if abs(high - real_max) / (high - low) < margin:\n modified_max = real_max + (high - low) * margin\n rerun = True\n else:\n modified_max = real_max\n if rerun:\n low, high, d = heckbert_interval(modified_min, modified_max)\n return low, high\n else:\n return real_min, real_max\n", "path": "chaco/data_range_1d.py" } ]
[ { "content": "# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX\n# All rights reserved.\n#\n# This software is provided without warranty under the terms of the BSD\n# license included in LICENSE.txt and may be redistributed only under\n# the conditions described in the aforementioned license. The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n#\n# Thanks for using Enthought open source!\n\n\"\"\"\nDefines the DataRange1D class.\n\"\"\"\n\n\n# Major library imports\nfrom math import ceil, floor, log\n\nfrom numpy import compress, errstate, inf, isinf, isnan, ndarray\n\n# Enthought library imports\nfrom traits.api import (\n Bool, CFloat, Constant, Enum, Float, Property, Callable, Union)\n\n# Local relative imports\nfrom .base import arg_find_runs\nfrom .base_data_range import BaseDataRange\nfrom .ticks import heckbert_interval\n\n\nclass DataRange1D(BaseDataRange):\n \"\"\"Represents a 1-D data range.\"\"\"\n\n #: The actual value of the lower bound of this range (overrides\n #: AbstractDataRange). To set it, use **low_setting**.\n low = Property\n #: The actual value of the upper bound of this range (overrides\n #: AbstractDataRange). To set it, use **high_setting**.\n high = Property\n\n #: Property for the lower bound of this range (overrides AbstractDataRange).\n #:\n #: * 'auto': The lower bound is automatically set at or below the minimum\n #: of the data.\n #: * 'track': The lower bound tracks the upper bound by **tracking_amount**.\n #: * CFloat: An explicit value for the lower bound\n low_setting = Property(Union(Constant(\"auto\"), Constant(\"track\"), CFloat))\n #: Property for the upper bound of this range (overrides AbstractDataRange).\n #:\n #: * 'auto': The upper bound is automatically set at or above the maximum\n #: of the data.\n #: * 'track': The upper bound tracks the lower bound by **tracking_amount**.\n #: * CFloat: An explicit value for the upper bound\n high_setting = Property(Union(Constant(\"auto\"), Constant(\"track\"), CFloat))\n\n #: Do \"auto\" bounds imply an exact fit to the data? If False,\n #: they pad a little bit of margin on either side.\n tight_bounds = Bool(True)\n\n #: A user supplied function returning the proper bounding interval.\n #: bounds_func takes (data_low, data_high, margin, tight_bounds)\n #: and returns (low, high)\n bounds_func = Callable\n\n #: The amount of margin to place on either side of the data, expressed as\n #: a percentage of the full data width\n margin = Float(0.05)\n\n #: The minimum percentage difference between low and high. That is,\n #: (high-low) >= epsilon * low.\n #: Used to be 1.0e-20 but chaco cannot plot at such a precision!\n epsilon = CFloat(1.0e-10)\n\n #: When either **high** or **low** tracks the other, track by this amount.\n default_tracking_amount = CFloat(20.0)\n\n #: The current tracking amount. This value changes with zooming.\n tracking_amount = default_tracking_amount\n\n #: Default tracking state. 
This value is used when self.reset() is called.\n #:\n #: * 'auto': Both bounds reset to 'auto'.\n #: * 'high_track': The high bound resets to 'track', and the low bound\n #: resets to 'auto'.\n #: * 'low_track': The low bound resets to 'track', and the high bound\n #: resets to 'auto'.\n default_state = Enum(\"auto\", \"high_track\", \"low_track\")\n\n #: FIXME: this attribute is not used anywhere, is it safe to remove it?\n #: Is this range dependent upon another range?\n fit_to_subset = Bool(False)\n\n # ------------------------------------------------------------------------\n # Private traits\n # ------------------------------------------------------------------------\n\n # The \"_setting\" attributes correspond to what the user has \"set\"; the\n # \"_value\" attributes are the actual numerical values for the given\n # setting.\n\n # The user-specified low setting.\n _low_setting = Union(Enum(\"auto\", \"track\"), CFloat)\n # The actual numerical value for the low setting.\n _low_value = CFloat(-inf)\n # The user-specified high setting.\n _high_setting = Union(Enum(\"auto\", \"track\"), CFloat)\n # The actual numerical value for the high setting.\n _high_value = CFloat(inf)\n\n # ------------------------------------------------------------------------\n # AbstractRange interface\n # ------------------------------------------------------------------------\n\n def clip_data(self, data):\n \"\"\"Returns a list of data values that are within the range.\n\n Implements AbstractDataRange.\n \"\"\"\n return compress(self.mask_data(data), data)\n\n def mask_data(self, data):\n \"\"\"Returns a mask array, indicating whether values in the given array\n are inside the range.\n\n Implements AbstractDataRange.\n \"\"\"\n with errstate(invalid=\"ignore\"):\n # Running under context because the data array may contain NaNs.\n # These are strictly invalid for comparison and Numpy would emit\n # a warning. 
Since we are happy with the default behavior (NaNs\n # become \"False\" in the mask), we silence the warning.\n mask = (data.view(ndarray) >= self._low_value) & (\n data.view(ndarray) <= self._high_value\n )\n return mask\n\n def bound_data(self, data):\n \"\"\"Returns a tuple of indices for the start and end of the first run\n of *data* that falls within the range.\n\n Implements AbstractDataRange.\n \"\"\"\n mask = self.mask_data(data)\n runs = arg_find_runs(mask, \"flat\")\n # Since runs of \"0\" are also considered runs, we have to cycle through\n # until we find the first run of \"1\"s.\n for run in runs:\n if mask[run[0]] == 1:\n # arg_find_runs returns 1 past the end\n return run[0], run[1] - 1\n return (0, 0)\n\n def set_bounds(self, low, high):\n \"\"\"Sets all the bounds of the range simultaneously.\n\n Implements AbstractDataRange.\n \"\"\"\n if low == \"track\":\n # Set the high setting first\n result_high = self._do_set_high_setting(high, fire_event=False)\n result_low = self._do_set_low_setting(low, fire_event=False)\n result = result_low or result_high\n else:\n # Either set low first or order doesn't matter\n result_low = self._do_set_low_setting(low, fire_event=False)\n result_high = self._do_set_high_setting(high, fire_event=False)\n result = result_high or result_low\n if result:\n self.updated = result\n\n def scale_tracking_amount(self, multiplier):\n \"\"\"Sets the **tracking_amount** to a new value, scaled by *multiplier*.\"\"\"\n self.tracking_amount = self.tracking_amount * multiplier\n self._do_track()\n\n def set_tracking_amount(self, amount):\n \"\"\"Sets the **tracking_amount** to a new value, *amount*.\"\"\"\n self.tracking_amount = amount\n self._do_track()\n\n def set_default_tracking_amount(self, amount):\n \"\"\"Sets the **default_tracking_amount** to a new value, *amount*.\"\"\"\n self.default_tracking_amount = amount\n\n # ------------------------------------------------------------------------\n # Public methods\n # ------------------------------------------------------------------------\n\n def reset(self):\n \"\"\"Resets the bounds of this range, based on **default_state**.\"\"\"\n # need to maintain 'track' setting\n if self.default_state == \"auto\":\n self._high_setting = \"auto\"\n self._low_setting = \"auto\"\n elif self.default_state == \"low_track\":\n self._high_setting = \"auto\"\n self._low_setting = \"track\"\n elif self.default_state == \"high_track\":\n self._high_setting = \"track\"\n self._low_setting = \"auto\"\n self._refresh_bounds()\n self.tracking_amount = self.default_tracking_amount\n\n def refresh(self, event=None):\n \"\"\"If any of the bounds is 'auto', this method refreshes the actual\n low and high values from the set of the view filters' data sources.\n \"\"\"\n if (\"auto\" in (self._low_setting, self._high_setting)) or (\n \"track\" in (self._low_setting, self._high_setting)\n ):\n # If the user has hard-coded bounds, then refresh() doesn't do\n # anything.\n self._refresh_bounds()\n else:\n return\n\n # ------------------------------------------------------------------------\n # Private methods (getters and setters)\n # ------------------------------------------------------------------------\n\n def _get_low(self):\n return float(self._low_value)\n\n def _set_low(self, val):\n return self._set_low_setting(val)\n\n def _get_low_setting(self):\n return self._low_setting\n\n def _do_set_low_setting(self, val, fire_event=True):\n \"\"\"\n Returns\n -------\n If fire_event is False and the change would have fired an event, 
returns\n the tuple of the new low and high values. Otherwise returns None. In\n particular, if fire_event is True, it always returns None.\n \"\"\"\n new_values = None\n if self._low_setting != val:\n\n # Save the new setting.\n self._low_setting = val\n\n # If val is 'auto' or 'track', get the corresponding numerical\n # value.\n if val == \"auto\":\n if len(self.sources) > 0:\n val = min(\n [source.get_bounds()[0] for source in self.sources]\n )\n else:\n val = -inf\n elif val == \"track\":\n if len(self.sources) > 0 or self._high_setting != \"auto\":\n val = self._high_value - self.tracking_amount\n else:\n val = -inf\n\n # val is now a numerical value. If it is the same as the current\n # value, there is nothing to do.\n if self._low_value != val:\n self._low_value = val\n if self._high_setting == \"track\":\n self._high_value = val + self.tracking_amount\n if fire_event:\n self.updated = (self._low_value, self._high_value)\n else:\n new_values = (self._low_value, self._high_value)\n\n return new_values\n\n def _set_low_setting(self, val):\n self._do_set_low_setting(val, True)\n\n def _get_high(self):\n return float(self._high_value)\n\n def _set_high(self, val):\n return self._set_high_setting(val)\n\n def _get_high_setting(self):\n return self._high_setting\n\n def _do_set_high_setting(self, val, fire_event=True):\n \"\"\"\n Returns\n -------\n If fire_event is False and the change would have fired an event, returns\n the tuple of the new low and high values. Otherwise returns None. In\n particular, if fire_event is True, it always returns None.\n \"\"\"\n new_values = None\n if self._high_setting != val:\n\n # Save the new setting.\n self._high_setting = val\n\n # If val is 'auto' or 'track', get the corresponding numerical\n # value.\n if val == \"auto\":\n if len(self.sources) > 0:\n val = max(\n [source.get_bounds()[1] for source in self.sources]\n )\n else:\n val = inf\n elif val == \"track\":\n if len(self.sources) > 0 or self._low_setting != \"auto\":\n val = self._low_value + self.tracking_amount\n else:\n val = inf\n\n # val is now a numerical value. 
If it is the same as the current\n # value, there is nothing to do.\n if self._high_value != val:\n self._high_value = val\n if self._low_setting == \"track\":\n self._low_value = val - self.tracking_amount\n if fire_event:\n self.updated = (self._low_value, self._high_value)\n else:\n new_values = (self._low_value, self._high_value)\n\n return new_values\n\n def _set_high_setting(self, val):\n self._do_set_high_setting(val, True)\n\n def _refresh_bounds(self):\n null_bounds = False\n if len(self.sources) == 0:\n null_bounds = True\n else:\n bounds_list = [\n source.get_bounds()\n for source in self.sources\n if source.get_size() > 0\n ]\n\n if len(bounds_list) == 0:\n null_bounds = True\n\n if null_bounds:\n # If we have no sources and our settings are \"auto\", then reset our\n # bounds to infinity; otherwise, set the _value to the corresponding\n # setting.\n if self._low_setting in (\"auto\", \"track\"):\n self._low_value = -inf\n else:\n self._low_value = self._low_setting\n if self._high_setting in (\"auto\", \"track\"):\n self._high_value = inf\n else:\n self._high_value = self._high_setting\n self.updated = (self._low_value, self._high_value)\n return\n else:\n mins, maxes = zip(*bounds_list)\n\n low_start, high_start = calc_bounds(\n self._low_setting,\n self._high_setting,\n mins,\n maxes,\n self.epsilon,\n self.tight_bounds,\n margin=self.margin,\n track_amount=self.tracking_amount,\n bounds_func=self.bounds_func,\n )\n\n if (self._low_value != low_start) or (self._high_value != high_start):\n self._low_value = low_start\n self._high_value = high_start\n self.updated = (self._low_value, self._high_value)\n\n def _do_track(self):\n changed = False\n if self._low_setting == \"track\":\n new_value = self._high_value - self.tracking_amount\n if self._low_value != new_value:\n self._low_value = new_value\n changed = True\n elif self._high_setting == \"track\":\n new_value = self._low_value + self.tracking_amount\n if self._high_value != new_value:\n self._high_value = new_value\n changed = True\n if changed:\n self.updated = (self._low_value, self._high_value)\n\n # ------------------------------------------------------------------------\n # Event handlers\n # ------------------------------------------------------------------------\n\n def _sources_items_changed(self, event):\n self.refresh()\n for source in event.removed:\n source.observe(self.refresh, \"data_changed\", remove=True)\n for source in event.added:\n source.observe(self.refresh, \"data_changed\")\n\n def _sources_changed(self, old, new):\n self.refresh()\n for source in old:\n source.observe(self.refresh, \"data_changed\", remove=True)\n for source in new:\n source.observe(self.refresh, \"data_changed\")\n\n # ------------------------------------------------------------------------\n # Serialization interface\n # ------------------------------------------------------------------------\n\n def _post_load(self):\n self._sources_changed(None, self.sources)\n\n\n# method to calculate bounds for a given 1-dimensional set of data\ndef calc_bounds(\n low_set,\n high_set,\n mins,\n maxes,\n epsilon,\n tight_bounds,\n margin=0.08,\n track_amount=0,\n bounds_func=None,\n):\n \"\"\"Calculates bounds for a given 1-D set of data.\n\n Parameters\n ----------\n low_set : 'auto', 'track', or number\n Current low setting\n high_set : 'auto', 'track', or number\n Current high setting\n mins : list of numbers\n Potential minima.\n maxes : list\n Potential maxima.\n epsilon : number\n Minimum percentage difference between bounds\n 
tight_bounds : Boolean\n Do 'auto' bounds imply an exact fit to the data? If False, they pad a\n little bit of margin on either side.\n margin : float (default=0.08)\n The margin, expressed as a percentage of total data width, to place\n on either side of the data if tight_bounds is False.\n track_amount : number\n The amount by which a 'track' bound tracks another bound.\n bounds_func : Callable\n A callable which can override the bounds calculation.\n\n Returns\n -------\n (min, max) for the new bounds. If either of the calculated bounds is NaN,\n returns (0,0).\n\n Description\n -----------\n Setting both *low_set* and *high_set* to 'track' is an invalid state;\n the method copes by setting *high_set* to 'auto', and proceeding.\n \"\"\"\n\n if (low_set == \"track\") and (high_set == \"track\"):\n high_set = \"auto\"\n\n if low_set == \"auto\":\n real_min = min(mins)\n elif low_set == \"track\":\n # real_max hasn't been set yet\n pass\n else:\n real_min = low_set\n\n if high_set == \"auto\":\n real_max = max(maxes)\n elif high_set == \"track\":\n # real_min has been set now\n real_max = real_min + track_amount\n else:\n real_max = high_set\n\n # Go back and set real_min if we need to\n if low_set == \"track\":\n real_min = real_max - track_amount\n\n # If we're all NaNs, just return a 0,1 range\n if isnan(real_max) or isnan(real_min):\n return 0, 0\n\n if (\n not isinf(real_min)\n and not isinf(real_max)\n and (abs(real_max - real_min) <= abs(epsilon * real_min))\n ):\n # If we get here, then real_min and real_max are (for all\n # intents and purposes) identical, and so we just base\n # everything off of real_min.\n # Note: we have to use <= and not strict < because otherwise\n # we won't catch the cases when real_min == 0.0.\n if abs(real_min) > 1.0:\n # Round up to the next power of ten that encloses these\n log_val = log(abs(real_min), 10)\n if real_min >= 0:\n real_min = pow(10, floor(log_val))\n real_max = pow(10, ceil(log_val))\n else:\n real_min = -pow(10, ceil(log_val))\n real_max = -pow(10, floor(log_val))\n else:\n # If the user has a constant value less than 1, then these\n # are the bounds we use.\n if real_min > 0.0:\n real_max = 2 * real_min\n real_min = 0.0\n elif real_min == 0.0:\n real_min = -1.0\n real_max = 1.0\n else:\n real_min = 2 * real_min\n real_max = 0.0\n\n # Now test if the bounds leave some room around the data, unless\n # tight_bounds==True or unless another function to compute the bound\n # is provided.\n if bounds_func is not None:\n return bounds_func(real_min, real_max, margin, tight_bounds)\n elif not tight_bounds:\n low, high, d = heckbert_interval(real_min, real_max)\n # 2nd run of heckbert_interval necessary? Will be if bounds are\n # too tights (ie within the margin).\n rerun = False\n if abs(low - real_min) / (high - low) < margin:\n modified_min = real_min - (high - low) * margin\n rerun = True\n else:\n modified_min = real_min\n if abs(high - real_max) / (high - low) < margin:\n modified_max = real_max + (high - low) * margin\n rerun = True\n else:\n modified_max = real_max\n if rerun:\n low, high, d = heckbert_interval(modified_min, modified_max)\n return low, high\n else:\n return real_min, real_max\n", "path": "chaco/data_range_1d.py" } ]
diff --git a/chaco/data_range_1d.py b/chaco/data_range_1d.py index e75600f0c..2f7ce79a4 100644 --- a/chaco/data_range_1d.py +++ b/chaco/data_range_1d.py @@ -352,6 +352,7 @@ def _refresh_bounds(self): self._high_value = inf else: self._high_value = self._high_setting + self.updated = (self._low_value, self._high_value) return else: mins, maxes = zip(*bounds_list) diff --git a/chaco/tests/test_datarange_1d.py b/chaco/tests/test_datarange_1d.py index 52910dedb..f46e00a0a 100644 --- a/chaco/tests/test_datarange_1d.py +++ b/chaco/tests/test_datarange_1d.py @@ -15,6 +15,7 @@ from numpy.testing import assert_equal from traits.api import HasTraits, Instance, Bool, observe +from traits.testing.api import UnittestTools from chaco.api import DataRange1D, ArrayDataSource @@ -35,7 +36,7 @@ def range_changed(self, event): self.range_updated = True -class DataRangeTestCase(unittest.TestCase): +class DataRangeTestCase(UnittestTools, unittest.TestCase): def test_empty_range(self): r = DataRange1D() self.assertEqual(r.low, -inf) @@ -337,3 +338,28 @@ def test_inf_in_source(self): r.sources.append(ds1) self.assertEqual(r.low, -inf) self.assertEqual(r.high, inf) + + def test_sources_changed_auto(self): + ds1 = ArrayDataSource(array([3, 4, 5, 6, 7])) + ds2 = ArrayDataSource(array([5, 10, 15, 20])) + r = DataRange1D() + events = [] + r.observe(events.append, 'updated') + + self.assertEqual(r.low, -inf) + self.assertEqual(r.high, inf) + + with self.assertTraitChanges(r, "updated", count=1): + r.add(ds1) + + self.assertEqual(events[-1].new, (3, 7)) + self.assertEqual(r.low, 3) + self.assertEqual(r.high, 7) + + + with self.assertTraitChanges(r, "updated", count=1): + r.remove(ds1) + + self.assertEqual(events[-1].new, (-inf, inf)) + self.assertEqual(r.low, -inf) + self.assertEqual(r.high, inf)
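The heart of the fix above is the added `self.updated = (self._low_value, self._high_value)` in the branch where all sources are gone: firing the event gives anything that caches values derived from the range (such as the plot's value mapper in the bug report) a chance to drop its cache. Below is a small self-contained sketch of that notify-and-invalidate pattern; `ToyRange` and `ToyMapper` are made-up stand-ins, not Chaco's real `DataRange1D` and mapper internals.

```python
import math


class ToyRange:
    """Stand-in for DataRange1D: holds bounds and notifies observers on change."""

    def __init__(self):
        self._listeners = []
        self.low, self.high = float("-inf"), float("inf")

    def observe(self, callback):
        self._listeners.append(callback)

    def set_bounds(self, low, high):
        self.low, self.high = low, high
        for callback in self._listeners:   # the "updated" event
            callback((low, high))


class ToyMapper:
    """Stand-in for a screen mapper that caches its scale factor."""

    def __init__(self, rng, screen_width=100.0):
        self.rng = rng
        self.screen_width = screen_width
        self._cache_valid = False
        self._scale = 0.0
        rng.observe(self._bounds_changed)

    def _bounds_changed(self, _bounds):
        # Without this notification a stale cache keeps mapping with old bounds,
        # which is how the inf values in the bug report survive the removal.
        self._cache_valid = False

    def map_screen(self, value):
        if not self._cache_valid:
            span = self.rng.high - self.rng.low
            self._scale = 0.0 if math.isinf(span) or span == 0 else self.screen_width / span
            self._cache_valid = True
        if self._scale == 0.0:
            return 0.0   # degenerate range: map everything to the origin
        return (value - self.rng.low) * self._scale


rng = ToyRange()
mapper = ToyMapper(rng)
rng.set_bounds(3, 7)
print(mapper.map_screen(5))   # 50.0
rng.set_bounds(float("-inf"), float("inf"))   # all sources removed
print(mapper.map_screen(5))   # 0.0, because the update event invalidated the cache
```

In the actual change the notification is Chaco's `updated` trait event, and the new test in the diff asserts that it fires exactly once on both `add` and `remove` of a source.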
ansible-collections__community.vmware-1706
Invalid return type for vmware_vsan_health_info

##### SUMMARY
The documentation for vmware_vsan_health_info states that the [return type is dict](https://github.com/ansible-collections/community.vmware/blob/fa19ae68c0a720ab2f12122c9edb2d90d73880e9/plugins/modules/vmware_vsan_health_info.py#L66) but the response is wrapped in a `json.dumps` call, [converting it to string](https://github.com/ansible-collections/community.vmware/blob/fa19ae68c0a720ab2f12122c9edb2d90d73880e9/plugins/modules/vmware_vsan_health_info.py#L168). I am uncertain whether the return type documentation or the returned object should be adapted, but the current inconsistency should be fixed.

##### ISSUE TYPE
- Documentation Report

##### COMPONENT NAME
vmware_vsan_health_info

##### ANSIBLE VERSION
```
ansible [core 2.14.2]
  config file = None
  configured module search path = ['/var/home/philipp/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
  ansible python module location = /var/home/philipp/.local/pipx/venvs/ansible-core/lib64/python3.11/site-packages/ansible
  ansible collection location = /var/home/philipp/.ansible/collections:/usr/share/ansible/collections
  executable location = /var/home/philipp/.local/bin/ansible
  python version = 3.11.1 (main, Jan 6 2023, 00:00:00) [GCC 12.2.1 20221121 (Red Hat 12.2.1-4)] (/var/home/philipp/.local/pipx/venvs/ansible-core/bin/python)
  jinja version = 3.1.2
  libyaml = True
```
[ { "content": "#!/usr/bin/python\n\n# Copyright: (c) 2019, OVH SAS\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_vsan_health_info\nshort_description: Gather information about a VMware vSAN cluster's health\ndescription:\n - \"Gather information about a VMware vSAN cluster's health.\"\noptions:\n datacenter:\n description:\n - Name of the Datacenter.\n required: false\n type: str\n aliases: [ 'datacenter_name' ]\n cluster_name:\n description:\n - Name of the vSAN cluster.\n required: true\n type: str\n fetch_from_cache:\n description:\n - C(true) to return the result from cache directly instead of running the full health check.\n required: false\n default: false\n type: bool\nrequirements:\n - VMware vSAN Python's SDK\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\nauthor:\n - Erwan Quelin (@equelin)\n'''\n\nEXAMPLES = r'''\n- name: Gather health info from a vSAN's cluster\n community.vmware.vmware_vsan_health_info:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n cluster_name: 'vSAN01'\n fetch_from_cache: false\n\n- name: Gather health info from a vSAN's cluster with datacenter\n community.vmware.vmware_vsan_health_info:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n cluster_name: 'vSAN01'\n datacenter: 'Datacenter_01'\n fetch_from_cache: true\n'''\n\nRETURN = r'''\nvsan_health_info:\n description: vSAN cluster health info\n returned: on success\n type: dict\n sample: {\n \"_vimtype\": \"vim.cluster.VsanClusterHealthSummary\",\n \"burnInTest\": null,\n \"clusterStatus\": {\n \"_vimtype\": \"vim.cluster.VsanClusterHealthSystemStatusResult\",\n \"goalState\": \"installed\",\n \"status\": \"green\",\n \"trackedHostsStatus\": [\n {\n \"_vimtype\": \"vim.host.VsanHostHealthSystemStatusResult\",\n \"hostname\": \"esxi01.example.com\",\n \"issues\": [],\n \"status\": \"green\"\n },\n {\n \"_vimtype\": \"vim.host.VsanHostHealthSystemStatusResult\",\n \"hostname\": \"esxi04.example.com\",\n \"issues\": [],\n \"status\": \"green\"\n },\n {\n \"_vimtype\": \"vim.host.VsanHostHealthSystemStatusResult\",\n \"hostname\": \"esxi02.example.com\",\n \"issues\": [],\n \"status\": \"green\"\n },\n {\n \"_vimtype\": \"vim.host.VsanHostHealthSystemStatusResult\",\n \"hostname\": \"esxi03.example.com\",\n \"issues\": [],\n \"status\": \"green\"\n }\n ],\n \"untrackedHosts\": []\n }\n }\n'''\n\nimport json\nimport traceback\n\ntry:\n from pyVmomi import vmodl, VmomiSupport\n HAS_PYVMOMI = True\n HAS_PYVMOMIJSON = hasattr(VmomiSupport, 'VmomiJSONEncoder')\nexcept ImportError:\n PYVMOMI_IMP_ERR = traceback.format_exc()\n HAS_PYVMOMI = False\n HAS_PYVMOMIJSON = False\n\nVSANPYTHONSDK_IMP_ERR = None\ntry:\n import vsanapiutils\n HAS_VSANPYTHONSDK = True\nexcept ImportError:\n VSANPYTHONSDK_IMP_ERR = traceback.format_exc()\n HAS_VSANPYTHONSDK = False\n\nfrom ansible.module_utils.basic import AnsibleModule, missing_required_lib\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi\n\n\nclass VSANInfoManager(PyVmomi):\n def __init__(self, module):\n super(VSANInfoManager, self).__init__(module)\n self.datacenter = None\n self.cluster = None\n\n def 
gather_info(self):\n datacenter_name = self.module.params.get('datacenter')\n if datacenter_name:\n self.datacenter = self.find_datacenter_by_name(datacenter_name)\n if self.datacenter is None:\n self.module.fail_json(msg=\"Datacenter %s does not exist.\" % datacenter_name)\n\n cluster_name = self.module.params.get('cluster_name')\n self.cluster = self.find_cluster_by_name(cluster_name=cluster_name, datacenter_name=self.datacenter)\n if self.cluster is None:\n self.module.fail_json(msg=\"Cluster %s does not exist.\" % cluster_name)\n\n fetch_from_cache = self.module.params.get('fetch_from_cache')\n\n client_stub = self.si._GetStub()\n ssl_context = client_stub.schemeArgs.get('context')\n\n api_version = vsanapiutils.GetLatestVmodlVersion(self.module.params['hostname'])\n vc_mos = vsanapiutils.GetVsanVcMos(client_stub, context=ssl_context, version=api_version)\n\n vsan_cluster_health_system = vc_mos['vsan-cluster-health-system']\n\n cluster_health = {}\n try:\n cluster_health = vsan_cluster_health_system.VsanQueryVcClusterHealthSummary(\n cluster=self.cluster,\n fetchFromCache=fetch_from_cache,\n )\n except vmodl.fault.NotFound as not_found:\n self.module.fail_json(msg=not_found.msg)\n except vmodl.fault.RuntimeFault as runtime_fault:\n self.module.fail_json(msg=runtime_fault.msg)\n\n health = json.dumps(cluster_health, cls=VmomiSupport.VmomiJSONEncoder, sort_keys=True, strip_dynamic=True)\n\n self.module.exit_json(changed=False, vsan_health_info=health)\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(\n datacenter=dict(required=False, type='str', aliases=['datacenter_name']),\n cluster_name=dict(required=True, type='str'),\n fetch_from_cache=dict(required=False, type='bool', default=False)\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True\n )\n\n if not HAS_VSANPYTHONSDK:\n module.fail_json(msg=missing_required_lib('vSAN Management SDK for Python'), exception=VSANPYTHONSDK_IMP_ERR)\n\n if not HAS_PYVMOMIJSON:\n module.fail_json(msg='The installed version of pyvmomi lacks JSON output support; need pyvmomi>6.7.1')\n\n vsan_info_manager = VSANInfoManager(module)\n vsan_info_manager.gather_info()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_vsan_health_info.py" } ]
[ { "content": "#!/usr/bin/python\n\n# Copyright: (c) 2019, OVH SAS\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_vsan_health_info\nshort_description: Gather information about a VMware vSAN cluster's health\ndescription:\n - \"Gather information about a VMware vSAN cluster's health.\"\noptions:\n datacenter:\n description:\n - Name of the Datacenter.\n required: false\n type: str\n aliases: [ 'datacenter_name' ]\n cluster_name:\n description:\n - Name of the vSAN cluster.\n required: true\n type: str\n fetch_from_cache:\n description:\n - C(true) to return the result from cache directly instead of running the full health check.\n required: false\n default: false\n type: bool\nrequirements:\n - VMware vSAN Python's SDK\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\nauthor:\n - Erwan Quelin (@equelin)\n'''\n\nEXAMPLES = r'''\n- name: Gather health info from a vSAN's cluster\n community.vmware.vmware_vsan_health_info:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n cluster_name: 'vSAN01'\n fetch_from_cache: false\n\n- name: Gather health info from a vSAN's cluster with datacenter\n community.vmware.vmware_vsan_health_info:\n hostname: \"{{ vcenter_hostname }}\"\n username: \"{{ vcenter_username }}\"\n password: \"{{ vcenter_password }}\"\n cluster_name: 'vSAN01'\n datacenter: 'Datacenter_01'\n fetch_from_cache: true\n'''\n\nRETURN = r'''\nvsan_health_info:\n description: vSAN cluster health info\n returned: on success\n type: dict\n sample: {\n \"_vimtype\": \"vim.cluster.VsanClusterHealthSummary\",\n \"burnInTest\": null,\n \"clusterStatus\": {\n \"_vimtype\": \"vim.cluster.VsanClusterHealthSystemStatusResult\",\n \"goalState\": \"installed\",\n \"status\": \"green\",\n \"trackedHostsStatus\": [\n {\n \"_vimtype\": \"vim.host.VsanHostHealthSystemStatusResult\",\n \"hostname\": \"esxi01.example.com\",\n \"issues\": [],\n \"status\": \"green\"\n },\n {\n \"_vimtype\": \"vim.host.VsanHostHealthSystemStatusResult\",\n \"hostname\": \"esxi04.example.com\",\n \"issues\": [],\n \"status\": \"green\"\n },\n {\n \"_vimtype\": \"vim.host.VsanHostHealthSystemStatusResult\",\n \"hostname\": \"esxi02.example.com\",\n \"issues\": [],\n \"status\": \"green\"\n },\n {\n \"_vimtype\": \"vim.host.VsanHostHealthSystemStatusResult\",\n \"hostname\": \"esxi03.example.com\",\n \"issues\": [],\n \"status\": \"green\"\n }\n ],\n \"untrackedHosts\": []\n }\n }\n'''\n\nimport json\nimport traceback\n\ntry:\n from pyVmomi import vmodl, VmomiSupport\n HAS_PYVMOMI = True\n HAS_PYVMOMIJSON = hasattr(VmomiSupport, 'VmomiJSONEncoder')\nexcept ImportError:\n PYVMOMI_IMP_ERR = traceback.format_exc()\n HAS_PYVMOMI = False\n HAS_PYVMOMIJSON = False\n\nVSANPYTHONSDK_IMP_ERR = None\ntry:\n import vsanapiutils\n HAS_VSANPYTHONSDK = True\nexcept ImportError:\n VSANPYTHONSDK_IMP_ERR = traceback.format_exc()\n HAS_VSANPYTHONSDK = False\n\nfrom ansible.module_utils.basic import AnsibleModule, missing_required_lib\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi\n\n\nclass VSANInfoManager(PyVmomi):\n def __init__(self, module):\n super(VSANInfoManager, self).__init__(module)\n self.datacenter = None\n self.cluster = None\n\n def 
gather_info(self):\n datacenter_name = self.module.params.get('datacenter')\n if datacenter_name:\n self.datacenter = self.find_datacenter_by_name(datacenter_name)\n if self.datacenter is None:\n self.module.fail_json(msg=\"Datacenter %s does not exist.\" % datacenter_name)\n\n cluster_name = self.module.params.get('cluster_name')\n self.cluster = self.find_cluster_by_name(cluster_name=cluster_name, datacenter_name=self.datacenter)\n if self.cluster is None:\n self.module.fail_json(msg=\"Cluster %s does not exist.\" % cluster_name)\n\n fetch_from_cache = self.module.params.get('fetch_from_cache')\n\n client_stub = self.si._GetStub()\n ssl_context = client_stub.schemeArgs.get('context')\n\n api_version = vsanapiutils.GetLatestVmodlVersion(self.module.params['hostname'])\n vc_mos = vsanapiutils.GetVsanVcMos(client_stub, context=ssl_context, version=api_version)\n\n vsan_cluster_health_system = vc_mos['vsan-cluster-health-system']\n\n cluster_health = {}\n try:\n cluster_health = vsan_cluster_health_system.VsanQueryVcClusterHealthSummary(\n cluster=self.cluster,\n fetchFromCache=fetch_from_cache,\n )\n except vmodl.fault.NotFound as not_found:\n self.module.fail_json(msg=not_found.msg)\n except vmodl.fault.RuntimeFault as runtime_fault:\n self.module.fail_json(msg=runtime_fault.msg)\n\n health = json.dumps(cluster_health, cls=VmomiSupport.VmomiJSONEncoder, sort_keys=True, strip_dynamic=True)\n\n self.module.exit_json(changed=False, vsan_health_info=json.loads(health))\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(\n datacenter=dict(required=False, type='str', aliases=['datacenter_name']),\n cluster_name=dict(required=True, type='str'),\n fetch_from_cache=dict(required=False, type='bool', default=False)\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True\n )\n\n if not HAS_VSANPYTHONSDK:\n module.fail_json(msg=missing_required_lib('vSAN Management SDK for Python'), exception=VSANPYTHONSDK_IMP_ERR)\n\n if not HAS_PYVMOMIJSON:\n module.fail_json(msg='The installed version of pyvmomi lacks JSON output support; need pyvmomi>6.7.1')\n\n vsan_info_manager = VSANInfoManager(module)\n vsan_info_manager.gather_info()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/vmware_vsan_health_info.py" } ]
diff --git a/changelogs/fragments/1706-vmware_vsan_health_info.yml b/changelogs/fragments/1706-vmware_vsan_health_info.yml new file mode 100644 index 0000000000..a3db51a2f5 --- /dev/null +++ b/changelogs/fragments/1706-vmware_vsan_health_info.yml @@ -0,0 +1,2 @@ +bugfixes: + - vmware_vsan_health_info - Fix return value (https://github.com/ansible-collections/community.vmware/pull/1706). diff --git a/plugins/modules/vmware_vsan_health_info.py b/plugins/modules/vmware_vsan_health_info.py index d369efb571..89db649de6 100644 --- a/plugins/modules/vmware_vsan_health_info.py +++ b/plugins/modules/vmware_vsan_health_info.py @@ -167,7 +167,7 @@ def gather_info(self): health = json.dumps(cluster_health, cls=VmomiSupport.VmomiJSONEncoder, sort_keys=True, strip_dynamic=True) - self.module.exit_json(changed=False, vsan_health_info=health) + self.module.exit_json(changed=False, vsan_health_info=json.loads(health)) def main():
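The change above keeps the documented `dict` return type by parsing the encoder's output back with `json.loads` instead of returning the JSON string itself. Below is a small illustration of that encode-then-parse round trip; `VsanHealth` and `VsanHealthEncoder` are made-up stand-ins for the pyVmomi result object and `VmomiSupport.VmomiJSONEncoder`, which are not importable outside a vSphere environment.

```python
import json


class VsanHealth:
    """Toy stand-in for an SDK result object that json cannot serialize directly."""

    def __init__(self, status, hosts):
        self.status = status
        self.hosts = hosts


class VsanHealthEncoder(json.JSONEncoder):
    # Plays the same role as VmomiSupport.VmomiJSONEncoder in the module:
    # it teaches json.dumps how to flatten the SDK object into plain data.
    def default(self, o):
        if isinstance(o, VsanHealth):
            return {"status": o.status, "trackedHostsStatus": o.hosts}
        return super().default(o)


result = VsanHealth("green", ["esxi01.example.com", "esxi02.example.com"])

as_string = json.dumps(result, cls=VsanHealthEncoder, sort_keys=True)
as_dict = json.loads(as_string)   # what should be handed to exit_json()

print(type(as_string).__name__)   # str  -> the old, wrongly typed return value
print(type(as_dict).__name__)     # dict -> matches the documented return type
print(as_dict["status"])          # green
```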
svthalia__concrexit-1739
Renewal of membership sends email about references without reason

### Describe the bug
A renewal of a membership to the 'until study end' option triggered the email for references.

### How to reproduce
Steps to reproduce the behaviour:
1. Create a registration for a completely new user
2. Create an upgrade for the membership
3. Get the email

### Expected behaviour
No email should be received for references.

### Screenshots
<img width="929" alt="Screenshot 2021-06-20 at 15 20 10" src="https://user-images.githubusercontent.com/1799914/122675750-2a245800-d1db-11eb-93eb-d02abc0f63d1.png">
<img width="656" alt="Screenshot 2021-06-20 at 15 20 20" src="https://user-images.githubusercontent.com/1799914/122675752-2b558500-d1db-11eb-9763-3c1f622826e7.png">
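The fix for this report is not fully visible in this excerpt, but the symptom amounts to a reference-request email being sent without checking what kind of renewal triggered it. Below is a minimal sketch of the kind of guard one would expect, under the assumption that only benefactor renewals need members to vouch for them; the function names are purely illustrative, not concrexit's actual services API.

```python
BENEFACTOR = "benefactor"
MEMBER = "member"


def needs_references(membership_type):
    """Assume only benefactor sign-ups/renewals have to be vouched for."""
    return membership_type == BENEFACTOR


def process_renewal(membership_type, send_references_email):
    # Guard the notification instead of sending it for every renewal.
    if needs_references(membership_type):
        send_references_email()
        return "references requested"
    return "no references needed"


# A regular member renewing 'until study end' should not trigger the email.
print(process_renewal(MEMBER, lambda: print("sending references email")))
# -> no references needed
print(process_renewal(BENEFACTOR, lambda: print("sending references email")))
# -> sending references email, then: references requested
```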
[ { "content": "\"\"\"The forms defined by the registrations package.\"\"\"\nfrom django import forms\nfrom django.core.exceptions import NON_FIELD_ERRORS, ValidationError\nfrom django.forms import TypedChoiceField\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import capfirst\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Membership\nfrom payments.widgets import SignatureWidget\nfrom registrations import services\nfrom .models import Registration, Renewal, Reference\n\n\nclass BaseRegistrationForm(forms.ModelForm):\n \"\"\"Base form for membership registrations.\"\"\"\n\n birthday = forms.DateField(\n widget=forms.widgets.SelectDateWidget(\n years=range(timezone.now().year - 50, timezone.now().year - 10)\n ),\n label=capfirst(_(\"birthday\")),\n )\n\n privacy_policy = forms.BooleanField(required=True,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n\n\nclass RegistrationAdminForm(forms.ModelForm):\n \"\"\"Custom admin form for Registration model to add the widget for the signature.\"\"\"\n\n class Meta:\n fields = \"__all__\"\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n\n\nclass MemberRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for member registrations.\"\"\"\n\n this_year = timezone.now().year\n years = reversed(\n [(x, \"{} - {}\".format(x, x + 1)) for x in range(this_year - 20, this_year + 1)]\n )\n\n starting_year = TypedChoiceField(\n choices=years,\n coerce=int,\n empty_value=this_year,\n required=False,\n help_text=_(\"What lecture year did you start studying at Radboud University?\"),\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"programme\",\n \"starting_year\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass BenefactorRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for benefactor registrations.\"\"\"\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"contribution\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass RenewalForm(forms.ModelForm):\n \"\"\"Form for membership renewals.\"\"\"\n\n privacy_policy = forms.BooleanField(required=True,)\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n contribution = forms.IntegerField(required=False,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n 
self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n\n class Meta:\n model = Renewal\n fields = (\"member\", \"length\", \"contribution\", \"membership_type\")\n\n\nclass ReferenceForm(forms.ModelForm):\n def clean(self):\n super().clean()\n membership = self.cleaned_data[\"member\"].current_membership\n if membership and membership.type == Membership.BENEFACTOR:\n raise ValidationError(_(\"Benefactors cannot give references.\"))\n\n membership = self.cleaned_data[\"member\"].latest_membership\n if (\n membership\n and membership.until\n and membership.until < services.calculate_membership_since()\n ):\n raise ValidationError(\n _(\n \"It's not possible to give references for \"\n \"memberships that start after your own \"\n \"membership's end.\"\n )\n )\n\n class Meta:\n model = Reference\n fields = \"__all__\"\n error_messages = {\n NON_FIELD_ERRORS: {\n \"unique_together\": _(\n \"You've already given a reference for this person.\"\n ),\n }\n }\n", "path": "website/registrations/forms.py" } ]
[ { "content": "\"\"\"The forms defined by the registrations package.\"\"\"\nfrom django import forms\nfrom django.core.exceptions import NON_FIELD_ERRORS, ValidationError\nfrom django.forms import TypedChoiceField\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import capfirst\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Membership\nfrom payments.widgets import SignatureWidget\nfrom registrations import services\nfrom .models import Registration, Renewal, Reference\n\n\nclass BaseRegistrationForm(forms.ModelForm):\n \"\"\"Base form for membership registrations.\"\"\"\n\n birthday = forms.DateField(\n widget=forms.widgets.SelectDateWidget(\n years=range(timezone.now().year - 50, timezone.now().year - 10)\n ),\n label=capfirst(_(\"birthday\")),\n )\n\n privacy_policy = forms.BooleanField(required=True,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n\n\nclass RegistrationAdminForm(forms.ModelForm):\n \"\"\"Custom admin form for Registration model to add the widget for the signature.\"\"\"\n\n class Meta:\n fields = \"__all__\"\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n\n\nclass MemberRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for member registrations.\"\"\"\n\n this_year = timezone.now().year\n years = reversed(\n [(x, \"{} - {}\".format(x, x + 1)) for x in range(this_year - 20, this_year + 1)]\n )\n\n starting_year = TypedChoiceField(\n choices=years,\n coerce=int,\n empty_value=this_year,\n required=False,\n help_text=_(\"What lecture year did you start studying at Radboud University?\"),\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"programme\",\n \"starting_year\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass BenefactorRegistrationForm(BaseRegistrationForm):\n \"\"\"Form for benefactor registrations.\"\"\"\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n class Meta:\n model = Registration\n widgets = {\n \"signature\": SignatureWidget(),\n }\n fields = (\n \"length\",\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"optin_birthday\",\n \"optin_mailinglist\",\n \"contribution\",\n \"membership_type\",\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n\n\nclass RenewalForm(forms.ModelForm):\n \"\"\"Form for membership renewals.\"\"\"\n\n privacy_policy = forms.BooleanField(required=True,)\n\n icis_employee = forms.BooleanField(\n required=False, label=_(\"I am an employee of iCIS\")\n )\n\n contribution = forms.IntegerField(required=False,)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n 
self.fields[\"privacy_policy\"].label = mark_safe(\n _('I accept the <a href=\"{}\">privacy policy</a>.').format(\n reverse_lazy(\"singlepages:privacy-policy\")\n )\n )\n\n class Meta:\n model = Renewal\n fields = (\n \"member\",\n \"length\",\n \"contribution\",\n \"membership_type\",\n \"no_references\",\n \"remarks\",\n )\n\n\nclass ReferenceForm(forms.ModelForm):\n def clean(self):\n super().clean()\n membership = self.cleaned_data[\"member\"].current_membership\n if membership and membership.type == Membership.BENEFACTOR:\n raise ValidationError(_(\"Benefactors cannot give references.\"))\n\n membership = self.cleaned_data[\"member\"].latest_membership\n if (\n membership\n and membership.until\n and membership.until < services.calculate_membership_since()\n ):\n raise ValidationError(\n _(\n \"It's not possible to give references for \"\n \"memberships that start after your own \"\n \"membership's end.\"\n )\n )\n\n class Meta:\n model = Reference\n fields = \"__all__\"\n error_messages = {\n NON_FIELD_ERRORS: {\n \"unique_together\": _(\n \"You've already given a reference for this person.\"\n ),\n }\n }\n", "path": "website/registrations/forms.py" } ]
diff --git a/website/registrations/forms.py b/website/registrations/forms.py index 92f40e92f..616666f38 100644 --- a/website/registrations/forms.py +++ b/website/registrations/forms.py @@ -151,7 +151,14 @@ def __init__(self, *args, **kwargs): class Meta: model = Renewal - fields = ("member", "length", "contribution", "membership_type") + fields = ( + "member", + "length", + "contribution", + "membership_type", + "no_references", + "remarks", + ) class ReferenceForm(forms.ModelForm):
kartoza__prj.app-199
Atom feed gets a 500 error

See http://sentry.kartoza.com/kartoza/projecta-live/group/5846/

The problem is caused by the models/entry.py get_absolute_url() method, which still uses the old slug system to resolve the path to an entry.
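For context, when `reverse()` is handed kwargs that no longer match any URL pattern it raises `NoReverseMatch`, which is presumably what the feed view surfaced as a 500 here. A minimal sketch of the corrected method, mirroring the fix recorded below (the route name comes from the project's code; everything else is trimmed for illustration):

```
from django.core.urlresolvers import reverse  # Django < 2.0, as used by the project
from django.db import models


class Entry(models.Model):
    # ... other fields omitted for brevity ...

    def get_absolute_url(self):
        # Reverse by primary key; the old (project_slug, version_slug, slug)
        # kwargs no longer match the 'entry-detail' route.
        return reverse('entry-detail', kwargs={'pk': self.id})
```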
[ { "content": "# coding=utf-8\n\"\"\"Models for changelog entries.\"\"\"\nfrom django.core.urlresolvers import reverse\nfrom django.utils.text import slugify\nimport os\nimport logging\nfrom core.settings.contrib import STOP_WORDS\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.db import models\nfrom embed_video.fields import EmbedVideoField\nfrom django.contrib.auth.models import User\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApprovedEntryManager(models.Manager):\n \"\"\"Custom entry manager that shows only approved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedEntryManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedEntryManager(models.Manager):\n \"\"\"Custom entry manager that shows only unapproved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n UnapprovedEntryManager, self).get_queryset().filter(\n approved=False)\n\n\nclass Entry(models.Model):\n \"\"\"An entry is the basic unit of a changelog.\"\"\"\n title = models.CharField(\n help_text='Feature title for this changelog entry.',\n max_length=255,\n null=False,\n blank=False,\n unique=False) # Unique together rule applies in meta class\n\n description = models.TextField(\n null=True,\n blank=True,\n help_text='Describe the new feature. Markdown is supported.')\n\n image_file = models.ImageField(\n help_text=(\n 'A image that is related to this visual changelog entry. '\n 'Most browsers support dragging the image directly on to the '\n '\"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/entries'),\n blank=True)\n\n image_credits = models.CharField(\n help_text='Who should be credited for this image?',\n max_length=255,\n null=True,\n blank=True)\n\n video = EmbedVideoField(\n verbose_name='Youtube video',\n help_text='Paste your youtube video link',\n null=True,\n blank=True)\n\n funded_by = models.CharField(\n help_text='Input the funder name.',\n max_length=255,\n null=True,\n blank=True)\n\n funder_url = models.CharField(\n help_text='Input the funder URL.',\n max_length=255,\n null=True,\n blank=True)\n\n developed_by = models.CharField(\n help_text='Input the developer name.',\n max_length=255,\n null=True,\n blank=True)\n\n developer_url = models.CharField(\n help_text='Input the developer URL.',\n max_length=255,\n null=True,\n blank=True)\n\n approved = models.BooleanField(\n help_text=(\n 'Whether this entry has been approved for use by the '\n 'project owner.'),\n default=False\n )\n author = models.ForeignKey(User)\n slug = models.SlugField()\n # noinspection PyUnresolvedReferences\n version = models.ForeignKey('Version')\n # noinspection PyUnresolvedReferences\n category = models.ForeignKey('Category')\n objects = models.Manager()\n approved_objects = ApprovedEntryManager()\n unapproved_objects = UnapprovedEntryManager()\n\n # noinspection PyClassicStyleClass\n class Meta:\n \"\"\"Meta options for the version class.\"\"\"\n unique_together = (\n ('title', 'version', 'category'),\n ('version', 'slug'),\n )\n app_label = 'changes'\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.title.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = ' '.join(filtered_words)\n self.slug = slugify(new_list)[:50]\n super(Entry, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % self.title\n\n def get_absolute_url(self):\n return reverse('entry-detail', kwargs={\n 'slug': 
self.slug,\n 'version_slug': self.version.slug,\n 'project_slug': self.version.project.slug\n })\n\n def funder_info_html(self):\n string = \"\"\n if self.funded_by and self.funder_url is None:\n string = \"\"\n return string\n elif self.funded_by and not self.funder_url:\n string = \"This feature was funded by %s \" % self.funded_by\n return string\n elif self.funder_url and not self.funded_by:\n string = \"This feature was funded by [%s](%s)\" % (\n self.funder_url, self.funder_url)\n return string\n elif self.funded_by and self.funder_url:\n string = \"This feature was funded by [%s](%s)\" % (\n self.funded_by, self.funder_url)\n return string\n else:\n return string\n\n def developer_info_html(self):\n string = \"\"\n if self.developed_by and self.developer_url is None:\n string = \"\"\n return string\n elif self.developed_by and not self.developer_url:\n string = \"This feature was developed by %s \" % self.developed_by\n return string\n elif self.developer_url and not self.developed_by:\n string = \"This feature was developed by [%s](%s)\" % (\n self.developer_url, self.developer_url)\n return string\n elif self.developed_by and self.developer_url:\n string = \"This feature was developed by [%s](%s)\" % (\n self.developed_by, self.developer_url)\n return string\n else:\n return string\n", "path": "django_project/changes/models/entry.py" } ]
[ { "content": "# coding=utf-8\n\"\"\"Models for changelog entries.\"\"\"\nfrom django.core.urlresolvers import reverse\nfrom django.utils.text import slugify\nimport os\nimport logging\nfrom core.settings.contrib import STOP_WORDS\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.db import models\nfrom embed_video.fields import EmbedVideoField\nfrom django.contrib.auth.models import User\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApprovedEntryManager(models.Manager):\n \"\"\"Custom entry manager that shows only approved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedEntryManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedEntryManager(models.Manager):\n \"\"\"Custom entry manager that shows only unapproved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n UnapprovedEntryManager, self).get_queryset().filter(\n approved=False)\n\n\nclass Entry(models.Model):\n \"\"\"An entry is the basic unit of a changelog.\"\"\"\n title = models.CharField(\n help_text='Feature title for this changelog entry.',\n max_length=255,\n null=False,\n blank=False,\n unique=False) # Unique together rule applies in meta class\n\n description = models.TextField(\n null=True,\n blank=True,\n help_text='Describe the new feature. Markdown is supported.')\n\n image_file = models.ImageField(\n help_text=(\n 'A image that is related to this visual changelog entry. '\n 'Most browsers support dragging the image directly on to the '\n '\"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/entries'),\n blank=True)\n\n image_credits = models.CharField(\n help_text='Who should be credited for this image?',\n max_length=255,\n null=True,\n blank=True)\n\n video = EmbedVideoField(\n verbose_name='Youtube video',\n help_text='Paste your youtube video link',\n null=True,\n blank=True)\n\n funded_by = models.CharField(\n help_text='Input the funder name.',\n max_length=255,\n null=True,\n blank=True)\n\n funder_url = models.CharField(\n help_text='Input the funder URL.',\n max_length=255,\n null=True,\n blank=True)\n\n developed_by = models.CharField(\n help_text='Input the developer name.',\n max_length=255,\n null=True,\n blank=True)\n\n developer_url = models.CharField(\n help_text='Input the developer URL.',\n max_length=255,\n null=True,\n blank=True)\n\n approved = models.BooleanField(\n help_text=(\n 'Whether this entry has been approved for use by the '\n 'project owner.'),\n default=False\n )\n author = models.ForeignKey(User)\n slug = models.SlugField()\n # noinspection PyUnresolvedReferences\n version = models.ForeignKey('Version')\n # noinspection PyUnresolvedReferences\n category = models.ForeignKey('Category')\n objects = models.Manager()\n approved_objects = ApprovedEntryManager()\n unapproved_objects = UnapprovedEntryManager()\n\n # noinspection PyClassicStyleClass\n class Meta:\n \"\"\"Meta options for the version class.\"\"\"\n unique_together = (\n ('title', 'version', 'category'),\n ('version', 'slug'),\n )\n app_label = 'changes'\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.title.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = ' '.join(filtered_words)\n self.slug = slugify(new_list)[:50]\n super(Entry, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % self.title\n\n def get_absolute_url(self):\n return reverse('entry-detail', kwargs={\n 'pk': self.id\n 
})\n\n def funder_info_html(self):\n string = \"\"\n if self.funded_by and self.funder_url is None:\n string = \"\"\n return string\n elif self.funded_by and not self.funder_url:\n string = \"This feature was funded by %s \" % self.funded_by\n return string\n elif self.funder_url and not self.funded_by:\n string = \"This feature was funded by [%s](%s)\" % (\n self.funder_url, self.funder_url)\n return string\n elif self.funded_by and self.funder_url:\n string = \"This feature was funded by [%s](%s)\" % (\n self.funded_by, self.funder_url)\n return string\n else:\n return string\n\n def developer_info_html(self):\n string = \"\"\n if self.developed_by and self.developer_url is None:\n string = \"\"\n return string\n elif self.developed_by and not self.developer_url:\n string = \"This feature was developed by %s \" % self.developed_by\n return string\n elif self.developer_url and not self.developed_by:\n string = \"This feature was developed by [%s](%s)\" % (\n self.developer_url, self.developer_url)\n return string\n elif self.developed_by and self.developer_url:\n string = \"This feature was developed by [%s](%s)\" % (\n self.developed_by, self.developer_url)\n return string\n else:\n return string\n", "path": "django_project/changes/models/entry.py" } ]
diff --git a/django_project/changes/models/entry.py b/django_project/changes/models/entry.py index b19f2cc34..c8636b92b 100644 --- a/django_project/changes/models/entry.py +++ b/django_project/changes/models/entry.py @@ -129,9 +129,7 @@ def __unicode__(self): def get_absolute_url(self): return reverse('entry-detail', kwargs={ - 'slug': self.slug, - 'version_slug': self.version.slug, - 'project_slug': self.version.project.slug + 'pk': self.id }) def funder_info_html(self):
mitmproxy__mitmproxy-1534
Add API to programmatically create new requests.

Original discussion: https://discourse.mitmproxy.org/t/are-there-any-script-examples-for-creating-a-get-post-put/134

It would be great if we had a dedicated API to make new requests from scripts, e.g. `master.new_request()` or something along those lines. This would probably just call `create_request` and then `replay_request`, but it seems useful enough to spell out explicitly.
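A rough usage sketch of what such a helper could look like from a script's point of view (hypothetical snippet; `master` is assumed to be the running `FlowMaster`, and how a script obtains it depends on the mitmproxy version). As implemented in the change recorded below, the helper builds a minimal flow via `create_request()`, hands it to `replay_request()`, and returns the replay thread rather than the flow itself:

```
# Hypothetical script snippet; `master` is assumed to be the live FlowMaster.
replay_thread = master.new_request("GET", "http", "example.com", 80, "/", block=True)
# With block=True the call waits for the replay thread to finish before returning.
```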
[ { "content": "from __future__ import absolute_import, print_function, division\n\nimport os\nimport sys\n\nfrom typing import Optional # noqa\n\nimport netlib.exceptions\nfrom mitmproxy import controller\nfrom mitmproxy import exceptions\nfrom mitmproxy import models\nfrom mitmproxy.flow import io\nfrom mitmproxy.flow import modules\nfrom mitmproxy.onboarding import app\nfrom mitmproxy.protocol import http_replay\n\n\ndef event_sequence(f):\n if isinstance(f, models.HTTPFlow):\n if f.request:\n yield \"request\", f\n if f.response:\n yield \"responseheaders\", f\n yield \"response\", f\n if f.error:\n yield \"error\", f\n elif isinstance(f, models.TCPFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"tcp_open\", f\n while messages:\n f.messages.append(messages.pop(0))\n yield \"tcp_message\", f\n if f.error:\n yield \"tcp_error\", f\n yield \"tcp_close\", f\n else:\n raise NotImplementedError\n\n\nclass FlowMaster(controller.Master):\n\n @property\n def server(self):\n # At some point, we may want to have support for multiple servers.\n # For now, this suffices.\n if len(self.servers) > 0:\n return self.servers[0]\n\n def __init__(self, options, server, state):\n super(FlowMaster, self).__init__(options)\n if server:\n self.add_server(server)\n self.state = state\n self.stream_large_bodies = None # type: Optional[modules.StreamLargeBodies]\n self.apps = modules.AppRegistry()\n\n def start_app(self, host, port):\n self.apps.add(app.mapp, host, port)\n\n def set_stream_large_bodies(self, max_size):\n if max_size is not None:\n self.stream_large_bodies = modules.StreamLargeBodies(max_size)\n else:\n self.stream_large_bodies = False\n\n def duplicate_flow(self, f):\n \"\"\"\n Duplicate flow, and insert it into state without triggering any of\n the normal flow events.\n \"\"\"\n f2 = f.copy()\n self.state.add_flow(f2)\n return f2\n\n def create_request(self, method, scheme, host, port, path):\n \"\"\"\n this method creates a new artificial and minimalist request also adds it to flowlist\n \"\"\"\n c = models.ClientConnection.make_dummy((\"\", 0))\n s = models.ServerConnection.make_dummy((host, port))\n\n f = models.HTTPFlow(c, s)\n headers = models.Headers()\n\n req = models.HTTPRequest(\n \"absolute\",\n method,\n scheme,\n host,\n port,\n path,\n b\"HTTP/1.1\",\n headers,\n b\"\"\n )\n f.request = req\n self.load_flow(f)\n return f\n\n def load_flow(self, f):\n \"\"\"\n Loads a flow\n \"\"\"\n if isinstance(f, models.HTTPFlow):\n if self.server and self.options.mode == \"reverse\":\n f.request.host = self.server.config.upstream_server.address.host\n f.request.port = self.server.config.upstream_server.address.port\n f.request.scheme = self.server.config.upstream_server.scheme\n f.reply = controller.DummyReply()\n for e, o in event_sequence(f):\n getattr(self, e)(o)\n\n def load_flows(self, fr):\n \"\"\"\n Load flows from a FlowReader object.\n \"\"\"\n cnt = 0\n for i in fr.stream():\n cnt += 1\n self.load_flow(i)\n return cnt\n\n def load_flows_file(self, path):\n path = os.path.expanduser(path)\n try:\n if path == \"-\":\n # This is incompatible with Python 3 - maybe we can use click?\n freader = io.FlowReader(sys.stdin)\n return self.load_flows(freader)\n else:\n with open(path, \"rb\") as f:\n freader = io.FlowReader(f)\n return self.load_flows(freader)\n except IOError as v:\n raise exceptions.FlowReadException(v.strerror)\n\n def replay_request(self, f, block=False):\n \"\"\"\n Replay a HTTP request to receive a new response from the 
server.\n\n Args:\n f: The flow to replay.\n block: If True, this function will wait for the replay to finish.\n This causes a deadlock if activated in the main thread.\n\n Returns:\n The thread object doing the replay.\n\n Raises:\n exceptions.ReplayException, if the flow is in a state\n where it is ineligible for replay.\n \"\"\"\n\n if f.live:\n raise exceptions.ReplayException(\n \"Can't replay live flow.\"\n )\n if f.intercepted:\n raise exceptions.ReplayException(\n \"Can't replay intercepted flow.\"\n )\n if f.request.raw_content is None:\n raise exceptions.ReplayException(\n \"Can't replay flow with missing content.\"\n )\n if not f.request:\n raise exceptions.ReplayException(\n \"Can't replay flow with missing request.\"\n )\n\n f.backup()\n f.request.is_replay = True\n\n f.response = None\n f.error = None\n\n rt = http_replay.RequestReplayThread(\n self.server.config,\n f,\n self.event_queue,\n self.should_exit\n )\n rt.start() # pragma: no cover\n if block:\n rt.join()\n return rt\n\n @controller.handler\n def log(self, l):\n self.add_log(l.msg, l.level)\n\n @controller.handler\n def clientconnect(self, root_layer):\n pass\n\n @controller.handler\n def clientdisconnect(self, root_layer):\n pass\n\n @controller.handler\n def serverconnect(self, server_conn):\n pass\n\n @controller.handler\n def serverdisconnect(self, server_conn):\n pass\n\n @controller.handler\n def next_layer(self, top_layer):\n pass\n\n @controller.handler\n def error(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def request(self, f):\n if f.live:\n app = self.apps.get(f.request)\n if app:\n err = app.serve(\n f,\n f.client_conn.wfile,\n **{\"mitmproxy.master\": self}\n )\n if err:\n self.add_log(\"Error in wsgi app. %s\" % err, \"error\")\n f.reply.kill()\n return\n if f not in self.state.flows: # don't add again on replay\n self.state.add_flow(f)\n\n @controller.handler\n def responseheaders(self, f):\n try:\n if self.stream_large_bodies:\n self.stream_large_bodies.run(f, False)\n except netlib.exceptions.HttpException:\n f.reply.kill()\n return\n\n @controller.handler\n def response(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def websockets_handshake(self, f):\n pass\n\n def handle_intercept(self, f):\n self.state.update_flow(f)\n\n def handle_accept_intercept(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def tcp_open(self, flow):\n # TODO: This would break mitmproxy currently.\n # self.state.add_flow(flow)\n pass\n\n @controller.handler\n def tcp_message(self, flow):\n pass\n\n @controller.handler\n def tcp_error(self, flow):\n pass\n\n @controller.handler\n def tcp_close(self, flow):\n pass\n", "path": "mitmproxy/flow/master.py" } ]
[ { "content": "from __future__ import absolute_import, print_function, division\n\nimport os\nimport sys\n\nfrom typing import Optional # noqa\n\nimport netlib.exceptions\nfrom mitmproxy import controller\nfrom mitmproxy import exceptions\nfrom mitmproxy import models\nfrom mitmproxy.flow import io\nfrom mitmproxy.flow import modules\nfrom mitmproxy.onboarding import app\nfrom mitmproxy.protocol import http_replay\n\n\ndef event_sequence(f):\n if isinstance(f, models.HTTPFlow):\n if f.request:\n yield \"request\", f\n if f.response:\n yield \"responseheaders\", f\n yield \"response\", f\n if f.error:\n yield \"error\", f\n elif isinstance(f, models.TCPFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"tcp_open\", f\n while messages:\n f.messages.append(messages.pop(0))\n yield \"tcp_message\", f\n if f.error:\n yield \"tcp_error\", f\n yield \"tcp_close\", f\n else:\n raise NotImplementedError\n\n\nclass FlowMaster(controller.Master):\n\n @property\n def server(self):\n # At some point, we may want to have support for multiple servers.\n # For now, this suffices.\n if len(self.servers) > 0:\n return self.servers[0]\n\n def __init__(self, options, server, state):\n super(FlowMaster, self).__init__(options)\n if server:\n self.add_server(server)\n self.state = state\n self.stream_large_bodies = None # type: Optional[modules.StreamLargeBodies]\n self.apps = modules.AppRegistry()\n\n def start_app(self, host, port):\n self.apps.add(app.mapp, host, port)\n\n def set_stream_large_bodies(self, max_size):\n if max_size is not None:\n self.stream_large_bodies = modules.StreamLargeBodies(max_size)\n else:\n self.stream_large_bodies = False\n\n def duplicate_flow(self, f):\n \"\"\"\n Duplicate flow, and insert it into state without triggering any of\n the normal flow events.\n \"\"\"\n f2 = f.copy()\n self.state.add_flow(f2)\n return f2\n\n def create_request(self, method, scheme, host, port, path):\n \"\"\"\n this method creates a new artificial and minimalist request also adds it to flowlist\n \"\"\"\n c = models.ClientConnection.make_dummy((\"\", 0))\n s = models.ServerConnection.make_dummy((host, port))\n\n f = models.HTTPFlow(c, s)\n headers = models.Headers()\n\n req = models.HTTPRequest(\n \"absolute\",\n method,\n scheme,\n host,\n port,\n path,\n b\"HTTP/1.1\",\n headers,\n b\"\"\n )\n f.request = req\n self.load_flow(f)\n return f\n\n def load_flow(self, f):\n \"\"\"\n Loads a flow\n \"\"\"\n if isinstance(f, models.HTTPFlow):\n if self.server and self.options.mode == \"reverse\":\n f.request.host = self.server.config.upstream_server.address.host\n f.request.port = self.server.config.upstream_server.address.port\n f.request.scheme = self.server.config.upstream_server.scheme\n f.reply = controller.DummyReply()\n for e, o in event_sequence(f):\n getattr(self, e)(o)\n\n def load_flows(self, fr):\n \"\"\"\n Load flows from a FlowReader object.\n \"\"\"\n cnt = 0\n for i in fr.stream():\n cnt += 1\n self.load_flow(i)\n return cnt\n\n def load_flows_file(self, path):\n path = os.path.expanduser(path)\n try:\n if path == \"-\":\n # This is incompatible with Python 3 - maybe we can use click?\n freader = io.FlowReader(sys.stdin)\n return self.load_flows(freader)\n else:\n with open(path, \"rb\") as f:\n freader = io.FlowReader(f)\n return self.load_flows(freader)\n except IOError as v:\n raise exceptions.FlowReadException(v.strerror)\n\n def replay_request(self, f, block=False):\n \"\"\"\n Replay a HTTP request to receive a new response from the 
server.\n\n Args:\n f: The flow to replay.\n block: If True, this function will wait for the replay to finish.\n This causes a deadlock if activated in the main thread.\n\n Returns:\n The thread object doing the replay.\n\n Raises:\n exceptions.ReplayException, if the flow is in a state\n where it is ineligible for replay.\n \"\"\"\n\n if f.live:\n raise exceptions.ReplayException(\n \"Can't replay live flow.\"\n )\n if f.intercepted:\n raise exceptions.ReplayException(\n \"Can't replay intercepted flow.\"\n )\n if f.request.raw_content is None:\n raise exceptions.ReplayException(\n \"Can't replay flow with missing content.\"\n )\n if not f.request:\n raise exceptions.ReplayException(\n \"Can't replay flow with missing request.\"\n )\n\n f.backup()\n f.request.is_replay = True\n\n f.response = None\n f.error = None\n\n rt = http_replay.RequestReplayThread(\n self.server.config,\n f,\n self.event_queue,\n self.should_exit\n )\n rt.start() # pragma: no cover\n if block:\n rt.join()\n return rt\n\n def new_request(self, method, scheme, host, port, path, block=False):\n f = self.create_request(method, scheme, host, port, path)\n return self.replay_request(f, block)\n\n @controller.handler\n def log(self, l):\n self.add_log(l.msg, l.level)\n\n @controller.handler\n def clientconnect(self, root_layer):\n pass\n\n @controller.handler\n def clientdisconnect(self, root_layer):\n pass\n\n @controller.handler\n def serverconnect(self, server_conn):\n pass\n\n @controller.handler\n def serverdisconnect(self, server_conn):\n pass\n\n @controller.handler\n def next_layer(self, top_layer):\n pass\n\n @controller.handler\n def error(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def request(self, f):\n if f.live:\n app = self.apps.get(f.request)\n if app:\n err = app.serve(\n f,\n f.client_conn.wfile,\n **{\"mitmproxy.master\": self}\n )\n if err:\n self.add_log(\"Error in wsgi app. %s\" % err, \"error\")\n f.reply.kill()\n return\n if f not in self.state.flows: # don't add again on replay\n self.state.add_flow(f)\n\n @controller.handler\n def responseheaders(self, f):\n try:\n if self.stream_large_bodies:\n self.stream_large_bodies.run(f, False)\n except netlib.exceptions.HttpException:\n f.reply.kill()\n return\n\n @controller.handler\n def response(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def websockets_handshake(self, f):\n pass\n\n def handle_intercept(self, f):\n self.state.update_flow(f)\n\n def handle_accept_intercept(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def tcp_open(self, flow):\n # TODO: This would break mitmproxy currently.\n # self.state.add_flow(flow)\n pass\n\n @controller.handler\n def tcp_message(self, flow):\n pass\n\n @controller.handler\n def tcp_error(self, flow):\n pass\n\n @controller.handler\n def tcp_close(self, flow):\n pass\n", "path": "mitmproxy/flow/master.py" } ]
diff --git a/mitmproxy/flow/master.py b/mitmproxy/flow/master.py index 80f633cd65..144323576b 100644 --- a/mitmproxy/flow/master.py +++ b/mitmproxy/flow/master.py @@ -187,6 +187,10 @@ def replay_request(self, f, block=False): rt.join() return rt + def new_request(self, method, scheme, host, port, path, block=False): + f = self.create_request(method, scheme, host, port, path) + return self.replay_request(f, block) + @controller.handler def log(self, l): self.add_log(l.msg, l.level) diff --git a/test/mitmproxy/test_flow.py b/test/mitmproxy/test_flow.py index 0fe45afb24..8212635388 100644 --- a/test/mitmproxy/test_flow.py +++ b/test/mitmproxy/test_flow.py @@ -463,6 +463,15 @@ def test_duplicate_flow(self): assert s.flow_count() == 2 assert s.index(f2) == 1 + def test_new_request(self): + s = flow.State() + fm = flow.FlowMaster( + options.Options(), + DummyServer(ProxyConfig(options.Options())), + s + ) + assert fm.new_request("GET", "http", "example.com", 80, "/") + def test_create_flow(self): s = flow.State() fm = flow.FlowMaster(None, None, s)
nilearn__nilearn-1152
Automatically loading colormaps when importing nilearn (or cm?)

The colormaps that are created in [cm.py](https://github.com/KirstieJane/nilearn/blob/40069cb14b43333a73137797eac895cfb054db29/nilearn/plotting/cm.py) are really nice!

Currently, if I want to use them in one of my wrapper scripts I have the following hack:

```
from nilearn.plotting import cm

cmap = 'cold_white_hot'
if hasattr(cm, cmap):
    cmap = getattr(cm, cmap)
```

Would it be possible to have these colormaps already loaded so they're available by name without this code?
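With the change recorded below, the module registers each custom colormap with matplotlib at import time, so the string name works directly and the `hasattr`/`getattr` hack above becomes unnecessary. A small usage sketch (importing the module is assumed to be what triggers the registration):

```
import matplotlib.pyplot as plt

# Importing the module runs the module-level register_cmap() loop.
from nilearn.plotting import cm  # noqa: F401

# The custom name now resolves through matplotlib's colormap registry.
plt.imshow([list(range(10))], cmap="cold_hot")
```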
[ { "content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nMatplotlib colormaps useful for neuroimaging.\n\"\"\"\nimport numpy as _np\n\nfrom matplotlib import cm as _cm\nfrom matplotlib import colors as _colors\n\n################################################################################\n# Custom colormaps for two-tailed symmetric statistics\n################################################################################\n\n################################################################################\n# Helper functions\n\ndef _rotate_cmap(cmap, swap_order=('green', 'red', 'blue')):\n \"\"\" Utility function to swap the colors of a colormap.\n \"\"\"\n orig_cdict = cmap._segmentdata.copy()\n\n cdict = dict()\n cdict['green'] = [(p, c1, c2)\n for (p, c1, c2) in orig_cdict[swap_order[0]]]\n cdict['blue'] = [(p, c1, c2)\n for (p, c1, c2) in orig_cdict[swap_order[1]]]\n cdict['red'] = [(p, c1, c2)\n for (p, c1, c2) in orig_cdict[swap_order[2]]]\n\n return cdict\n\n\ndef _pigtailed_cmap(cmap, swap_order=('green', 'red', 'blue')):\n \"\"\" Utility function to make a new colormap by concatenating a\n colormap with its reverse.\n \"\"\"\n orig_cdict = cmap._segmentdata.copy()\n\n cdict = dict()\n cdict['green'] = [(0.5*(1-p), c1, c2)\n for (p, c1, c2) in reversed(orig_cdict[swap_order[0]])]\n cdict['blue'] = [(0.5*(1-p), c1, c2)\n for (p, c1, c2) in reversed(orig_cdict[swap_order[1]])]\n cdict['red'] = [(0.5*(1-p), c1, c2)\n for (p, c1, c2) in reversed(orig_cdict[swap_order[2]])]\n\n for color in ('red', 'green', 'blue'):\n cdict[color].extend([(0.5*(1+p), c1, c2) \n for (p, c1, c2) in orig_cdict[color]])\n\n return cdict\n\n\ndef _concat_cmap(cmap1, cmap2):\n \"\"\" Utility function to make a new colormap by concatenating two\n colormaps.\n \"\"\"\n cdict = dict()\n\n cdict1 = cmap1._segmentdata.copy()\n cdict2 = cmap2._segmentdata.copy()\n if not hasattr(cdict1['red'], '__call__'):\n for c in ['red', 'green', 'blue']:\n cdict[c] = [(0.5*p, c1, c2) for (p, c1, c2) in cdict1[c]]\n else:\n for c in ['red', 'green', 'blue']:\n cdict[c] = []\n ps = _np.linspace(0, 1, 10)\n colors = cmap1(ps)\n for p, (r, g, b, a) in zip(ps, colors):\n cdict['red'].append((.5*p, r, r))\n cdict['green'].append((.5*p, g, g))\n cdict['blue'].append((.5*p, b, b))\n if not hasattr(cdict2['red'], '__call__'):\n for c in ['red', 'green', 'blue']:\n cdict[c].extend([(0.5*(1+p), c1, c2) for (p, c1, c2) in cdict2[c]])\n else:\n ps = _np.linspace(0, 1, 10)\n colors = cmap2(ps)\n for p, (r, g, b, a) in zip(ps, colors):\n cdict['red'].append((.5*(1+p), r, r))\n cdict['green'].append((.5*(1+p), g, g))\n cdict['blue'].append((.5*(1+p), b, b))\n\n return cdict\n\n\ndef alpha_cmap(color, name='', alpha_min=0.5, alpha_max=1.):\n \"\"\" Return a colormap with the given color, and alpha going from\n zero to 1.\n\n Parameters\n ----------\n color: (r, g, b), or a string\n A triplet of floats ranging from 0 to 1, or a matplotlib\n color string\n \"\"\"\n red, green, blue = _colors.colorConverter.to_rgb(color)\n if name == '' and hasattr(color, 'startswith'):\n name = color\n cmapspec = [(red, green, blue, 1.),\n (red, green, blue, 1.),\n ]\n cmap = _colors.LinearSegmentedColormap.from_list(\n '%s_transparent' % name, cmapspec, _cm.LUTSIZE)\n cmap._init()\n cmap._lut[:, -1] = _np.linspace(alpha_min, alpha_max, cmap._lut.shape[0])\n cmap._lut[-1, -1] = 0\n return 
cmap\n\n\n\n################################################################################\n# Our colormaps definition\n_cmaps_data = dict(\n cold_hot = _pigtailed_cmap(_cm.hot),\n cold_white_hot = _pigtailed_cmap(_cm.hot_r),\n brown_blue = _pigtailed_cmap(_cm.bone),\n cyan_copper = _pigtailed_cmap(_cm.copper),\n cyan_orange = _pigtailed_cmap(_cm.YlOrBr_r),\n blue_red = _pigtailed_cmap(_cm.Reds_r),\n brown_cyan = _pigtailed_cmap(_cm.Blues_r),\n purple_green = _pigtailed_cmap(_cm.Greens_r,\n swap_order=('red', 'blue', 'green')),\n purple_blue = _pigtailed_cmap(_cm.Blues_r,\n swap_order=('red', 'blue', 'green')),\n blue_orange = _pigtailed_cmap(_cm.Oranges_r,\n swap_order=('green', 'red', 'blue')),\n black_blue = _rotate_cmap(_cm.hot),\n black_purple = _rotate_cmap(_cm.hot,\n swap_order=('blue', 'red', 'green')),\n black_pink = _rotate_cmap(_cm.hot,\n swap_order=('blue', 'green', 'red')),\n black_green = _rotate_cmap(_cm.hot,\n swap_order=('red', 'blue', 'green')),\n black_red = _cm.hot._segmentdata.copy(),\n)\n\nif hasattr(_cm, 'ocean'):\n # MPL 0.99 doesn't have Ocean\n _cmaps_data['ocean_hot'] = _concat_cmap(_cm.ocean, _cm.hot_r)\nif hasattr(_cm, 'afmhot'): # or afmhot\n _cmaps_data['hot_white_bone'] = _concat_cmap(_cm.afmhot, _cm.bone_r)\n _cmaps_data['hot_black_bone'] = _concat_cmap(_cm.afmhot_r, _cm.bone)\n\n# Copied from matplotlib 1.2.0 for matplotlib 0.99 compatibility.\n_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0))\n_cmaps_data['bwr'] = _colors.LinearSegmentedColormap.from_list(\n 'bwr', _bwr_data)._segmentdata.copy()\n\n################################################################################\n# Build colormaps and their reverse.\n_cmap_d = dict()\n\nfor _cmapname in list(_cmaps_data.keys()): # needed as dict changes within loop\n _cmapname_r = _cmapname + '_r'\n _cmapspec = _cmaps_data[_cmapname]\n _cmaps_data[_cmapname_r] = _cm.revcmap(_cmapspec)\n _cmap_d[_cmapname] = _colors.LinearSegmentedColormap(\n _cmapname, _cmapspec, _cm.LUTSIZE)\n _cmap_d[_cmapname_r] = _colors.LinearSegmentedColormap(\n _cmapname_r, _cmaps_data[_cmapname_r],\n _cm.LUTSIZE)\n\n################################################################################\n# A few transparent colormaps\nfor color, name in (((1, 0, 0), 'red'),\n ((0, 1, 0), 'blue'),\n ((0, 0, 1), 'green'),\n ):\n _cmap_d['%s_transparent' % name] = alpha_cmap(color, name=name)\n _cmap_d['%s_transparent_full_alpha_range' % name] = alpha_cmap(\n color, alpha_min=0,\n alpha_max=1, name=name)\n\n\nlocals().update(_cmap_d)\n\n\n################################################################################\n# Utility to replace a colormap by another in an interval\n################################################################################\n\ndef dim_cmap(cmap, factor=.3, to_white=True):\n \"\"\" Dim a colormap to white, or to black.\n \"\"\"\n assert factor >= 0 and factor <=1, ValueError(\n 'Dimming factor must be larger than 0 and smaller than 1, %s was passed.' 
\n % factor)\n if to_white:\n dimmer = lambda c: 1 - factor*(1-c)\n else:\n dimmer = lambda c: factor*c\n cdict = cmap._segmentdata.copy()\n for c_index, color in enumerate(('red', 'green', 'blue')):\n color_lst = list()\n for value, c1, c2 in cdict[color]:\n color_lst.append((value, dimmer(c1), dimmer(c2)))\n cdict[color] = color_lst\n\n return _colors.LinearSegmentedColormap(\n '%s_dimmed' % cmap.name,\n cdict,\n _cm.LUTSIZE)\n\n\ndef replace_inside(outer_cmap, inner_cmap, vmin, vmax):\n \"\"\" Replace a colormap by another inside a pair of values.\n \"\"\"\n assert vmin < vmax, ValueError('vmin must be smaller than vmax')\n assert vmin >= 0, ValueError('vmin must be larger than 0, %s was passed.' \n % vmin)\n assert vmax <= 1, ValueError('vmax must be smaller than 1, %s was passed.' \n % vmax)\n outer_cdict = outer_cmap._segmentdata.copy()\n inner_cdict = inner_cmap._segmentdata.copy()\n\n cdict = dict()\n for this_cdict, cmap in [(outer_cdict, outer_cmap),\n (inner_cdict, inner_cmap)]:\n if hasattr(this_cdict['red'], '__call__'):\n ps = _np.linspace(0, 1, 25)\n colors = cmap(ps)\n this_cdict['red'] = list()\n this_cdict['green'] = list()\n this_cdict['blue'] = list()\n for p, (r, g, b, a) in zip(ps, colors):\n this_cdict['red'].append((p, r, r))\n this_cdict['green'].append((p, g, g))\n this_cdict['blue'].append((p, b, b))\n\n\n for c_index, color in enumerate(('red', 'green', 'blue')):\n color_lst = list()\n\n for value, c1, c2 in outer_cdict[color]:\n if value >= vmin:\n break\n color_lst.append((value, c1, c2))\n\n color_lst.append((vmin, outer_cmap(vmin)[c_index], \n inner_cmap(vmin)[c_index]))\n\n for value, c1, c2 in inner_cdict[color]:\n if value <= vmin:\n continue\n if value >= vmax:\n break\n color_lst.append((value, c1, c2))\n\n color_lst.append((vmax, inner_cmap(vmax)[c_index],\n outer_cmap(vmax)[c_index]))\n\n for value, c1, c2 in outer_cdict[color]:\n if value <= vmax:\n continue\n color_lst.append((value, c1, c2))\n\n cdict[color] = color_lst\n\n return _colors.LinearSegmentedColormap(\n '%s_inside_%s' % (inner_cmap.name, outer_cmap.name),\n cdict,\n _cm.LUTSIZE)\n\n\n", "path": "nilearn/plotting/cm.py" } ]
[ { "content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nMatplotlib colormaps useful for neuroimaging.\n\"\"\"\nimport numpy as _np\n\nfrom matplotlib import cm as _cm\nfrom matplotlib import colors as _colors\n\n################################################################################\n# Custom colormaps for two-tailed symmetric statistics\n################################################################################\n\n################################################################################\n# Helper functions\n\ndef _rotate_cmap(cmap, swap_order=('green', 'red', 'blue')):\n \"\"\" Utility function to swap the colors of a colormap.\n \"\"\"\n orig_cdict = cmap._segmentdata.copy()\n\n cdict = dict()\n cdict['green'] = [(p, c1, c2)\n for (p, c1, c2) in orig_cdict[swap_order[0]]]\n cdict['blue'] = [(p, c1, c2)\n for (p, c1, c2) in orig_cdict[swap_order[1]]]\n cdict['red'] = [(p, c1, c2)\n for (p, c1, c2) in orig_cdict[swap_order[2]]]\n\n return cdict\n\n\ndef _pigtailed_cmap(cmap, swap_order=('green', 'red', 'blue')):\n \"\"\" Utility function to make a new colormap by concatenating a\n colormap with its reverse.\n \"\"\"\n orig_cdict = cmap._segmentdata.copy()\n\n cdict = dict()\n cdict['green'] = [(0.5*(1-p), c1, c2)\n for (p, c1, c2) in reversed(orig_cdict[swap_order[0]])]\n cdict['blue'] = [(0.5*(1-p), c1, c2)\n for (p, c1, c2) in reversed(orig_cdict[swap_order[1]])]\n cdict['red'] = [(0.5*(1-p), c1, c2)\n for (p, c1, c2) in reversed(orig_cdict[swap_order[2]])]\n\n for color in ('red', 'green', 'blue'):\n cdict[color].extend([(0.5*(1+p), c1, c2) \n for (p, c1, c2) in orig_cdict[color]])\n\n return cdict\n\n\ndef _concat_cmap(cmap1, cmap2):\n \"\"\" Utility function to make a new colormap by concatenating two\n colormaps.\n \"\"\"\n cdict = dict()\n\n cdict1 = cmap1._segmentdata.copy()\n cdict2 = cmap2._segmentdata.copy()\n if not hasattr(cdict1['red'], '__call__'):\n for c in ['red', 'green', 'blue']:\n cdict[c] = [(0.5*p, c1, c2) for (p, c1, c2) in cdict1[c]]\n else:\n for c in ['red', 'green', 'blue']:\n cdict[c] = []\n ps = _np.linspace(0, 1, 10)\n colors = cmap1(ps)\n for p, (r, g, b, a) in zip(ps, colors):\n cdict['red'].append((.5*p, r, r))\n cdict['green'].append((.5*p, g, g))\n cdict['blue'].append((.5*p, b, b))\n if not hasattr(cdict2['red'], '__call__'):\n for c in ['red', 'green', 'blue']:\n cdict[c].extend([(0.5*(1+p), c1, c2) for (p, c1, c2) in cdict2[c]])\n else:\n ps = _np.linspace(0, 1, 10)\n colors = cmap2(ps)\n for p, (r, g, b, a) in zip(ps, colors):\n cdict['red'].append((.5*(1+p), r, r))\n cdict['green'].append((.5*(1+p), g, g))\n cdict['blue'].append((.5*(1+p), b, b))\n\n return cdict\n\n\ndef alpha_cmap(color, name='', alpha_min=0.5, alpha_max=1.):\n \"\"\" Return a colormap with the given color, and alpha going from\n zero to 1.\n\n Parameters\n ----------\n color: (r, g, b), or a string\n A triplet of floats ranging from 0 to 1, or a matplotlib\n color string\n \"\"\"\n red, green, blue = _colors.colorConverter.to_rgb(color)\n if name == '' and hasattr(color, 'startswith'):\n name = color\n cmapspec = [(red, green, blue, 1.),\n (red, green, blue, 1.),\n ]\n cmap = _colors.LinearSegmentedColormap.from_list(\n '%s_transparent' % name, cmapspec, _cm.LUTSIZE)\n cmap._init()\n cmap._lut[:, -1] = _np.linspace(alpha_min, alpha_max, cmap._lut.shape[0])\n cmap._lut[-1, -1] = 0\n return 
cmap\n\n\n\n################################################################################\n# Our colormaps definition\n_cmaps_data = dict(\n cold_hot = _pigtailed_cmap(_cm.hot),\n cold_white_hot = _pigtailed_cmap(_cm.hot_r),\n brown_blue = _pigtailed_cmap(_cm.bone),\n cyan_copper = _pigtailed_cmap(_cm.copper),\n cyan_orange = _pigtailed_cmap(_cm.YlOrBr_r),\n blue_red = _pigtailed_cmap(_cm.Reds_r),\n brown_cyan = _pigtailed_cmap(_cm.Blues_r),\n purple_green = _pigtailed_cmap(_cm.Greens_r,\n swap_order=('red', 'blue', 'green')),\n purple_blue = _pigtailed_cmap(_cm.Blues_r,\n swap_order=('red', 'blue', 'green')),\n blue_orange = _pigtailed_cmap(_cm.Oranges_r,\n swap_order=('green', 'red', 'blue')),\n black_blue = _rotate_cmap(_cm.hot),\n black_purple = _rotate_cmap(_cm.hot,\n swap_order=('blue', 'red', 'green')),\n black_pink = _rotate_cmap(_cm.hot,\n swap_order=('blue', 'green', 'red')),\n black_green = _rotate_cmap(_cm.hot,\n swap_order=('red', 'blue', 'green')),\n black_red = _cm.hot._segmentdata.copy(),\n)\n\nif hasattr(_cm, 'ocean'):\n # MPL 0.99 doesn't have Ocean\n _cmaps_data['ocean_hot'] = _concat_cmap(_cm.ocean, _cm.hot_r)\nif hasattr(_cm, 'afmhot'): # or afmhot\n _cmaps_data['hot_white_bone'] = _concat_cmap(_cm.afmhot, _cm.bone_r)\n _cmaps_data['hot_black_bone'] = _concat_cmap(_cm.afmhot_r, _cm.bone)\n\n# Copied from matplotlib 1.2.0 for matplotlib 0.99 compatibility.\n_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0))\n_cmaps_data['bwr'] = _colors.LinearSegmentedColormap.from_list(\n 'bwr', _bwr_data)._segmentdata.copy()\n\n################################################################################\n# Build colormaps and their reverse.\n_cmap_d = dict()\n\nfor _cmapname in list(_cmaps_data.keys()): # needed as dict changes within loop\n _cmapname_r = _cmapname + '_r'\n _cmapspec = _cmaps_data[_cmapname]\n _cmaps_data[_cmapname_r] = _cm.revcmap(_cmapspec)\n _cmap_d[_cmapname] = _colors.LinearSegmentedColormap(\n _cmapname, _cmapspec, _cm.LUTSIZE)\n _cmap_d[_cmapname_r] = _colors.LinearSegmentedColormap(\n _cmapname_r, _cmaps_data[_cmapname_r],\n _cm.LUTSIZE)\n\n################################################################################\n# A few transparent colormaps\nfor color, name in (((1, 0, 0), 'red'),\n ((0, 1, 0), 'blue'),\n ((0, 0, 1), 'green'),\n ):\n _cmap_d['%s_transparent' % name] = alpha_cmap(color, name=name)\n _cmap_d['%s_transparent_full_alpha_range' % name] = alpha_cmap(\n color, alpha_min=0,\n alpha_max=1, name=name)\n\n\n# Save colormaps in the scope of the module\nlocals().update(_cmap_d)\n# Register cmaps in matplotlib too\nfor k, v in _cmap_d.items():\n _cm.register_cmap(name=k, cmap=v)\n\n\n################################################################################\n# Utility to replace a colormap by another in an interval\n################################################################################\n\ndef dim_cmap(cmap, factor=.3, to_white=True):\n \"\"\" Dim a colormap to white, or to black.\n \"\"\"\n assert factor >= 0 and factor <=1, ValueError(\n 'Dimming factor must be larger than 0 and smaller than 1, %s was passed.' 
\n % factor)\n if to_white:\n dimmer = lambda c: 1 - factor*(1-c)\n else:\n dimmer = lambda c: factor*c\n cdict = cmap._segmentdata.copy()\n for c_index, color in enumerate(('red', 'green', 'blue')):\n color_lst = list()\n for value, c1, c2 in cdict[color]:\n color_lst.append((value, dimmer(c1), dimmer(c2)))\n cdict[color] = color_lst\n\n return _colors.LinearSegmentedColormap(\n '%s_dimmed' % cmap.name,\n cdict,\n _cm.LUTSIZE)\n\n\ndef replace_inside(outer_cmap, inner_cmap, vmin, vmax):\n \"\"\" Replace a colormap by another inside a pair of values.\n \"\"\"\n assert vmin < vmax, ValueError('vmin must be smaller than vmax')\n assert vmin >= 0, ValueError('vmin must be larger than 0, %s was passed.' \n % vmin)\n assert vmax <= 1, ValueError('vmax must be smaller than 1, %s was passed.' \n % vmax)\n outer_cdict = outer_cmap._segmentdata.copy()\n inner_cdict = inner_cmap._segmentdata.copy()\n\n cdict = dict()\n for this_cdict, cmap in [(outer_cdict, outer_cmap),\n (inner_cdict, inner_cmap)]:\n if hasattr(this_cdict['red'], '__call__'):\n ps = _np.linspace(0, 1, 25)\n colors = cmap(ps)\n this_cdict['red'] = list()\n this_cdict['green'] = list()\n this_cdict['blue'] = list()\n for p, (r, g, b, a) in zip(ps, colors):\n this_cdict['red'].append((p, r, r))\n this_cdict['green'].append((p, g, g))\n this_cdict['blue'].append((p, b, b))\n\n\n for c_index, color in enumerate(('red', 'green', 'blue')):\n color_lst = list()\n\n for value, c1, c2 in outer_cdict[color]:\n if value >= vmin:\n break\n color_lst.append((value, c1, c2))\n\n color_lst.append((vmin, outer_cmap(vmin)[c_index], \n inner_cmap(vmin)[c_index]))\n\n for value, c1, c2 in inner_cdict[color]:\n if value <= vmin:\n continue\n if value >= vmax:\n break\n color_lst.append((value, c1, c2))\n\n color_lst.append((vmax, inner_cmap(vmax)[c_index],\n outer_cmap(vmax)[c_index]))\n\n for value, c1, c2 in outer_cdict[color]:\n if value <= vmax:\n continue\n color_lst.append((value, c1, c2))\n\n cdict[color] = color_lst\n\n return _colors.LinearSegmentedColormap(\n '%s_inside_%s' % (inner_cmap.name, outer_cmap.name),\n cdict,\n _cm.LUTSIZE)\n\n\n", "path": "nilearn/plotting/cm.py" } ]
diff --git a/nilearn/plotting/cm.py b/nilearn/plotting/cm.py index d11805ed77..72bbe7c828 100644 --- a/nilearn/plotting/cm.py +++ b/nilearn/plotting/cm.py @@ -175,7 +175,11 @@ def alpha_cmap(color, name='', alpha_min=0.5, alpha_max=1.): alpha_max=1, name=name) +# Save colormaps in the scope of the module locals().update(_cmap_d) +# Register cmaps in matplotlib too +for k, v in _cmap_d.items(): + _cm.register_cmap(name=k, cmap=v) ################################################################################ diff --git a/nilearn/plotting/tests/test_cm.py b/nilearn/plotting/tests/test_cm.py index 92554bbd06..36e44058eb 100644 --- a/nilearn/plotting/tests/test_cm.py +++ b/nilearn/plotting/tests/test_cm.py @@ -19,3 +19,7 @@ def test_replace_inside(): if hasattr(plt.cm, 'gnuplot'): # gnuplot is only in recent version of MPL replace_inside(plt.cm.gnuplot, plt.cm.gnuplot2, .2, .8) + + +def test_cm_preload(): + plt.imshow([list(range(10))], cmap="cold_hot")
wagtail__wagtail-6442
Incorrectly configured search backend breaks page publishing (URLS always has to be a list)

### Issue Summary

I ran into this issue that is probably more of a quality-of-life thing.

### Steps to Reproduce

1. Have a search backend configured with (as I eventually deduced) URLS set to a single value and not a list. Example:

```
if "ELASTICSEARCH_HOST" in os.environ:
    WAGTAILSEARCH_BACKENDS["elasticsearch"] = {
        "BACKEND": "wagtail.search.backends.elasticsearch7",
        "URLS": os.environ["ELASTICSEARCH_HOST"],
        "AUTO_UPDATE": True,
        "ATOMIC_REBUILD": True,
    }
```

In this case what I was passing in was a single host URL: http://elasticsearch:9200

2. Try to save a page, or even a draft of a page
3. Wagtail explodes: https://dpaste.com/FA8FKYRS3

It took me a while to trace through and find where the host was missing, as the error message really doesn't point you in the right direction. I think the problem may only be with Elasticsearch, but I haven't tried other backends.

The doc examples are clear and always show URLS wrapped as a list, but my instinct, if I have a single value, is not to cast it to a list. At the very least a note in the docs might help. Even better would be catching the error, recovering a bit more gracefully, and pointing at the problem. Best would be to handle this a bit better. I think the code is here:

https://github.com/wagtail/wagtail/blob/8251b4a6c7e1660e16e721a9f45669986a2699a4/wagtail/search/backends/elasticsearch2.py#L1070

I think we could do a simple check: if the value is not a list, cast it to one. Now there could be complications with that, obviously, which is why this is just a bug report so far and not a fix. :-D Short of that, we should be able to check whether the value is a list and raise an error if not.

* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: no

### Technical details

* Python version: 3.8
* Django version: 3.1.1
* Wagtail version: 2.10
* Browser version: Chrome 85
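As a point of reference, here is a corrected version of the settings snippet from the steps above that avoids the crash: wrap the single host in a list before handing it to the backend. This is the configuration-side workaround implied by the docs, not necessarily what Wagtail itself ended up changing.

```
import os

# Assumes WAGTAILSEARCH_BACKENDS is already defined earlier in settings.
if "ELASTICSEARCH_HOST" in os.environ:
    WAGTAILSEARCH_BACKENDS["elasticsearch"] = {
        "BACKEND": "wagtail.search.backends.elasticsearch7",
        # URLS must be a list, even when there is only one host,
        # e.g. ["http://elasticsearch:9200"].
        "URLS": [os.environ["ELASTICSEARCH_HOST"]],
        "AUTO_UPDATE": True,
        "ATOMIC_REBUILD": True,
    }
```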
[ { "content": "import copy\nimport json\nfrom collections import OrderedDict\nfrom urllib.parse import urlparse\n\nfrom django.db import DEFAULT_DB_ALIAS, models\nfrom django.db.models.sql import Query\nfrom django.db.models.sql.constants import MULTI\nfrom django.utils.crypto import get_random_string\nfrom elasticsearch import Elasticsearch, NotFoundError\nfrom elasticsearch.helpers import bulk\n\nfrom wagtail.search.backends.base import (\n BaseSearchBackend, BaseSearchQueryCompiler, BaseSearchResults, FilterFieldError)\nfrom wagtail.search.index import (\n AutocompleteField, FilterField, Indexed, RelatedFields, SearchField, class_is_indexed)\nfrom wagtail.search.query import And, Boost, MatchAll, Not, Or, Phrase, PlainText\nfrom wagtail.utils.utils import deep_update\n\n\ndef get_model_root(model):\n \"\"\"\n This function finds the root model for any given model. The root model is\n the highest concrete model that it descends from. If the model doesn't\n descend from another concrete model then the model is it's own root model so\n it is returned.\n\n Examples:\n >>> get_model_root(wagtailcore.Page)\n wagtailcore.Page\n\n >>> get_model_root(myapp.HomePage)\n wagtailcore.Page\n\n >>> get_model_root(wagtailimages.Image)\n wagtailimages.Image\n \"\"\"\n if model._meta.parents:\n parent_model = list(model._meta.parents.items())[0][0]\n return get_model_root(parent_model)\n\n return model\n\n\nclass Elasticsearch2Mapping:\n all_field_name = '_all'\n\n # Was originally named '_partials' but renamed '_edgengrams' when we added Elasticsearch 6 support\n # The ES 2 and 5 backends still use the old name for backwards compatibility\n edgengrams_field_name = '_partials'\n\n type_map = {\n 'AutoField': 'integer',\n 'BinaryField': 'binary',\n 'BooleanField': 'boolean',\n 'CharField': 'string',\n 'CommaSeparatedIntegerField': 'string',\n 'DateField': 'date',\n 'DateTimeField': 'date',\n 'DecimalField': 'double',\n 'FileField': 'string',\n 'FilePathField': 'string',\n 'FloatField': 'double',\n 'IntegerField': 'integer',\n 'BigIntegerField': 'long',\n 'IPAddressField': 'string',\n 'GenericIPAddressField': 'string',\n 'NullBooleanField': 'boolean',\n 'PositiveIntegerField': 'integer',\n 'PositiveSmallIntegerField': 'integer',\n 'SlugField': 'string',\n 'SmallIntegerField': 'integer',\n 'TextField': 'string',\n 'TimeField': 'date',\n }\n\n keyword_type = 'string'\n text_type = 'string'\n set_index_not_analyzed_on_filter_fields = True\n edgengram_analyzer_config = {\n 'analyzer': 'edgengram_analyzer',\n 'search_analyzer': 'standard',\n }\n\n def __init__(self, model):\n self.model = model\n\n def get_parent(self):\n for base in self.model.__bases__:\n if issubclass(base, Indexed) and issubclass(base, models.Model):\n return type(self)(base)\n\n def get_document_type(self):\n return self.model.indexed_get_content_type()\n\n def get_field_column_name(self, field):\n # Fields in derived models get prefixed with their model name, fields\n # in the root model don't get prefixed at all\n # This is to prevent mapping clashes in cases where two page types have\n # a field with the same name but a different type.\n root_model = get_model_root(self.model)\n definition_model = field.get_definition_model(self.model)\n\n if definition_model != root_model:\n prefix = definition_model._meta.app_label.lower() + '_' + definition_model.__name__.lower() + '__'\n else:\n prefix = ''\n\n if isinstance(field, FilterField):\n return prefix + field.get_attname(self.model) + '_filter'\n elif isinstance(field, 
AutocompleteField):\n return prefix + field.get_attname(self.model) + '_edgengrams'\n elif isinstance(field, SearchField):\n return prefix + field.get_attname(self.model)\n elif isinstance(field, RelatedFields):\n return prefix + field.field_name\n\n def get_content_type(self):\n \"\"\"\n Returns the content type as a string for the model.\n\n For example: \"wagtailcore.Page\"\n \"myapp.MyModel\"\n \"\"\"\n return self.model._meta.app_label + '.' + self.model.__name__\n\n def get_all_content_types(self):\n \"\"\"\n Returns all the content type strings that apply to this model.\n This includes the models' content type and all concrete ancestor\n models that inherit from Indexed.\n\n For example: [\"myapp.MyPageModel\", \"wagtailcore.Page\"]\n [\"myapp.MyModel\"]\n \"\"\"\n # Add our content type\n content_types = [self.get_content_type()]\n\n # Add all ancestor classes content types as well\n ancestor = self.get_parent()\n while ancestor:\n content_types.append(ancestor.get_content_type())\n ancestor = ancestor.get_parent()\n\n return content_types\n\n def get_field_mapping(self, field):\n if isinstance(field, RelatedFields):\n mapping = {'type': 'nested', 'properties': {}}\n nested_model = field.get_field(self.model).related_model\n nested_mapping = type(self)(nested_model)\n\n for sub_field in field.fields:\n sub_field_name, sub_field_mapping = nested_mapping.get_field_mapping(sub_field)\n mapping['properties'][sub_field_name] = sub_field_mapping\n\n return self.get_field_column_name(field), mapping\n else:\n mapping = {'type': self.type_map.get(field.get_type(self.model), 'string')}\n\n if isinstance(field, SearchField):\n if mapping['type'] == 'string':\n mapping['type'] = self.text_type\n\n if field.boost:\n mapping['boost'] = field.boost\n\n if field.partial_match:\n mapping.update(self.edgengram_analyzer_config)\n\n mapping['include_in_all'] = True\n\n if isinstance(field, AutocompleteField):\n mapping['type'] = self.text_type\n mapping['include_in_all'] = False\n mapping.update(self.edgengram_analyzer_config)\n\n elif isinstance(field, FilterField):\n if mapping['type'] == 'string':\n mapping['type'] = self.keyword_type\n\n if self.set_index_not_analyzed_on_filter_fields:\n # Not required on ES5 as that uses the \"keyword\" type for\n # filtered string fields\n mapping['index'] = 'not_analyzed'\n\n mapping['include_in_all'] = False\n\n if 'es_extra' in field.kwargs:\n for key, value in field.kwargs['es_extra'].items():\n mapping[key] = value\n\n return self.get_field_column_name(field), mapping\n\n def get_mapping(self):\n # Make field list\n fields = {\n 'pk': dict(type=self.keyword_type, store=True, include_in_all=False),\n 'content_type': dict(type=self.keyword_type, include_in_all=False),\n self.edgengrams_field_name: dict(type=self.text_type, include_in_all=False),\n }\n fields[self.edgengrams_field_name].update(self.edgengram_analyzer_config)\n\n if self.set_index_not_analyzed_on_filter_fields:\n # Not required on ES5 as that uses the \"keyword\" type for\n # filtered string fields\n fields['pk']['index'] = 'not_analyzed'\n fields['content_type']['index'] = 'not_analyzed'\n\n fields.update(dict(\n self.get_field_mapping(field) for field in self.model.get_search_fields()\n ))\n\n return {\n self.get_document_type(): {\n 'properties': fields,\n }\n }\n\n def get_document_id(self, obj):\n return obj.indexed_get_toplevel_content_type() + ':' + str(obj.pk)\n\n def _get_nested_document(self, fields, obj):\n doc = {}\n edgengrams = []\n model = type(obj)\n mapping = 
type(self)(model)\n\n for field in fields:\n value = field.get_value(obj)\n doc[mapping.get_field_column_name(field)] = value\n\n # Check if this field should be added into _edgengrams\n if (isinstance(field, SearchField) and field.partial_match) or isinstance(field, AutocompleteField):\n edgengrams.append(value)\n\n return doc, edgengrams\n\n def get_document(self, obj):\n # Build document\n doc = dict(pk=str(obj.pk), content_type=self.get_all_content_types())\n edgengrams = []\n for field in self.model.get_search_fields():\n value = field.get_value(obj)\n\n if isinstance(field, RelatedFields):\n if isinstance(value, (models.Manager, models.QuerySet)):\n nested_docs = []\n\n for nested_obj in value.all():\n nested_doc, extra_edgengrams = self._get_nested_document(field.fields, nested_obj)\n nested_docs.append(nested_doc)\n edgengrams.extend(extra_edgengrams)\n\n value = nested_docs\n elif isinstance(value, models.Model):\n value, extra_edgengrams = self._get_nested_document(field.fields, value)\n edgengrams.extend(extra_edgengrams)\n elif isinstance(field, FilterField):\n if isinstance(value, (models.Manager, models.QuerySet)):\n value = list(value.values_list('pk', flat=True))\n elif isinstance(value, models.Model):\n value = value.pk\n elif isinstance(value, (list, tuple)):\n value = [item.pk if isinstance(item, models.Model) else item for item in value]\n\n doc[self.get_field_column_name(field)] = value\n\n # Check if this field should be added into _edgengrams\n if (isinstance(field, SearchField) and field.partial_match) or isinstance(field, AutocompleteField):\n edgengrams.append(value)\n\n # Add partials to document\n doc[self.edgengrams_field_name] = edgengrams\n\n return doc\n\n def __repr__(self):\n return '<ElasticsearchMapping: %s>' % (self.model.__name__, )\n\n\nclass Elasticsearch2SearchQueryCompiler(BaseSearchQueryCompiler):\n mapping_class = Elasticsearch2Mapping\n DEFAULT_OPERATOR = 'or'\n\n def __init__(self, *args, **kwargs):\n super(Elasticsearch2SearchQueryCompiler, self).__init__(*args, **kwargs)\n self.mapping = self.mapping_class(self.queryset.model)\n\n # Convert field names into index column names\n if self.fields:\n fields = []\n searchable_fields = {f.field_name: f for f in self.queryset.model.get_searchable_search_fields()}\n for field_name in self.fields:\n if field_name in searchable_fields:\n field_name = self.mapping.get_field_column_name(searchable_fields[field_name])\n\n fields.append(field_name)\n\n self.remapped_fields = fields\n else:\n self.remapped_fields = None\n\n def _process_lookup(self, field, lookup, value):\n column_name = self.mapping.get_field_column_name(field)\n\n if lookup == 'exact':\n if value is None:\n return {\n 'missing': {\n 'field': column_name,\n }\n }\n else:\n return {\n 'term': {\n column_name: value,\n }\n }\n\n if lookup == 'isnull':\n if value:\n return {\n 'missing': {\n 'field': column_name,\n }\n }\n else:\n return {\n 'exists': {\n 'field': column_name,\n }\n }\n\n if lookup in ['startswith', 'prefix']:\n return {\n 'prefix': {\n column_name: value,\n }\n }\n\n if lookup in ['gt', 'gte', 'lt', 'lte']:\n return {\n 'range': {\n column_name: {\n lookup: value,\n }\n }\n }\n\n if lookup == 'range':\n lower, upper = value\n\n return {\n 'range': {\n column_name: {\n 'gte': lower,\n 'lte': upper,\n }\n }\n }\n\n if lookup == 'in':\n if isinstance(value, Query):\n db_alias = self.queryset._db or DEFAULT_DB_ALIAS\n resultset = value.get_compiler(db_alias).execute_sql(result_type=MULTI)\n value = [row[0] for chunk in resultset 
for row in chunk]\n\n elif not isinstance(value, list):\n value = list(value)\n return {\n 'terms': {\n column_name: value,\n }\n }\n\n def _connect_filters(self, filters, connector, negated):\n if filters:\n if len(filters) == 1:\n filter_out = filters[0]\n else:\n filter_out = {\n connector.lower(): [\n fil for fil in filters if fil is not None\n ]\n }\n\n if negated:\n filter_out = {\n 'not': filter_out\n }\n\n return filter_out\n\n def _compile_plaintext_query(self, query, fields, boost=1.0):\n match_query = {\n 'query': query.query_string\n }\n\n if query.operator != 'or':\n match_query['operator'] = query.operator\n\n if boost != 1.0:\n match_query['boost'] = boost\n\n if len(fields) == 1:\n return {\n 'match': {\n fields[0]: match_query\n }\n }\n else:\n match_query['fields'] = fields\n\n return {\n 'multi_match': match_query\n }\n\n def _compile_phrase_query(self, query, fields):\n if len(fields) == 1:\n return {\n 'match_phrase': {\n fields[0]: query.query_string\n }\n }\n else:\n return {\n 'multi_match': {\n 'query': query.query_string,\n 'fields': fields,\n 'type': 'phrase',\n }\n }\n\n def _compile_query(self, query, field, boost=1.0):\n if isinstance(query, MatchAll):\n match_all_query = {}\n\n if boost != 1.0:\n match_all_query['boost'] = boost\n\n return {'match_all': match_all_query}\n\n elif isinstance(query, And):\n return {\n 'bool': {\n 'must': [\n self._compile_query(child_query, field, boost)\n for child_query in query.subqueries\n ]\n }\n }\n\n elif isinstance(query, Or):\n return {\n 'bool': {\n 'should': [\n self._compile_query(child_query, field, boost)\n for child_query in query.subqueries\n ]\n }\n }\n\n elif isinstance(query, Not):\n return {\n 'bool': {\n 'mustNot': self._compile_query(query.subquery, field, boost)\n }\n }\n\n elif isinstance(query, PlainText):\n return self._compile_plaintext_query(query, [field], boost)\n\n elif isinstance(query, Phrase):\n return self._compile_phrase_query(query, [field])\n\n elif isinstance(query, Boost):\n return self._compile_query(query.subquery, field, boost * query.boost)\n\n else:\n raise NotImplementedError(\n '`%s` is not supported by the Elasticsearch search backend.'\n % query.__class__.__name__)\n\n def get_inner_query(self):\n if self.remapped_fields:\n fields = self.remapped_fields\n elif self.partial_match:\n fields = [self.mapping.all_field_name, self.mapping.edgengrams_field_name]\n else:\n fields = [self.mapping.all_field_name]\n\n if len(fields) == 0:\n # No fields. Return a query that'll match nothing\n return {\n 'bool': {\n 'mustNot': {'match_all': {}}\n }\n }\n\n # Handle MatchAll and PlainText separately as they were supported\n # before \"search query classes\" was implemented and we'd like to\n # keep the query the same as before\n if isinstance(self.query, MatchAll):\n return {'match_all': {}}\n\n elif isinstance(self.query, PlainText):\n return self._compile_plaintext_query(self.query, fields)\n\n elif isinstance(self.query, Phrase):\n return self._compile_phrase_query(self.query, fields)\n\n else:\n if len(fields) == 1:\n return self._compile_query(self.query, fields[0])\n else:\n # Compile a query for each field then combine with disjunction\n # max (or operator which takes the max score out of each of the\n # field queries)\n field_queries = []\n for field in fields:\n field_queries.append(self._compile_query(self.query, field))\n\n return {\n 'dis_max': {\n 'queries': field_queries\n }\n }\n\n def get_content_type_filter(self):\n # Query content_type using a \"match\" query. 
See comment in\n # Elasticsearch2Mapping.get_document for more details\n content_type = self.mapping_class(self.queryset.model).get_content_type()\n\n return {\n 'match': {\n 'content_type': content_type\n }\n }\n\n def get_filters(self):\n filters = []\n\n # Filter by content type\n filters.append(self.get_content_type_filter())\n\n # Apply filters from queryset\n queryset_filters = self._get_filters_from_queryset()\n if queryset_filters:\n filters.append(queryset_filters)\n\n return filters\n\n def get_query(self):\n inner_query = self.get_inner_query()\n filters = self.get_filters()\n\n if len(filters) == 1:\n return {\n 'filtered': {\n 'query': inner_query,\n 'filter': filters[0],\n }\n }\n elif len(filters) > 1:\n return {\n 'filtered': {\n 'query': inner_query,\n 'filter': {\n 'and': filters,\n }\n }\n }\n else:\n return inner_query\n\n def get_sort(self):\n # Ordering by relevance is the default in Elasticsearch\n if self.order_by_relevance:\n return\n\n # Get queryset and make sure its ordered\n if self.queryset.ordered:\n sort = []\n\n for reverse, field in self._get_order_by():\n column_name = self.mapping.get_field_column_name(field)\n\n sort.append({\n column_name: 'desc' if reverse else 'asc'\n })\n\n return sort\n\n else:\n # Order by pk field\n return ['pk']\n\n def __repr__(self):\n return json.dumps(self.get_query())\n\n\nclass ElasticsearchAutocompleteQueryCompilerImpl:\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Convert field names into index column names\n # Note: this overrides Elasticsearch2SearchQueryCompiler by using autocomplete fields instead of searchbale fields\n if self.fields:\n fields = []\n autocomplete_fields = {f.field_name: f for f in self.queryset.model.get_autocomplete_search_fields()}\n for field_name in self.fields:\n if field_name in autocomplete_fields:\n field_name = self.mapping.get_field_column_name(autocomplete_fields[field_name])\n\n fields.append(field_name)\n\n self.remapped_fields = fields\n else:\n self.remapped_fields = None\n\n def get_inner_query(self):\n fields = self.remapped_fields or [self.mapping.edgengrams_field_name]\n\n if len(fields) == 0:\n # No fields. Return a query that'll match nothing\n return {\n 'bool': {\n 'mustNot': {'match_all': {}}\n }\n }\n\n return self._compile_plaintext_query(self.query, fields)\n\n\nclass Elasticsearch2AutocompleteQueryCompiler(Elasticsearch2SearchQueryCompiler, ElasticsearchAutocompleteQueryCompilerImpl):\n pass\n\n\nclass Elasticsearch2SearchResults(BaseSearchResults):\n fields_param_name = 'fields'\n supports_facet = True\n\n def facet(self, field_name):\n # Get field\n field = self.query_compiler._get_filterable_field(field_name)\n if field is None:\n raise FilterFieldError(\n 'Cannot facet search results with field \"' + field_name + '\". 
Please add index.FilterField(\\''\n + field_name + '\\') to ' + self.query_compiler.queryset.model.__name__ + '.search_fields.',\n field_name=field_name\n )\n\n # Build body\n body = self._get_es_body()\n column_name = self.query_compiler.mapping.get_field_column_name(field)\n\n body['aggregations'] = {\n field_name: {\n 'terms': {\n 'field': column_name,\n 'missing': 0,\n }\n }\n }\n\n # Send to Elasticsearch\n response = self.backend.es.search(\n index=self.backend.get_index_for_model(self.query_compiler.queryset.model).name,\n body=body,\n size=0,\n )\n\n return OrderedDict([\n (bucket['key'] if bucket['key'] != 0 else None, bucket['doc_count'])\n for bucket in response['aggregations'][field_name]['buckets']\n ])\n\n def _get_es_body(self, for_count=False):\n body = {\n 'query': self.query_compiler.get_query()\n }\n\n if not for_count:\n sort = self.query_compiler.get_sort()\n\n if sort is not None:\n body['sort'] = sort\n\n return body\n\n def _get_results_from_hits(self, hits):\n \"\"\"\n Yields Django model instances from a page of hits returned by Elasticsearch\n \"\"\"\n # Get pks from results\n pks = [hit['fields']['pk'][0] for hit in hits]\n scores = {str(hit['fields']['pk'][0]): hit['_score'] for hit in hits}\n\n # Initialise results dictionary\n results = {str(pk): None for pk in pks}\n\n # Find objects in database and add them to dict\n for obj in self.query_compiler.queryset.filter(pk__in=pks):\n results[str(obj.pk)] = obj\n\n if self._score_field:\n setattr(obj, self._score_field, scores.get(str(obj.pk)))\n\n # Yield results in order given by Elasticsearch\n for pk in pks:\n result = results[str(pk)]\n if result:\n yield result\n\n def _do_search(self):\n PAGE_SIZE = 100\n\n if self.stop is not None:\n limit = self.stop - self.start\n else:\n limit = None\n\n use_scroll = limit is None or limit > PAGE_SIZE\n\n params = {\n 'index': self.backend.get_index_for_model(self.query_compiler.queryset.model).name,\n 'body': self._get_es_body(),\n '_source': False,\n self.fields_param_name: 'pk',\n }\n\n if use_scroll:\n params.update({\n 'scroll': '2m',\n 'size': PAGE_SIZE,\n })\n\n # The scroll API doesn't support offset, manually skip the first results\n skip = self.start\n\n # Send to Elasticsearch\n page = self.backend.es.search(**params)\n\n while True:\n hits = page['hits']['hits']\n\n if len(hits) == 0:\n break\n\n # Get results\n if skip < len(hits):\n for result in self._get_results_from_hits(hits):\n if limit is not None and limit == 0:\n break\n\n if skip == 0:\n yield result\n\n if limit is not None:\n limit -= 1\n else:\n skip -= 1\n\n if limit is not None and limit == 0:\n break\n else:\n # Skip whole page\n skip -= len(hits)\n\n # Fetch next page of results\n if '_scroll_id' not in page:\n break\n\n page = self.backend.es.scroll(scroll_id=page['_scroll_id'], scroll='2m')\n\n # Clear the scroll\n if '_scroll_id' in page:\n self.backend.es.clear_scroll(scroll_id=page['_scroll_id'])\n else:\n params.update({\n 'from_': self.start,\n 'size': limit or PAGE_SIZE,\n })\n\n # Send to Elasticsearch\n hits = self.backend.es.search(**params)['hits']['hits']\n\n # Get results\n for result in self._get_results_from_hits(hits):\n yield result\n\n def _do_count(self):\n # Get count\n hit_count = self.backend.es.count(\n index=self.backend.get_index_for_model(self.query_compiler.queryset.model).name,\n body=self._get_es_body(for_count=True),\n )['count']\n\n # Add limits\n hit_count -= self.start\n if self.stop is not None:\n hit_count = min(hit_count, self.stop - self.start)\n\n 
return max(hit_count, 0)\n\n\nclass Elasticsearch2Index:\n def __init__(self, backend, name):\n self.backend = backend\n self.es = backend.es\n self.mapping_class = backend.mapping_class\n self.name = name\n\n def put(self):\n self.es.indices.create(self.name, self.backend.settings)\n\n def delete(self):\n try:\n self.es.indices.delete(self.name)\n except NotFoundError:\n pass\n\n def exists(self):\n return self.es.indices.exists(self.name)\n\n def is_alias(self):\n return self.es.indices.exists_alias(name=self.name)\n\n def aliased_indices(self):\n \"\"\"\n If this index object represents an alias (which appear the same in the\n Elasticsearch API), this method can be used to fetch the list of indices\n the alias points to.\n\n Use the is_alias method if you need to find out if this an alias. This\n returns an empty list if called on an index.\n \"\"\"\n return [\n self.backend.index_class(self.backend, index_name)\n for index_name in self.es.indices.get_alias(name=self.name).keys()\n ]\n\n def put_alias(self, name):\n \"\"\"\n Creates a new alias to this index. If the alias already exists it will\n be repointed to this index.\n \"\"\"\n self.es.indices.put_alias(name=name, index=self.name)\n\n def add_model(self, model):\n # Get mapping\n mapping = self.mapping_class(model)\n\n # Put mapping\n self.es.indices.put_mapping(\n # pass update_all_types=True as a workaround to avoid \"Can't redefine search field\" errors -\n # see https://github.com/wagtail/wagtail/issues/2968\n index=self.name, doc_type=mapping.get_document_type(), body=mapping.get_mapping(),\n update_all_types=True\n )\n\n def add_item(self, item):\n # Make sure the object can be indexed\n if not class_is_indexed(item.__class__):\n return\n\n # Get mapping\n mapping = self.mapping_class(item.__class__)\n\n # Add document to index\n self.es.index(\n self.name, mapping.get_document_type(), mapping.get_document(item), id=mapping.get_document_id(item)\n )\n\n def add_items(self, model, items):\n if not class_is_indexed(model):\n return\n\n # Get mapping\n mapping = self.mapping_class(model)\n doc_type = mapping.get_document_type()\n\n # Create list of actions\n actions = []\n for item in items:\n # Create the action\n action = {\n '_type': doc_type,\n '_id': mapping.get_document_id(item),\n }\n action.update(mapping.get_document(item))\n actions.append(action)\n\n # Run the actions\n bulk(self.es, actions, index=self.name)\n\n def delete_item(self, item):\n # Make sure the object can be indexed\n if not class_is_indexed(item.__class__):\n return\n\n # Get mapping\n mapping = self.mapping_class(item.__class__)\n\n # Delete document\n try:\n self.es.delete(\n self.name,\n mapping.get_document_type(),\n mapping.get_document_id(item),\n )\n except NotFoundError:\n pass # Document doesn't exist, ignore this exception\n\n def refresh(self):\n self.es.indices.refresh(self.name)\n\n def reset(self):\n # Delete old index\n self.delete()\n\n # Create new index\n self.put()\n\n\nclass ElasticsearchIndexRebuilder:\n def __init__(self, index):\n self.index = index\n\n def reset_index(self):\n self.index.reset()\n\n def start(self):\n # Reset the index\n self.reset_index()\n\n return self.index\n\n def finish(self):\n self.index.refresh()\n\n\nclass ElasticsearchAtomicIndexRebuilder(ElasticsearchIndexRebuilder):\n def __init__(self, index):\n self.alias = index\n self.index = index.backend.index_class(\n index.backend,\n self.alias.name + '_' + get_random_string(7).lower()\n )\n\n def reset_index(self):\n # Delete old index using the alias\n # 
This should delete both the alias and the index\n self.alias.delete()\n\n # Create new index\n self.index.put()\n\n # Create a new alias\n self.index.put_alias(self.alias.name)\n\n def start(self):\n # Create the new index\n self.index.put()\n\n return self.index\n\n def finish(self):\n self.index.refresh()\n\n if self.alias.is_alias():\n # Update existing alias, then delete the old index\n\n # Find index that alias currently points to, we'll delete it after\n # updating the alias\n old_index = self.alias.aliased_indices()\n\n # Update alias to point to new index\n self.index.put_alias(self.alias.name)\n\n # Delete old index\n # aliased_indices() can return multiple indices. Delete them all\n for index in old_index:\n if index.name != self.index.name:\n index.delete()\n\n else:\n # self.alias doesn't currently refer to an alias in Elasticsearch.\n # This means that either nothing exists in ES with that name or\n # there is currently an index with the that name\n\n # Run delete on the alias, just in case it is currently an index.\n # This happens on the first rebuild after switching ATOMIC_REBUILD on\n self.alias.delete()\n\n # Create the alias\n self.index.put_alias(self.alias.name)\n\n\nclass Elasticsearch2SearchBackend(BaseSearchBackend):\n index_class = Elasticsearch2Index\n query_compiler_class = Elasticsearch2SearchQueryCompiler\n autocomplete_query_compiler_class = Elasticsearch2AutocompleteQueryCompiler\n results_class = Elasticsearch2SearchResults\n mapping_class = Elasticsearch2Mapping\n basic_rebuilder_class = ElasticsearchIndexRebuilder\n atomic_rebuilder_class = ElasticsearchAtomicIndexRebuilder\n catch_indexing_errors = True\n\n settings = {\n 'settings': {\n 'analysis': {\n 'analyzer': {\n 'ngram_analyzer': {\n 'type': 'custom',\n 'tokenizer': 'lowercase',\n 'filter': ['asciifolding', 'ngram']\n },\n 'edgengram_analyzer': {\n 'type': 'custom',\n 'tokenizer': 'lowercase',\n 'filter': ['asciifolding', 'edgengram']\n }\n },\n 'tokenizer': {\n 'ngram_tokenizer': {\n 'type': 'nGram',\n 'min_gram': 3,\n 'max_gram': 15,\n },\n 'edgengram_tokenizer': {\n 'type': 'edgeNGram',\n 'min_gram': 2,\n 'max_gram': 15,\n 'side': 'front'\n }\n },\n 'filter': {\n 'ngram': {\n 'type': 'nGram',\n 'min_gram': 3,\n 'max_gram': 15\n },\n 'edgengram': {\n 'type': 'edgeNGram',\n 'min_gram': 1,\n 'max_gram': 15\n }\n }\n }\n }\n }\n\n def __init__(self, params):\n super(Elasticsearch2SearchBackend, self).__init__(params)\n\n # Get settings\n self.hosts = params.pop('HOSTS', None)\n self.index_name = params.pop('INDEX', 'wagtail')\n self.timeout = params.pop('TIMEOUT', 10)\n\n if params.pop('ATOMIC_REBUILD', False):\n self.rebuilder_class = self.atomic_rebuilder_class\n else:\n self.rebuilder_class = self.basic_rebuilder_class\n\n # If HOSTS is not set, convert URLS setting to HOSTS\n es_urls = params.pop('URLS', ['http://localhost:9200'])\n if self.hosts is None:\n self.hosts = []\n\n for url in es_urls:\n parsed_url = urlparse(url)\n\n use_ssl = parsed_url.scheme == 'https'\n port = parsed_url.port or (443 if use_ssl else 80)\n\n http_auth = None\n if parsed_url.username is not None and parsed_url.password is not None:\n http_auth = (parsed_url.username, parsed_url.password)\n\n self.hosts.append({\n 'host': parsed_url.hostname,\n 'port': port,\n 'url_prefix': parsed_url.path,\n 'use_ssl': use_ssl,\n 'verify_certs': use_ssl,\n 'http_auth': http_auth,\n })\n\n self.settings = copy.deepcopy(self.settings) # Make the class settings attribute as instance settings attribute\n self.settings = 
deep_update(self.settings, params.pop(\"INDEX_SETTINGS\", {}))\n\n # Get Elasticsearch interface\n # Any remaining params are passed into the Elasticsearch constructor\n options = params.pop('OPTIONS', {})\n\n self.es = Elasticsearch(\n hosts=self.hosts,\n timeout=self.timeout,\n **options)\n\n def get_index_for_model(self, model):\n # Split models up into separate indices based on their root model.\n # For example, all page-derived models get put together in one index,\n # while images and documents each have their own index.\n root_model = get_model_root(model)\n index_suffix = '__' + root_model._meta.app_label.lower() + '_' + root_model.__name__.lower()\n\n return self.index_class(self, self.index_name + index_suffix)\n\n def get_index(self):\n return self.index_class(self, self.index_name)\n\n def get_rebuilder(self):\n return self.rebuilder_class(self.get_index())\n\n def reset_index(self):\n # Use the rebuilder to reset the index\n self.get_rebuilder().reset_index()\n\n\nSearchBackend = Elasticsearch2SearchBackend\n", "path": "wagtail/search/backends/elasticsearch2.py" } ]
[ { "content": "import copy\nimport json\nfrom collections import OrderedDict\nfrom urllib.parse import urlparse\n\nfrom django.db import DEFAULT_DB_ALIAS, models\nfrom django.db.models.sql import Query\nfrom django.db.models.sql.constants import MULTI\nfrom django.utils.crypto import get_random_string\nfrom elasticsearch import Elasticsearch, NotFoundError\nfrom elasticsearch.helpers import bulk\n\nfrom wagtail.search.backends.base import (\n BaseSearchBackend, BaseSearchQueryCompiler, BaseSearchResults, FilterFieldError)\nfrom wagtail.search.index import (\n AutocompleteField, FilterField, Indexed, RelatedFields, SearchField, class_is_indexed)\nfrom wagtail.search.query import And, Boost, MatchAll, Not, Or, Phrase, PlainText\nfrom wagtail.utils.utils import deep_update\n\n\ndef get_model_root(model):\n \"\"\"\n This function finds the root model for any given model. The root model is\n the highest concrete model that it descends from. If the model doesn't\n descend from another concrete model then the model is it's own root model so\n it is returned.\n\n Examples:\n >>> get_model_root(wagtailcore.Page)\n wagtailcore.Page\n\n >>> get_model_root(myapp.HomePage)\n wagtailcore.Page\n\n >>> get_model_root(wagtailimages.Image)\n wagtailimages.Image\n \"\"\"\n if model._meta.parents:\n parent_model = list(model._meta.parents.items())[0][0]\n return get_model_root(parent_model)\n\n return model\n\n\nclass Elasticsearch2Mapping:\n all_field_name = '_all'\n\n # Was originally named '_partials' but renamed '_edgengrams' when we added Elasticsearch 6 support\n # The ES 2 and 5 backends still use the old name for backwards compatibility\n edgengrams_field_name = '_partials'\n\n type_map = {\n 'AutoField': 'integer',\n 'BinaryField': 'binary',\n 'BooleanField': 'boolean',\n 'CharField': 'string',\n 'CommaSeparatedIntegerField': 'string',\n 'DateField': 'date',\n 'DateTimeField': 'date',\n 'DecimalField': 'double',\n 'FileField': 'string',\n 'FilePathField': 'string',\n 'FloatField': 'double',\n 'IntegerField': 'integer',\n 'BigIntegerField': 'long',\n 'IPAddressField': 'string',\n 'GenericIPAddressField': 'string',\n 'NullBooleanField': 'boolean',\n 'PositiveIntegerField': 'integer',\n 'PositiveSmallIntegerField': 'integer',\n 'SlugField': 'string',\n 'SmallIntegerField': 'integer',\n 'TextField': 'string',\n 'TimeField': 'date',\n }\n\n keyword_type = 'string'\n text_type = 'string'\n set_index_not_analyzed_on_filter_fields = True\n edgengram_analyzer_config = {\n 'analyzer': 'edgengram_analyzer',\n 'search_analyzer': 'standard',\n }\n\n def __init__(self, model):\n self.model = model\n\n def get_parent(self):\n for base in self.model.__bases__:\n if issubclass(base, Indexed) and issubclass(base, models.Model):\n return type(self)(base)\n\n def get_document_type(self):\n return self.model.indexed_get_content_type()\n\n def get_field_column_name(self, field):\n # Fields in derived models get prefixed with their model name, fields\n # in the root model don't get prefixed at all\n # This is to prevent mapping clashes in cases where two page types have\n # a field with the same name but a different type.\n root_model = get_model_root(self.model)\n definition_model = field.get_definition_model(self.model)\n\n if definition_model != root_model:\n prefix = definition_model._meta.app_label.lower() + '_' + definition_model.__name__.lower() + '__'\n else:\n prefix = ''\n\n if isinstance(field, FilterField):\n return prefix + field.get_attname(self.model) + '_filter'\n elif isinstance(field, 
AutocompleteField):\n return prefix + field.get_attname(self.model) + '_edgengrams'\n elif isinstance(field, SearchField):\n return prefix + field.get_attname(self.model)\n elif isinstance(field, RelatedFields):\n return prefix + field.field_name\n\n def get_content_type(self):\n \"\"\"\n Returns the content type as a string for the model.\n\n For example: \"wagtailcore.Page\"\n \"myapp.MyModel\"\n \"\"\"\n return self.model._meta.app_label + '.' + self.model.__name__\n\n def get_all_content_types(self):\n \"\"\"\n Returns all the content type strings that apply to this model.\n This includes the models' content type and all concrete ancestor\n models that inherit from Indexed.\n\n For example: [\"myapp.MyPageModel\", \"wagtailcore.Page\"]\n [\"myapp.MyModel\"]\n \"\"\"\n # Add our content type\n content_types = [self.get_content_type()]\n\n # Add all ancestor classes content types as well\n ancestor = self.get_parent()\n while ancestor:\n content_types.append(ancestor.get_content_type())\n ancestor = ancestor.get_parent()\n\n return content_types\n\n def get_field_mapping(self, field):\n if isinstance(field, RelatedFields):\n mapping = {'type': 'nested', 'properties': {}}\n nested_model = field.get_field(self.model).related_model\n nested_mapping = type(self)(nested_model)\n\n for sub_field in field.fields:\n sub_field_name, sub_field_mapping = nested_mapping.get_field_mapping(sub_field)\n mapping['properties'][sub_field_name] = sub_field_mapping\n\n return self.get_field_column_name(field), mapping\n else:\n mapping = {'type': self.type_map.get(field.get_type(self.model), 'string')}\n\n if isinstance(field, SearchField):\n if mapping['type'] == 'string':\n mapping['type'] = self.text_type\n\n if field.boost:\n mapping['boost'] = field.boost\n\n if field.partial_match:\n mapping.update(self.edgengram_analyzer_config)\n\n mapping['include_in_all'] = True\n\n if isinstance(field, AutocompleteField):\n mapping['type'] = self.text_type\n mapping['include_in_all'] = False\n mapping.update(self.edgengram_analyzer_config)\n\n elif isinstance(field, FilterField):\n if mapping['type'] == 'string':\n mapping['type'] = self.keyword_type\n\n if self.set_index_not_analyzed_on_filter_fields:\n # Not required on ES5 as that uses the \"keyword\" type for\n # filtered string fields\n mapping['index'] = 'not_analyzed'\n\n mapping['include_in_all'] = False\n\n if 'es_extra' in field.kwargs:\n for key, value in field.kwargs['es_extra'].items():\n mapping[key] = value\n\n return self.get_field_column_name(field), mapping\n\n def get_mapping(self):\n # Make field list\n fields = {\n 'pk': dict(type=self.keyword_type, store=True, include_in_all=False),\n 'content_type': dict(type=self.keyword_type, include_in_all=False),\n self.edgengrams_field_name: dict(type=self.text_type, include_in_all=False),\n }\n fields[self.edgengrams_field_name].update(self.edgengram_analyzer_config)\n\n if self.set_index_not_analyzed_on_filter_fields:\n # Not required on ES5 as that uses the \"keyword\" type for\n # filtered string fields\n fields['pk']['index'] = 'not_analyzed'\n fields['content_type']['index'] = 'not_analyzed'\n\n fields.update(dict(\n self.get_field_mapping(field) for field in self.model.get_search_fields()\n ))\n\n return {\n self.get_document_type(): {\n 'properties': fields,\n }\n }\n\n def get_document_id(self, obj):\n return obj.indexed_get_toplevel_content_type() + ':' + str(obj.pk)\n\n def _get_nested_document(self, fields, obj):\n doc = {}\n edgengrams = []\n model = type(obj)\n mapping = 
type(self)(model)\n\n for field in fields:\n value = field.get_value(obj)\n doc[mapping.get_field_column_name(field)] = value\n\n # Check if this field should be added into _edgengrams\n if (isinstance(field, SearchField) and field.partial_match) or isinstance(field, AutocompleteField):\n edgengrams.append(value)\n\n return doc, edgengrams\n\n def get_document(self, obj):\n # Build document\n doc = dict(pk=str(obj.pk), content_type=self.get_all_content_types())\n edgengrams = []\n for field in self.model.get_search_fields():\n value = field.get_value(obj)\n\n if isinstance(field, RelatedFields):\n if isinstance(value, (models.Manager, models.QuerySet)):\n nested_docs = []\n\n for nested_obj in value.all():\n nested_doc, extra_edgengrams = self._get_nested_document(field.fields, nested_obj)\n nested_docs.append(nested_doc)\n edgengrams.extend(extra_edgengrams)\n\n value = nested_docs\n elif isinstance(value, models.Model):\n value, extra_edgengrams = self._get_nested_document(field.fields, value)\n edgengrams.extend(extra_edgengrams)\n elif isinstance(field, FilterField):\n if isinstance(value, (models.Manager, models.QuerySet)):\n value = list(value.values_list('pk', flat=True))\n elif isinstance(value, models.Model):\n value = value.pk\n elif isinstance(value, (list, tuple)):\n value = [item.pk if isinstance(item, models.Model) else item for item in value]\n\n doc[self.get_field_column_name(field)] = value\n\n # Check if this field should be added into _edgengrams\n if (isinstance(field, SearchField) and field.partial_match) or isinstance(field, AutocompleteField):\n edgengrams.append(value)\n\n # Add partials to document\n doc[self.edgengrams_field_name] = edgengrams\n\n return doc\n\n def __repr__(self):\n return '<ElasticsearchMapping: %s>' % (self.model.__name__, )\n\n\nclass Elasticsearch2SearchQueryCompiler(BaseSearchQueryCompiler):\n mapping_class = Elasticsearch2Mapping\n DEFAULT_OPERATOR = 'or'\n\n def __init__(self, *args, **kwargs):\n super(Elasticsearch2SearchQueryCompiler, self).__init__(*args, **kwargs)\n self.mapping = self.mapping_class(self.queryset.model)\n\n # Convert field names into index column names\n if self.fields:\n fields = []\n searchable_fields = {f.field_name: f for f in self.queryset.model.get_searchable_search_fields()}\n for field_name in self.fields:\n if field_name in searchable_fields:\n field_name = self.mapping.get_field_column_name(searchable_fields[field_name])\n\n fields.append(field_name)\n\n self.remapped_fields = fields\n else:\n self.remapped_fields = None\n\n def _process_lookup(self, field, lookup, value):\n column_name = self.mapping.get_field_column_name(field)\n\n if lookup == 'exact':\n if value is None:\n return {\n 'missing': {\n 'field': column_name,\n }\n }\n else:\n return {\n 'term': {\n column_name: value,\n }\n }\n\n if lookup == 'isnull':\n if value:\n return {\n 'missing': {\n 'field': column_name,\n }\n }\n else:\n return {\n 'exists': {\n 'field': column_name,\n }\n }\n\n if lookup in ['startswith', 'prefix']:\n return {\n 'prefix': {\n column_name: value,\n }\n }\n\n if lookup in ['gt', 'gte', 'lt', 'lte']:\n return {\n 'range': {\n column_name: {\n lookup: value,\n }\n }\n }\n\n if lookup == 'range':\n lower, upper = value\n\n return {\n 'range': {\n column_name: {\n 'gte': lower,\n 'lte': upper,\n }\n }\n }\n\n if lookup == 'in':\n if isinstance(value, Query):\n db_alias = self.queryset._db or DEFAULT_DB_ALIAS\n resultset = value.get_compiler(db_alias).execute_sql(result_type=MULTI)\n value = [row[0] for chunk in resultset 
for row in chunk]\n\n elif not isinstance(value, list):\n value = list(value)\n return {\n 'terms': {\n column_name: value,\n }\n }\n\n def _connect_filters(self, filters, connector, negated):\n if filters:\n if len(filters) == 1:\n filter_out = filters[0]\n else:\n filter_out = {\n connector.lower(): [\n fil for fil in filters if fil is not None\n ]\n }\n\n if negated:\n filter_out = {\n 'not': filter_out\n }\n\n return filter_out\n\n def _compile_plaintext_query(self, query, fields, boost=1.0):\n match_query = {\n 'query': query.query_string\n }\n\n if query.operator != 'or':\n match_query['operator'] = query.operator\n\n if boost != 1.0:\n match_query['boost'] = boost\n\n if len(fields) == 1:\n return {\n 'match': {\n fields[0]: match_query\n }\n }\n else:\n match_query['fields'] = fields\n\n return {\n 'multi_match': match_query\n }\n\n def _compile_phrase_query(self, query, fields):\n if len(fields) == 1:\n return {\n 'match_phrase': {\n fields[0]: query.query_string\n }\n }\n else:\n return {\n 'multi_match': {\n 'query': query.query_string,\n 'fields': fields,\n 'type': 'phrase',\n }\n }\n\n def _compile_query(self, query, field, boost=1.0):\n if isinstance(query, MatchAll):\n match_all_query = {}\n\n if boost != 1.0:\n match_all_query['boost'] = boost\n\n return {'match_all': match_all_query}\n\n elif isinstance(query, And):\n return {\n 'bool': {\n 'must': [\n self._compile_query(child_query, field, boost)\n for child_query in query.subqueries\n ]\n }\n }\n\n elif isinstance(query, Or):\n return {\n 'bool': {\n 'should': [\n self._compile_query(child_query, field, boost)\n for child_query in query.subqueries\n ]\n }\n }\n\n elif isinstance(query, Not):\n return {\n 'bool': {\n 'mustNot': self._compile_query(query.subquery, field, boost)\n }\n }\n\n elif isinstance(query, PlainText):\n return self._compile_plaintext_query(query, [field], boost)\n\n elif isinstance(query, Phrase):\n return self._compile_phrase_query(query, [field])\n\n elif isinstance(query, Boost):\n return self._compile_query(query.subquery, field, boost * query.boost)\n\n else:\n raise NotImplementedError(\n '`%s` is not supported by the Elasticsearch search backend.'\n % query.__class__.__name__)\n\n def get_inner_query(self):\n if self.remapped_fields:\n fields = self.remapped_fields\n elif self.partial_match:\n fields = [self.mapping.all_field_name, self.mapping.edgengrams_field_name]\n else:\n fields = [self.mapping.all_field_name]\n\n if len(fields) == 0:\n # No fields. Return a query that'll match nothing\n return {\n 'bool': {\n 'mustNot': {'match_all': {}}\n }\n }\n\n # Handle MatchAll and PlainText separately as they were supported\n # before \"search query classes\" was implemented and we'd like to\n # keep the query the same as before\n if isinstance(self.query, MatchAll):\n return {'match_all': {}}\n\n elif isinstance(self.query, PlainText):\n return self._compile_plaintext_query(self.query, fields)\n\n elif isinstance(self.query, Phrase):\n return self._compile_phrase_query(self.query, fields)\n\n else:\n if len(fields) == 1:\n return self._compile_query(self.query, fields[0])\n else:\n # Compile a query for each field then combine with disjunction\n # max (or operator which takes the max score out of each of the\n # field queries)\n field_queries = []\n for field in fields:\n field_queries.append(self._compile_query(self.query, field))\n\n return {\n 'dis_max': {\n 'queries': field_queries\n }\n }\n\n def get_content_type_filter(self):\n # Query content_type using a \"match\" query. 
See comment in\n # Elasticsearch2Mapping.get_document for more details\n content_type = self.mapping_class(self.queryset.model).get_content_type()\n\n return {\n 'match': {\n 'content_type': content_type\n }\n }\n\n def get_filters(self):\n filters = []\n\n # Filter by content type\n filters.append(self.get_content_type_filter())\n\n # Apply filters from queryset\n queryset_filters = self._get_filters_from_queryset()\n if queryset_filters:\n filters.append(queryset_filters)\n\n return filters\n\n def get_query(self):\n inner_query = self.get_inner_query()\n filters = self.get_filters()\n\n if len(filters) == 1:\n return {\n 'filtered': {\n 'query': inner_query,\n 'filter': filters[0],\n }\n }\n elif len(filters) > 1:\n return {\n 'filtered': {\n 'query': inner_query,\n 'filter': {\n 'and': filters,\n }\n }\n }\n else:\n return inner_query\n\n def get_sort(self):\n # Ordering by relevance is the default in Elasticsearch\n if self.order_by_relevance:\n return\n\n # Get queryset and make sure its ordered\n if self.queryset.ordered:\n sort = []\n\n for reverse, field in self._get_order_by():\n column_name = self.mapping.get_field_column_name(field)\n\n sort.append({\n column_name: 'desc' if reverse else 'asc'\n })\n\n return sort\n\n else:\n # Order by pk field\n return ['pk']\n\n def __repr__(self):\n return json.dumps(self.get_query())\n\n\nclass ElasticsearchAutocompleteQueryCompilerImpl:\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Convert field names into index column names\n # Note: this overrides Elasticsearch2SearchQueryCompiler by using autocomplete fields instead of searchbale fields\n if self.fields:\n fields = []\n autocomplete_fields = {f.field_name: f for f in self.queryset.model.get_autocomplete_search_fields()}\n for field_name in self.fields:\n if field_name in autocomplete_fields:\n field_name = self.mapping.get_field_column_name(autocomplete_fields[field_name])\n\n fields.append(field_name)\n\n self.remapped_fields = fields\n else:\n self.remapped_fields = None\n\n def get_inner_query(self):\n fields = self.remapped_fields or [self.mapping.edgengrams_field_name]\n\n if len(fields) == 0:\n # No fields. Return a query that'll match nothing\n return {\n 'bool': {\n 'mustNot': {'match_all': {}}\n }\n }\n\n return self._compile_plaintext_query(self.query, fields)\n\n\nclass Elasticsearch2AutocompleteQueryCompiler(Elasticsearch2SearchQueryCompiler, ElasticsearchAutocompleteQueryCompilerImpl):\n pass\n\n\nclass Elasticsearch2SearchResults(BaseSearchResults):\n fields_param_name = 'fields'\n supports_facet = True\n\n def facet(self, field_name):\n # Get field\n field = self.query_compiler._get_filterable_field(field_name)\n if field is None:\n raise FilterFieldError(\n 'Cannot facet search results with field \"' + field_name + '\". 
Please add index.FilterField(\\''\n + field_name + '\\') to ' + self.query_compiler.queryset.model.__name__ + '.search_fields.',\n field_name=field_name\n )\n\n # Build body\n body = self._get_es_body()\n column_name = self.query_compiler.mapping.get_field_column_name(field)\n\n body['aggregations'] = {\n field_name: {\n 'terms': {\n 'field': column_name,\n 'missing': 0,\n }\n }\n }\n\n # Send to Elasticsearch\n response = self.backend.es.search(\n index=self.backend.get_index_for_model(self.query_compiler.queryset.model).name,\n body=body,\n size=0,\n )\n\n return OrderedDict([\n (bucket['key'] if bucket['key'] != 0 else None, bucket['doc_count'])\n for bucket in response['aggregations'][field_name]['buckets']\n ])\n\n def _get_es_body(self, for_count=False):\n body = {\n 'query': self.query_compiler.get_query()\n }\n\n if not for_count:\n sort = self.query_compiler.get_sort()\n\n if sort is not None:\n body['sort'] = sort\n\n return body\n\n def _get_results_from_hits(self, hits):\n \"\"\"\n Yields Django model instances from a page of hits returned by Elasticsearch\n \"\"\"\n # Get pks from results\n pks = [hit['fields']['pk'][0] for hit in hits]\n scores = {str(hit['fields']['pk'][0]): hit['_score'] for hit in hits}\n\n # Initialise results dictionary\n results = {str(pk): None for pk in pks}\n\n # Find objects in database and add them to dict\n for obj in self.query_compiler.queryset.filter(pk__in=pks):\n results[str(obj.pk)] = obj\n\n if self._score_field:\n setattr(obj, self._score_field, scores.get(str(obj.pk)))\n\n # Yield results in order given by Elasticsearch\n for pk in pks:\n result = results[str(pk)]\n if result:\n yield result\n\n def _do_search(self):\n PAGE_SIZE = 100\n\n if self.stop is not None:\n limit = self.stop - self.start\n else:\n limit = None\n\n use_scroll = limit is None or limit > PAGE_SIZE\n\n params = {\n 'index': self.backend.get_index_for_model(self.query_compiler.queryset.model).name,\n 'body': self._get_es_body(),\n '_source': False,\n self.fields_param_name: 'pk',\n }\n\n if use_scroll:\n params.update({\n 'scroll': '2m',\n 'size': PAGE_SIZE,\n })\n\n # The scroll API doesn't support offset, manually skip the first results\n skip = self.start\n\n # Send to Elasticsearch\n page = self.backend.es.search(**params)\n\n while True:\n hits = page['hits']['hits']\n\n if len(hits) == 0:\n break\n\n # Get results\n if skip < len(hits):\n for result in self._get_results_from_hits(hits):\n if limit is not None and limit == 0:\n break\n\n if skip == 0:\n yield result\n\n if limit is not None:\n limit -= 1\n else:\n skip -= 1\n\n if limit is not None and limit == 0:\n break\n else:\n # Skip whole page\n skip -= len(hits)\n\n # Fetch next page of results\n if '_scroll_id' not in page:\n break\n\n page = self.backend.es.scroll(scroll_id=page['_scroll_id'], scroll='2m')\n\n # Clear the scroll\n if '_scroll_id' in page:\n self.backend.es.clear_scroll(scroll_id=page['_scroll_id'])\n else:\n params.update({\n 'from_': self.start,\n 'size': limit or PAGE_SIZE,\n })\n\n # Send to Elasticsearch\n hits = self.backend.es.search(**params)['hits']['hits']\n\n # Get results\n for result in self._get_results_from_hits(hits):\n yield result\n\n def _do_count(self):\n # Get count\n hit_count = self.backend.es.count(\n index=self.backend.get_index_for_model(self.query_compiler.queryset.model).name,\n body=self._get_es_body(for_count=True),\n )['count']\n\n # Add limits\n hit_count -= self.start\n if self.stop is not None:\n hit_count = min(hit_count, self.stop - self.start)\n\n 
return max(hit_count, 0)\n\n\nclass Elasticsearch2Index:\n def __init__(self, backend, name):\n self.backend = backend\n self.es = backend.es\n self.mapping_class = backend.mapping_class\n self.name = name\n\n def put(self):\n self.es.indices.create(self.name, self.backend.settings)\n\n def delete(self):\n try:\n self.es.indices.delete(self.name)\n except NotFoundError:\n pass\n\n def exists(self):\n return self.es.indices.exists(self.name)\n\n def is_alias(self):\n return self.es.indices.exists_alias(name=self.name)\n\n def aliased_indices(self):\n \"\"\"\n If this index object represents an alias (which appear the same in the\n Elasticsearch API), this method can be used to fetch the list of indices\n the alias points to.\n\n Use the is_alias method if you need to find out if this an alias. This\n returns an empty list if called on an index.\n \"\"\"\n return [\n self.backend.index_class(self.backend, index_name)\n for index_name in self.es.indices.get_alias(name=self.name).keys()\n ]\n\n def put_alias(self, name):\n \"\"\"\n Creates a new alias to this index. If the alias already exists it will\n be repointed to this index.\n \"\"\"\n self.es.indices.put_alias(name=name, index=self.name)\n\n def add_model(self, model):\n # Get mapping\n mapping = self.mapping_class(model)\n\n # Put mapping\n self.es.indices.put_mapping(\n # pass update_all_types=True as a workaround to avoid \"Can't redefine search field\" errors -\n # see https://github.com/wagtail/wagtail/issues/2968\n index=self.name, doc_type=mapping.get_document_type(), body=mapping.get_mapping(),\n update_all_types=True\n )\n\n def add_item(self, item):\n # Make sure the object can be indexed\n if not class_is_indexed(item.__class__):\n return\n\n # Get mapping\n mapping = self.mapping_class(item.__class__)\n\n # Add document to index\n self.es.index(\n self.name, mapping.get_document_type(), mapping.get_document(item), id=mapping.get_document_id(item)\n )\n\n def add_items(self, model, items):\n if not class_is_indexed(model):\n return\n\n # Get mapping\n mapping = self.mapping_class(model)\n doc_type = mapping.get_document_type()\n\n # Create list of actions\n actions = []\n for item in items:\n # Create the action\n action = {\n '_type': doc_type,\n '_id': mapping.get_document_id(item),\n }\n action.update(mapping.get_document(item))\n actions.append(action)\n\n # Run the actions\n bulk(self.es, actions, index=self.name)\n\n def delete_item(self, item):\n # Make sure the object can be indexed\n if not class_is_indexed(item.__class__):\n return\n\n # Get mapping\n mapping = self.mapping_class(item.__class__)\n\n # Delete document\n try:\n self.es.delete(\n self.name,\n mapping.get_document_type(),\n mapping.get_document_id(item),\n )\n except NotFoundError:\n pass # Document doesn't exist, ignore this exception\n\n def refresh(self):\n self.es.indices.refresh(self.name)\n\n def reset(self):\n # Delete old index\n self.delete()\n\n # Create new index\n self.put()\n\n\nclass ElasticsearchIndexRebuilder:\n def __init__(self, index):\n self.index = index\n\n def reset_index(self):\n self.index.reset()\n\n def start(self):\n # Reset the index\n self.reset_index()\n\n return self.index\n\n def finish(self):\n self.index.refresh()\n\n\nclass ElasticsearchAtomicIndexRebuilder(ElasticsearchIndexRebuilder):\n def __init__(self, index):\n self.alias = index\n self.index = index.backend.index_class(\n index.backend,\n self.alias.name + '_' + get_random_string(7).lower()\n )\n\n def reset_index(self):\n # Delete old index using the alias\n # 
This should delete both the alias and the index\n self.alias.delete()\n\n # Create new index\n self.index.put()\n\n # Create a new alias\n self.index.put_alias(self.alias.name)\n\n def start(self):\n # Create the new index\n self.index.put()\n\n return self.index\n\n def finish(self):\n self.index.refresh()\n\n if self.alias.is_alias():\n # Update existing alias, then delete the old index\n\n # Find index that alias currently points to, we'll delete it after\n # updating the alias\n old_index = self.alias.aliased_indices()\n\n # Update alias to point to new index\n self.index.put_alias(self.alias.name)\n\n # Delete old index\n # aliased_indices() can return multiple indices. Delete them all\n for index in old_index:\n if index.name != self.index.name:\n index.delete()\n\n else:\n # self.alias doesn't currently refer to an alias in Elasticsearch.\n # This means that either nothing exists in ES with that name or\n # there is currently an index with the that name\n\n # Run delete on the alias, just in case it is currently an index.\n # This happens on the first rebuild after switching ATOMIC_REBUILD on\n self.alias.delete()\n\n # Create the alias\n self.index.put_alias(self.alias.name)\n\n\nclass Elasticsearch2SearchBackend(BaseSearchBackend):\n index_class = Elasticsearch2Index\n query_compiler_class = Elasticsearch2SearchQueryCompiler\n autocomplete_query_compiler_class = Elasticsearch2AutocompleteQueryCompiler\n results_class = Elasticsearch2SearchResults\n mapping_class = Elasticsearch2Mapping\n basic_rebuilder_class = ElasticsearchIndexRebuilder\n atomic_rebuilder_class = ElasticsearchAtomicIndexRebuilder\n catch_indexing_errors = True\n\n settings = {\n 'settings': {\n 'analysis': {\n 'analyzer': {\n 'ngram_analyzer': {\n 'type': 'custom',\n 'tokenizer': 'lowercase',\n 'filter': ['asciifolding', 'ngram']\n },\n 'edgengram_analyzer': {\n 'type': 'custom',\n 'tokenizer': 'lowercase',\n 'filter': ['asciifolding', 'edgengram']\n }\n },\n 'tokenizer': {\n 'ngram_tokenizer': {\n 'type': 'nGram',\n 'min_gram': 3,\n 'max_gram': 15,\n },\n 'edgengram_tokenizer': {\n 'type': 'edgeNGram',\n 'min_gram': 2,\n 'max_gram': 15,\n 'side': 'front'\n }\n },\n 'filter': {\n 'ngram': {\n 'type': 'nGram',\n 'min_gram': 3,\n 'max_gram': 15\n },\n 'edgengram': {\n 'type': 'edgeNGram',\n 'min_gram': 1,\n 'max_gram': 15\n }\n }\n }\n }\n }\n\n def __init__(self, params):\n super(Elasticsearch2SearchBackend, self).__init__(params)\n\n # Get settings\n self.hosts = params.pop('HOSTS', None)\n self.index_name = params.pop('INDEX', 'wagtail')\n self.timeout = params.pop('TIMEOUT', 10)\n\n if params.pop('ATOMIC_REBUILD', False):\n self.rebuilder_class = self.atomic_rebuilder_class\n else:\n self.rebuilder_class = self.basic_rebuilder_class\n\n # If HOSTS is not set, convert URLS setting to HOSTS\n es_urls = params.pop('URLS', ['http://localhost:9200'])\n if self.hosts is None:\n self.hosts = []\n\n # if es_urls is not a list, convert it to a list\n if isinstance(es_urls, str):\n es_urls = [es_urls]\n\n for url in es_urls:\n parsed_url = urlparse(url)\n\n use_ssl = parsed_url.scheme == 'https'\n port = parsed_url.port or (443 if use_ssl else 80)\n\n http_auth = None\n if parsed_url.username is not None and parsed_url.password is not None:\n http_auth = (parsed_url.username, parsed_url.password)\n\n self.hosts.append({\n 'host': parsed_url.hostname,\n 'port': port,\n 'url_prefix': parsed_url.path,\n 'use_ssl': use_ssl,\n 'verify_certs': use_ssl,\n 'http_auth': http_auth,\n })\n\n self.settings = 
copy.deepcopy(self.settings) # Make the class settings attribute as instance settings attribute\n self.settings = deep_update(self.settings, params.pop(\"INDEX_SETTINGS\", {}))\n\n # Get Elasticsearch interface\n # Any remaining params are passed into the Elasticsearch constructor\n options = params.pop('OPTIONS', {})\n\n self.es = Elasticsearch(\n hosts=self.hosts,\n timeout=self.timeout,\n **options)\n\n def get_index_for_model(self, model):\n # Split models up into separate indices based on their root model.\n # For example, all page-derived models get put together in one index,\n # while images and documents each have their own index.\n root_model = get_model_root(model)\n index_suffix = '__' + root_model._meta.app_label.lower() + '_' + root_model.__name__.lower()\n\n return self.index_class(self, self.index_name + index_suffix)\n\n def get_index(self):\n return self.index_class(self, self.index_name)\n\n def get_rebuilder(self):\n return self.rebuilder_class(self.get_index())\n\n def reset_index(self):\n # Use the rebuilder to reset the index\n self.get_rebuilder().reset_index()\n\n\nSearchBackend = Elasticsearch2SearchBackend\n", "path": "wagtail/search/backends/elasticsearch2.py" } ]
diff --git a/wagtail/search/backends/elasticsearch2.py b/wagtail/search/backends/elasticsearch2.py
index 57381ec39206..14e6bb3a1fb7 100644
--- a/wagtail/search/backends/elasticsearch2.py
+++ b/wagtail/search/backends/elasticsearch2.py
@@ -1070,6 +1070,10 @@ def __init__(self, params):
         if self.hosts is None:
             self.hosts = []
 
+            # if es_urls is not a list, convert it to a list
+            if isinstance(es_urls, str):
+                es_urls = [es_urls]
+
             for url in es_urls:
                 parsed_url = urlparse(url)
 
diff --git a/wagtail/search/tests/test_elasticsearch2_backend.py b/wagtail/search/tests/test_elasticsearch2_backend.py
index 6ac36faf01f2..a289bb89f97e 100644
--- a/wagtail/search/tests/test_elasticsearch2_backend.py
+++ b/wagtail/search/tests/test_elasticsearch2_backend.py
@@ -833,6 +833,25 @@ def test_urls(self, Elasticsearch):
             timeout=10
         )
 
+    def test_urls_as_string_works(self, Elasticsearch):
+        Elasticsearch2SearchBackend(params={
+            'URLS': 'http://localhost:9200'
+        })
+
+        Elasticsearch.assert_called_with(
+            hosts=[
+                {
+                    'host': 'localhost',
+                    'port': 9200,
+                    'url_prefix': '',
+                    'use_ssl': False,
+                    'verify_certs': False,
+                    'http_auth': None,
+                }
+            ],
+            timeout=10
+        )
+
 
 class TestGetModelRoot(TestCase):
     def test_root_model(self):
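For readers skimming the record above: the patch simply normalizes a bare string `URLS` value into a single-element list before it is parsed into Elasticsearch host dicts. The following is a minimal standalone sketch of that pattern, not Wagtail code; the `parse_hosts` helper name is hypothetical, and the host-dict fields are trimmed to the ones exercised by the new test.

```python
from urllib.parse import urlparse


def parse_hosts(es_urls):
    # Hypothetical helper mirroring the patched __init__ logic:
    # accept either one URL string or a list of URL strings.
    if isinstance(es_urls, str):
        es_urls = [es_urls]

    hosts = []
    for url in es_urls:
        parsed_url = urlparse(url)
        use_ssl = parsed_url.scheme == 'https'
        hosts.append({
            'host': parsed_url.hostname,
            'port': parsed_url.port or (443 if use_ssl else 80),
            'url_prefix': parsed_url.path,
            'use_ssl': use_ssl,
            'verify_certs': use_ssl,
            'http_auth': None,
        })
    return hosts


# Both spellings now yield the same host list.
assert parse_hosts('http://localhost:9200') == parse_hosts(['http://localhost:9200'])
```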
sopel-irc__sopel-1261
Results from currency conversion should be rounded to 2 decimals
At the moment, the results of a currency conversion query are reported as a float:

    < mynick> .cur 100 SEK to USD
    < mybot> 100.0 SEK (Swedish krona) = 12.202017114914426 USD (US dollar)

As far as I know, no currency uses more than 2 decimals, and some use even fewer (JPY comes to mind). Rounding the result to 2 decimals will give more readable output:

    < mynick> .cur 100 SEK to USD
    < mybot> 100.0 SEK (Swedish krona) = 12.20 USD (US dollar)
[ { "content": "# coding=utf-8\n# Copyright 2013 Elsie Powell, embolalia.com\n# Licensed under the Eiffel Forum License 2\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\n\nfrom requests import get\nfrom sopel.module import commands, example, NOLIMIT\n\n# The Canadian central bank has better exchange rate data than the Fed, the\n# Bank of England, or the European Central Bank. Who knew?\nbase_url = 'http://www.bankofcanada.ca/stats/assets/rates_rss/noon/en_{}.xml'\nregex = re.compile(r'''\n (\\d+(?:\\.\\d+)?) # Decimal number\n \\s*([a-zA-Z]{3}) # 3-letter currency code\n \\s+(?:in|as|of|to)\\s+ # preposition\n ([a-zA-Z]{3}) # 3-letter currency code\n ''', re.VERBOSE)\n\n\ndef get_rate(code):\n code = code.upper()\n if code == 'CAD':\n return 1, 'Canadian Dollar'\n elif code == 'BTC':\n btc_rate = get('https://apiv2.bitcoinaverage.com/indices/global/ticker/BTCCAD')\n rates = btc_rate.json()\n return 1 / rates['averages']['day'], 'Bitcoin—24hr average'\n\n data = get(\"http://www.bankofcanada.ca/valet/observations/FX{}CAD/json\".format(code))\n name = data.json()['seriesDetail']['FX{}CAD'.format(code)]['description']\n name = name.split(\" to Canadian\")[0]\n json = data.json()['observations']\n for element in reversed(json):\n if 'v' in element['FX{}CAD'.format(code)]:\n return 1 / float(element['FX{}CAD'.format(code)]['v']), name\n\n\n@commands('cur', 'currency', 'exchange')\n@example('.cur 20 EUR in USD')\ndef exchange(bot, trigger):\n \"\"\"Show the exchange rate between two currencies\"\"\"\n if not trigger.group(2):\n return bot.reply(\"No search term. An example: .cur 20 EUR in USD\")\n match = regex.match(trigger.group(2))\n if not match:\n # It's apologetic, because it's using Canadian data.\n bot.reply(\"Sorry, I didn't understand the input.\")\n return NOLIMIT\n\n amount, of, to = match.groups()\n try:\n amount = float(amount)\n except:\n bot.reply(\"Sorry, I didn't understand the input.\")\n display(bot, amount, of, to)\n\n\ndef display(bot, amount, of, to):\n if not amount:\n bot.reply(\"Zero is zero, no matter what country you're in.\")\n try:\n of_rate, of_name = get_rate(of)\n if not of_name:\n bot.reply(\"Unknown currency: %s\" % of)\n return\n to_rate, to_name = get_rate(to)\n if not to_name:\n bot.reply(\"Unknown currency: %s\" % to)\n return\n except Exception:\n bot.reply(\"Something went wrong while I was getting the exchange rate.\")\n return NOLIMIT\n\n result = amount / of_rate * to_rate\n bot.say(\"{} {} ({}) = {} {} ({})\".format(amount, of.upper(), of_name,\n result, to.upper(), to_name))\n\n\n@commands('btc', 'bitcoin')\n@example('.btc 20 EUR')\ndef bitcoin(bot, trigger):\n #if 2 args, 1st is number and 2nd is currency. If 1 arg, it's either the number or the currency.\n to = trigger.group(4)\n amount = trigger.group(3)\n if not to:\n to = trigger.group(3) or 'USD'\n amount = 1\n\n try:\n amount = float(amount)\n except:\n bot.reply(\"Sorry, I didn't understand the input.\")\n return NOLIMIT\n\n display(bot, amount, 'BTC', to)\n", "path": "sopel/modules/currency.py" } ]
[ { "content": "# coding=utf-8\n# Copyright 2013 Elsie Powell, embolalia.com\n# Licensed under the Eiffel Forum License 2\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\n\nfrom requests import get\nfrom sopel.module import commands, example, NOLIMIT\n\n# The Canadian central bank has better exchange rate data than the Fed, the\n# Bank of England, or the European Central Bank. Who knew?\nbase_url = 'http://www.bankofcanada.ca/stats/assets/rates_rss/noon/en_{}.xml'\nregex = re.compile(r'''\n (\\d+(?:\\.\\d+)?) # Decimal number\n \\s*([a-zA-Z]{3}) # 3-letter currency code\n \\s+(?:in|as|of|to)\\s+ # preposition\n ([a-zA-Z]{3}) # 3-letter currency code\n ''', re.VERBOSE)\n\n\ndef get_rate(code):\n code = code.upper()\n if code == 'CAD':\n return 1, 'Canadian Dollar'\n elif code == 'BTC':\n btc_rate = get('https://apiv2.bitcoinaverage.com/indices/global/ticker/BTCCAD')\n rates = btc_rate.json()\n return 1 / rates['averages']['day'], 'Bitcoin—24hr average'\n\n data = get(\"http://www.bankofcanada.ca/valet/observations/FX{}CAD/json\".format(code))\n name = data.json()['seriesDetail']['FX{}CAD'.format(code)]['description']\n name = name.split(\" to Canadian\")[0]\n json = data.json()['observations']\n for element in reversed(json):\n if 'v' in element['FX{}CAD'.format(code)]:\n return 1 / float(element['FX{}CAD'.format(code)]['v']), name\n\n\n@commands('cur', 'currency', 'exchange')\n@example('.cur 20 EUR in USD')\ndef exchange(bot, trigger):\n \"\"\"Show the exchange rate between two currencies\"\"\"\n if not trigger.group(2):\n return bot.reply(\"No search term. An example: .cur 20 EUR in USD\")\n match = regex.match(trigger.group(2))\n if not match:\n # It's apologetic, because it's using Canadian data.\n bot.reply(\"Sorry, I didn't understand the input.\")\n return NOLIMIT\n\n amount, of, to = match.groups()\n try:\n amount = float(amount)\n except:\n bot.reply(\"Sorry, I didn't understand the input.\")\n display(bot, amount, of, to)\n\n\ndef display(bot, amount, of, to):\n if not amount:\n bot.reply(\"Zero is zero, no matter what country you're in.\")\n try:\n of_rate, of_name = get_rate(of)\n if not of_name:\n bot.reply(\"Unknown currency: %s\" % of)\n return\n to_rate, to_name = get_rate(to)\n if not to_name:\n bot.reply(\"Unknown currency: %s\" % to)\n return\n except Exception:\n bot.reply(\"Something went wrong while I was getting the exchange rate.\")\n return NOLIMIT\n\n result = amount / of_rate * to_rate\n bot.say(\"{:.2f} {} ({}) = {:.2f} {} ({})\".format(amount, of.upper(), of_name,\n result, to.upper(), to_name))\n\n\n@commands('btc', 'bitcoin')\n@example('.btc 20 EUR')\ndef bitcoin(bot, trigger):\n #if 2 args, 1st is number and 2nd is currency. If 1 arg, it's either the number or the currency.\n to = trigger.group(4)\n amount = trigger.group(3)\n if not to:\n to = trigger.group(3) or 'USD'\n amount = 1\n\n try:\n amount = float(amount)\n except:\n bot.reply(\"Sorry, I didn't understand the input.\")\n return NOLIMIT\n\n display(bot, amount, 'BTC', to)\n", "path": "sopel/modules/currency.py" } ]
diff --git a/sopel/modules/currency.py b/sopel/modules/currency.py index c81cee2cf9..27087b5c29 100644 --- a/sopel/modules/currency.py +++ b/sopel/modules/currency.py @@ -74,7 +74,7 @@ def display(bot, amount, of, to): return NOLIMIT result = amount / of_rate * to_rate - bot.say("{} {} ({}) = {} {} ({})".format(amount, of.upper(), of_name, + bot.say("{:.2f} {} ({}) = {:.2f} {} ({})".format(amount, of.upper(), of_name, result, to.upper(), to_name))
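The patch above only changes the output formatting in `display()`. As a minimal illustration (not part of the sopel codebase, values are made up), the `{:.2f}` format spec rounds both the amount and the converted result to two decimal places:

```python
# Illustrative only: effect of the "{:.2f}" format spec used in the patched bot.say() call.
amount, result = 20.0, 23.456789

before = "{} EUR = {} USD".format(amount, result)          # '20.0 EUR = 23.456789 USD'
after = "{:.2f} EUR = {:.2f} USD".format(amount, result)   # '20.00 EUR = 23.46 USD'

print(before)
print(after)
```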
scikit-hep__pyhf-1306
iminuit v2.4.0 breaks test_optim # Description [`iminuit` `v2.4.0`](https://github.com/scikit-hep/iminuit/releases/tag/v2.4.0) (released today 2021-02-10) is breaking the tests for 32b and 64b minuit in the `test_minimize` tests https://github.com/scikit-hep/pyhf/blob/bc32a695aaea0b79b3f0b40676446452d115cf8e/tests/test_optim.py#L49 ```pytb _______________ test_minimize[do_grad-minuit-jax-64b-do_stitch] ________________ tensorlib = <class 'pyhf.tensor.jax_backend.jax_backend'>, precision = '64b' optimizer = <class 'pyhf.optimize.minuit_optimizer'>, do_grad = True do_stitch = True @pytest.mark.parametrize('do_stitch', [False, True], ids=['no_stitch', 'do_stitch']) @pytest.mark.parametrize('precision', ['32b', '64b'], ids=['32b', '64b']) @pytest.mark.parametrize( 'tensorlib', [ pyhf.tensor.numpy_backend, pyhf.tensor.pytorch_backend, pyhf.tensor.tensorflow_backend, pyhf.tensor.jax_backend, ], ids=['numpy', 'pytorch', 'tensorflow', 'jax'], ) @pytest.mark.parametrize( 'optimizer', [pyhf.optimize.scipy_optimizer, pyhf.optimize.minuit_optimizer], ids=['scipy', 'minuit'], ) @pytest.mark.parametrize('do_grad', [False, True], ids=['no_grad', 'do_grad']) def test_minimize(tensorlib, precision, optimizer, do_grad, do_stitch): pyhf.set_backend(tensorlib(precision=precision), optimizer()) m = pyhf.simplemodels.hepdata_like([50.0], [100.0], [10.0]) data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata) # numpy does not support grad if pyhf.tensorlib.name == 'numpy' and do_grad: with pytest.raises(pyhf.exceptions.Unsupported): pyhf.infer.mle.fit(data, m, do_grad=do_grad) else: identifier = f'{"do_grad" if do_grad else "no_grad"}-{pyhf.optimizer.name}-{pyhf.tensorlib.name}-{pyhf.tensorlib.precision}' expected = { # numpy does not do grad 'do_grad-scipy-numpy-32b': None, 'do_grad-scipy-numpy-64b': None, 'do_grad-minuit-numpy-32b': None, 'do_grad-minuit-numpy-64b': None, # no grad, scipy, 32b - never works 'no_grad-scipy-numpy-32b': [1.0, 1.0], 'no_grad-scipy-pytorch-32b': [1.0, 1.0], 'no_grad-scipy-tensorflow-32b': [1.0, 1.0], 'no_grad-scipy-jax-32b': [1.0, 1.0], # no grad, scipy, 64b 'no_grad-scipy-numpy-64b': [0.49998815367220306, 0.9999696999038924], 'no_grad-scipy-pytorch-64b': [0.49998815367220306, 0.9999696999038924], 'no_grad-scipy-tensorflow-64b': [0.49998865164653106, 0.9999696533705097], 'no_grad-scipy-jax-64b': [0.4999880886490433, 0.9999696971774877], # do grad, scipy, 32b 'do_grad-scipy-pytorch-32b': [0.49993881583213806, 1.0001085996627808], 'do_grad-scipy-tensorflow-32b': [0.4999384582042694, 1.0001084804534912], 'do_grad-scipy-jax-32b': [0.4999389052391052, 1.0001085996627808], # do grad, scipy, 64b 'do_grad-scipy-pytorch-64b': [0.49998837853531425, 0.9999696648069287], 'do_grad-scipy-tensorflow-64b': [0.4999883785353142, 0.9999696648069278], 'do_grad-scipy-jax-64b': [0.49998837853531414, 0.9999696648069285], # no grad, minuit, 32b - not very consistent for pytorch 'no_grad-minuit-numpy-32b': [0.49622172117233276, 1.0007264614105225], # nb: macos gives different numerics than CI # 'no_grad-minuit-pytorch-32b': [0.7465415000915527, 0.8796938061714172], 'no_grad-minuit-pytorch-32b': [0.9684963226318359, 0.9171305894851685], 'no_grad-minuit-tensorflow-32b': [0.5284154415130615, 0.9911751747131348], # 'no_grad-minuit-jax-32b': [0.5144518613815308, 0.9927923679351807], 'no_grad-minuit-jax-32b': [0.49620240926742554, 1.0018986463546753], # no grad, minuit, 64b - quite consistent 'no_grad-minuit-numpy-64b': [0.5000493563629738, 1.0000043833598724], 'no_grad-minuit-pytorch-64b': 
[0.5000493563758468, 1.0000043833508256], 'no_grad-minuit-tensorflow-64b': [0.5000493563645547, 1.0000043833598657], 'no_grad-minuit-jax-64b': [0.5000493563528641, 1.0000043833614634], # do grad, minuit, 32b 'do_grad-minuit-pytorch-32b': [0.5017611384391785, 0.9997190237045288], 'do_grad-minuit-tensorflow-32b': [0.5012885928153992, 1.0000673532485962], # 'do_grad-minuit-jax-32b': [0.5029529333114624, 0.9991086721420288], 'do_grad-minuit-jax-32b': [0.5007095336914062, 0.9999282360076904], # do grad, minuit, 64b 'do_grad-minuit-pytorch-64b': [0.500273961181471, 0.9996310135736226], 'do_grad-minuit-tensorflow-64b': [0.500273961167223, 0.9996310135864218], 'do_grad-minuit-jax-64b': [0.5002739611532436, 0.9996310135970794], }[identifier] result = pyhf.infer.mle.fit(data, m, do_grad=do_grad, do_stitch=do_stitch) rtol = 2e-06 # handle cases where macos and ubuntu provide very different results numerical if 'no_grad-minuit-tensorflow-32b' in identifier: # not a very large difference, so we bump the relative difference down rtol = 3e-02 if 'no_grad-minuit-pytorch-32b' in identifier: # quite a large difference rtol = 3e-01 if 'do_grad-minuit-pytorch-32b' in identifier: # a small difference rtol = 7e-05 if 'no_grad-minuit-jax-32b' in identifier: rtol = 4e-02 if 'do_grad-minuit-jax-32b' in identifier: rtol = 5e-03 # check fitted parameters > assert pytest.approx(expected, rel=rtol) == pyhf.tensorlib.tolist( result ), f"{identifier} = {pyhf.tensorlib.tolist(result)}" E AssertionError: do_grad-minuit-jax-64b = [0.500049321731032, 1.0000044174002167] E assert approx([0.5002739611532436 ± 1.0e-06, 0.9996310135970794 ± 2.0e-06]) == [0.500049321731032, 1.0000044174002167] E + where approx([0.5002739611532436 ± 1.0e-06, 0.9996310135970794 ± 2.0e-06]) = <function approx at 0x7fb30c6b6e50>([0.5002739611532436, 0.9996310135970794], rel=2e-06) E + where <function approx at 0x7fb30c6b6e50> = pytest.approx E + and [0.500049321731032, 1.0000044174002167] = <bound method jax_backend.tolist of <pyhf.tensor.jax_backend.jax_backend object at 0x7fb210064b00>>(DeviceArray([0.50004932, 1.00000442], dtype=float64)) E + where <bound method jax_backend.tolist of <pyhf.tensor.jax_backend.jax_backend object at 0x7fb210064b00>> = <pyhf.tensor.jax_backend.jax_backend object at 0x7fb210064b00>.tolist E + where <pyhf.tensor.jax_backend.jax_backend object at 0x7fb210064b00> = pyhf.tensorlib tests/test_optim.py:126: AssertionError ``` `test_minuit_strategy_do_grad` https://github.com/scikit-hep/pyhf/blob/bc32a695aaea0b79b3f0b40676446452d115cf8e/tests/test_optim.py#L182 as well as in `test_minuit_strategy_global` tests https://github.com/scikit-hep/pyhf/blob/bc32a695aaea0b79b3f0b40676446452d115cf8e/tests/test_optim.py#L210 ```pytb __________________ test_minuit_strategy_global[tensorflow-1] ___________________ self = <pyhf.optimize.minuit_optimizer object at 0x7fb2107be700> func = <function wrap_objective.<locals>.func at 0x7fb228255a60> x0 = [1.0, 1.0], do_grad = True, bounds = [(0, 10), (1e-10, 10.0)] fixed_vals = [], options = {} def _internal_minimize( self, func, x0, do_grad=False, bounds=None, fixed_vals=None, options={} ): minimizer = self._get_minimizer( func, x0, bounds, fixed_vals=fixed_vals, do_grad=do_grad ) result = self._minimize( minimizer, func, x0, do_grad=do_grad, bounds=bounds, fixed_vals=fixed_vals, options=options, ) try: > assert result.success E AssertionError src/pyhf/optimize/mixins.py:49: AssertionError During handling of the above exception, another exception occurred: mocker = 
<pytest_mock.plugin.MockerFixture object at 0x7fb1e3d59370> backend = (<pyhf.tensor.tensorflow_backend.tensorflow_backend object at 0x7fb2433ed080>, None) strategy = 1 @pytest.mark.parametrize('strategy', [0, 1]) def test_minuit_strategy_global(mocker, backend, strategy): pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(strategy=strategy)) spy = mocker.spy(pyhf.optimize.minuit_optimizer, '_minimize') m = pyhf.simplemodels.hepdata_like([50.0], [100.0], [10.0]) data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata) do_grad = pyhf.tensorlib.default_do_grad > pyhf.infer.mle.fit(data, m) tests/test_optim.py:217: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ src/pyhf/infer/mle.py:122: in fit return opt.minimize( src/pyhf/optimize/mixins.py:157: in minimize result = self._internal_minimize(**minimizer_kwargs, options=kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <pyhf.optimize.minuit_optimizer object at 0x7fb2107be700> func = <function wrap_objective.<locals>.func at 0x7fb228255a60> x0 = [1.0, 1.0], do_grad = True, bounds = [(0, 10), (1e-10, 10.0)] fixed_vals = [], options = {} def _internal_minimize( self, func, x0, do_grad=False, bounds=None, fixed_vals=None, options={} ): minimizer = self._get_minimizer( func, x0, bounds, fixed_vals=fixed_vals, do_grad=do_grad ) result = self._minimize( minimizer, func, x0, do_grad=do_grad, bounds=bounds, fixed_vals=fixed_vals, options=options, ) try: assert result.success except AssertionError: log.error(result, exc_info=True) > raise exceptions.FailedMinimization(result) E pyhf.exceptions.FailedMinimization: Optimization failed. Estimated distance to minimum too large. ``` We will need to investigate what's up and perhaps loosen tolerances for `iminuit`.
[ { "content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.1', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.1',\n ],\n 'torch': ['torch~=1.8'],\n 'jax': ['jax~=0.2.8', 'jaxlib~=0.1.58'],\n 'xmlio': [\n 'uproot3>=3.14.1',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1,<2.4'], # iminuit v2.4.0 behavior needs to be understood\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py" } ]
[ { "content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.1', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.1',\n ],\n 'torch': ['torch~=1.8'],\n 'jax': ['jax~=0.2.8', 'jaxlib~=0.1.58'],\n 'xmlio': [\n 'uproot3>=3.14.1',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit>=2.4'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py" } ]
diff --git a/lower-bound-requirements.txt b/lower-bound-requirements.txt index a6bc3ad546..6e8c03b154 100644 --- a/lower-bound-requirements.txt +++ b/lower-bound-requirements.txt @@ -9,7 +9,7 @@ pyyaml==5.1 uproot3==3.14.1 uproot==4.0.0 # minuit -iminuit==2.1.0 +iminuit==2.4.0 # tensorflow tensorflow==2.2.1 # c.f. PR #1001 tensorflow-probability==0.10.1 diff --git a/setup.py b/setup.py index 802d7cbd00..0123d966c8 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ 'uproot3>=3.14.1', 'uproot~=4.0', ], # uproot3 required until writing to ROOT supported in uproot4 - 'minuit': ['iminuit~=2.1,<2.4'], # iminuit v2.4.0 behavior needs to be understood + 'minuit': ['iminuit>=2.4'], } extras_require['backends'] = sorted( set( diff --git a/tests/test_optim.py b/tests/test_optim.py index 06dbf36206..256a73c369 100644 --- a/tests/test_optim.py +++ b/tests/test_optim.py @@ -83,10 +83,8 @@ def test_minimize(tensorlib, precision, optimizer, do_grad, do_stitch): # no grad, minuit, 32b - not very consistent for pytorch 'no_grad-minuit-numpy-32b': [0.7465415000915527, 0.8796938061714172], # nb: macos gives different numerics than CI - # 'no_grad-minuit-pytorch-32b': [0.7465415000915527, 0.8796938061714172], 'no_grad-minuit-pytorch-32b': [0.9684963226318359, 0.9171305894851685], 'no_grad-minuit-tensorflow-32b': [0.5284154415130615, 0.9911751747131348], - # 'no_grad-minuit-jax-32b': [0.5144518613815308, 0.9927923679351807], 'no_grad-minuit-jax-32b': [0.49620240926742554, 1.0018986463546753], # no grad, minuit, 64b - quite consistent 'no_grad-minuit-numpy-64b': [0.5000493563629738, 1.0000043833598724], @@ -94,39 +92,47 @@ def test_minimize(tensorlib, precision, optimizer, do_grad, do_stitch): 'no_grad-minuit-tensorflow-64b': [0.5000493563645547, 1.0000043833598657], 'no_grad-minuit-jax-64b': [0.5000493563528641, 1.0000043833614634], # do grad, minuit, 32b - 'do_grad-minuit-pytorch-32b': [0.5017611384391785, 0.9997190237045288], - 'do_grad-minuit-tensorflow-32b': [0.5012885928153992, 1.0000673532485962], - # 'do_grad-minuit-jax-32b': [0.5029529333114624, 0.9991086721420288], + # large divergence by tensorflow and pytorch + 'do_grad-minuit-pytorch-32b': [0.9731879234313965, 0.9999999403953552], + 'do_grad-minuit-tensorflow-32b': [0.9366918206214905, 0.9126002788543701], 'do_grad-minuit-jax-32b': [0.5007095336914062, 0.9999282360076904], # do grad, minuit, 64b - 'do_grad-minuit-pytorch-64b': [0.500273961181471, 0.9996310135736226], - 'do_grad-minuit-tensorflow-64b': [0.500273961167223, 0.9996310135864218], - 'do_grad-minuit-jax-64b': [0.5002739611532436, 0.9996310135970794], + 'do_grad-minuit-pytorch-64b': [0.500049321728735, 1.00000441739846], + 'do_grad-minuit-tensorflow-64b': [0.5000492930412292, 1.0000044107437134], + 'do_grad-minuit-jax-64b': [0.500049321731032, 1.0000044174002167], }[identifier] result = pyhf.infer.mle.fit(data, m, do_grad=do_grad, do_stitch=do_stitch) - rtol = 2e-06 + rel_tol = 1e-6 + # Fluctuations beyond precision shouldn't matter + abs_tol = 1e-5 if "32b" in identifier else 1e-8 + # handle cases where macos and ubuntu provide very different results numerical - if 'no_grad-minuit-tensorflow-32b' in identifier: - # not a very large difference, so we bump the relative difference down - rtol = 3e-02 - if 'no_grad-minuit-pytorch-32b' in identifier: - # quite a large difference - rtol = 3e-01 - if 'do_grad-minuit-pytorch-32b' in identifier: - # a small difference - rtol = 7e-05 - if 'no_grad-minuit-jax-32b' in identifier: - rtol = 4e-02 - # NB: ubuntu and macos give different results for 
32b - if "do_grad-scipy-jax-32b" in identifier: - rtol = 5e-03 - if "do_grad-minuit-jax-32b" in identifier: - rtol = 5e-03 + if "no_grad" in identifier: + rel_tol = 1e-5 + if "minuit-pytorch-32b" in identifier: + # large difference between local and CI + rel_tol = 3e-1 + if "minuit-tensorflow-32b" in identifier: + # not a very large difference, so we bump the relative difference down + rel_tol = 3e-2 + if "minuit-jax-32b" in identifier: + rel_tol = 4e-2 + elif all(part in identifier for part in ["do_grad", "32b"]): + if "scipy-jax" in identifier: + rel_tol = 1e-2 + # NB: ubuntu and macos give different results for 32b + if "minuit-tensorflow" in identifier: + # large difference between local and CI + rel_tol = 1e-1 + if "minuit-jax" in identifier: + rel_tol = 1e-2 # check fitted parameters - assert pytest.approx(expected, rel=rtol) == pyhf.tensorlib.tolist( + assert pytest.approx( + expected, rel=rel_tol, abs=abs_tol + ) == pyhf.tensorlib.tolist( result ), f"{identifier} = {pyhf.tensorlib.tolist(result)}" @@ -190,7 +196,7 @@ def test_minuit_strategy_do_grad(mocker, backend): the minuit strategy=0. When there is no user-provided gradient, check that one automatically sets the minuit strategy=1. """ - pyhf.set_backend(pyhf.tensorlib, 'minuit') + pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(tolerance=0.2)) spy = mocker.spy(pyhf.optimize.minuit_optimizer, '_minimize') m = pyhf.simplemodels.hepdata_like([50.0], [100.0], [10.0]) data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata) @@ -211,7 +217,9 @@ def test_minuit_strategy_do_grad(mocker, backend): @pytest.mark.parametrize('strategy', [0, 1]) def test_minuit_strategy_global(mocker, backend, strategy): - pyhf.set_backend(pyhf.tensorlib, pyhf.optimize.minuit_optimizer(strategy=strategy)) + pyhf.set_backend( + pyhf.tensorlib, pyhf.optimize.minuit_optimizer(strategy=strategy, tolerance=0.2) + ) spy = mocker.spy(pyhf.optimize.minuit_optimizer, '_minimize') m = pyhf.simplemodels.hepdata_like([50.0], [100.0], [10.0]) data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata)
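The test changes in the diff above replace a single relative tolerance with combined `rel` and `abs` tolerances in `pytest.approx`. A small sketch of how the two interact, using made-up numbers rather than the actual pyhf fit results:

```python
# Illustrative only; the numbers below are invented, not taken from the pyhf tests.
import pytest

expected = [0.5000493, 1.0000044]
fitted = [0.5000492, 1.0000045]

# pytest.approx accepts a value if it is within the relative tolerance (rel * expected)
# or the absolute tolerance (abs), i.e. effectively the larger of the two bounds.
assert pytest.approx(expected, rel=1e-6, abs=1e-5) == fitted
```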
python-poetry__poetry-2750
1.1.0b2 causes EnvCommandError (Non-posix paths with file:// protocol on Windows) <!-- Hi there! Thank you for discovering and submitting an issue. Before you submit this; let's make sure of a few things. Please make sure the following boxes are ticked if they are correct. If not, please try and fulfill these first. --> <!-- Checked checkbox should look like this: [x] --> - [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version. - [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate. - [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option). <!-- Once those are done, if you're able to fill in the following list with your information, it'd be very helpful to whoever handles the issue. --> - **OS version and name**: Windows 10 - **Poetry version**: 1.1.0b2 - **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: <!-- Gist Link Here --> ## Issue When on Poetry 1.0.10 there are no issues. But after having updated to poetry 1.1.0b2, I constantly get an EnvCommandError caused by a FileNotFoundError (a temporary setup.py file is not found): ```bash $ poetry --version Poetry version 1.1.0b2 $ poetry lock Updating dependencies Resolving dependencies... Writing lock file $ poetry install -vvv Using virtualenv: C:\Users\fredrik\code\repos\mylittleci\venv Installing dependencies from lock file Finding the necessary packages for the current system Package operations: 2 installs, 0 updates, 0 removals • Installing zipp (3.1.0) Stack trace: 7 c:\users\fredrik\.local\pipx\venvs\poetry\lib\site-packages\poetry\installation\executor.py:183 in _execute_operation 181│ 182│ try: → 183│ result = self._do_execute_operation(operation) 184│ except EnvCommandError as e: 185│ if e.e.returncode == -2: 6 c:\users\fredrik\.local\pipx\venvs\poetry\lib\site-packages\poetry\installation\executor.py:257 in _do_execute_operation 255│ return 0 256│ → 257│ result = getattr(self, "_execute_{}".format(method))(operation) 258│ 259│ if result != 0: 5 c:\users\fredrik\.local\pipx\venvs\poetry\lib\site-packages\poetry\installation\executor.py:394 in _execute_install 392│ 393│ def _execute_install(self, operation): # type: (Install) -> None → 394│ return self._install(operation) 395│ 396│ def _execute_update(self, operation): # type: (Update) -> None 4 c:\users\fredrik\.local\pipx\venvs\poetry\lib\site-packages\poetry\installation\executor.py:432 in _install 430│ args.insert(2, "-U") 431│ → 432│ return self.run_pip(*args) 433│ 434│ def _update(self, operation): 3 c:\users\fredrik\.local\pipx\venvs\poetry\lib\site-packages\poetry\installation\executor.py:283 in run_pip 281│ def run_pip(self, *args, **kwargs): # type: (...) 
-> int 282│ try: → 283│ self._env.run_pip(*args, **kwargs) 284│ except EnvCommandError as e: 285│ output = decode(e.e.output) 2 c:\users\fredrik\.local\pipx\venvs\poetry\lib\site-packages\poetry\utils\env.py:883 in run_pip 881│ pip = self.get_pip_command() 882│ cmd = pip + list(args) → 883│ return self._run(cmd, **kwargs) 884│ 885│ def _run(self, cmd, **kwargs): 1 c:\users\fredrik\.local\pipx\venvs\poetry\lib\site-packages\poetry\utils\env.py:1151 in _run 1149│ self.unset_env("__PYVENV_LAUNCHER__") 1150│ → 1151│ return super(VirtualEnv, self)._run(cmd, **kwargs) 1152│ 1153│ def execute(self, bin, *args, **kwargs): EnvCommandError Command C:\Users\fredrik\code\repos\mylittleci\venv\Scripts\pip.exe install --no-deps file://C:\Users\fredrik\AppData\Local\pypoetry\Cache\artifacts\3f\29\e0\445941d0045028dce97085f4263281587225a49fa7dd09f41e4c2752af\zipp-3.1.0-py3-none-any.whl errored with the following return code 1, and output: Processing c:\users\fredrik\appdata\local\pypoetry\cache\artifacts\3f\29\e0\445941d0045028dce97085f4263281587225a49fa7dd09f41e4c2752af\zipp-3.1.0-py3-none-any.whl ERROR: Command errored out with exit status 1: command: 'c:\users\fredrik\code\repos\mylittleci\venv\scripts\python.exe' -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'C:\\Users\\fredrik\\AppData\\Local\\Temp\\pip-req-build-gsp4euj0\\setup.py'"'"'; __file__='"'"'C:\\Users\\fredrik\\AppData\\Local\\Temp\\pip-req-build-gsp4euj0\\setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' egg_info --egg-base 'C:\Users\fredrik\AppData\Local\Temp\pip-pip-egg-info-_md_1odm' cwd: C:\Users\fredrik\AppData\Local\Temp\pip-req-build-gsp4euj0\ Complete output (5 lines): Traceback (most recent call last): File "<string>", line 1, in <module> File "C:\Users\fredrik\AppData\Local\Programs\Python\Python37\lib\tokenize.py", line 447, in open buffer = _builtin_open(filename, 'rb') FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\fredrik\\AppData\\Local\\Temp\\pip-req-build-gsp4euj0\\setup.py' ---------------------------------------- ERROR: Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output. at c:\users\fredrik\.local\pipx\venvs\poetry\lib\site-packages\poetry\utils\env.py:915 in _run 911│ output = subprocess.check_output( 912│ cmd, stderr=subprocess.STDOUT, **kwargs 913│ ) 914│ except CalledProcessError as e: → 915│ raise EnvCommandError(e, input=input_) 916│ 917│ return decode(output) 918│ 919│ def execute(self, bin, *args, **kwargs): ``` The contents of my `pyproject.toml`: ``` [tool.poetry] name = "mylittleci" version = "0.0.1" authors = ["Fredrik <***@***.***>"] description = "mylittleci" [tool.poetry.dependencies] python = "^3.7" [tool.poetry.dev-dependencies] importlib_metadata = "^1.7.0" [tool.poetry.scripts] calculator = "mylittleci.cli.calculator:main" [build-system] # https://python-poetry.org/docs/faq/#is-tox-supported requires = ["poetry-core>=1.0.0a5"] build-backend = "poetry.core.masonry.api" ```
[ { "content": "import hashlib\nimport json\n\nfrom typing import TYPE_CHECKING\n\nfrom poetry.core.packages.utils.link import Link\nfrom poetry.utils._compat import Path\n\nfrom .chooser import InvalidWheelName\nfrom .chooser import Wheel\n\n\nif TYPE_CHECKING:\n from typing import List\n from typing import Optional\n\n from poetry.config.config import Config\n from poetry.utils.env import Env\n\n\nclass Chef:\n def __init__(self, config, env): # type: (Config, Env) -> None\n self._config = config\n self._env = env\n self._cache_dir = (\n Path(config.get(\"cache-dir\")).expanduser().joinpath(\"artifacts\")\n )\n\n def prepare(self, archive): # type: (Path) -> Path\n return archive\n\n def prepare_sdist(self, archive): # type: (Path) -> Path\n return archive\n\n def prepare_wheel(self, archive): # type: (Path) -> Path\n return archive\n\n def should_prepare(self, archive): # type: (Path) -> bool\n return not self.is_wheel(archive)\n\n def is_wheel(self, archive): # type: (Path) -> bool\n return archive.suffix == \".whl\"\n\n def get_cached_archive_for_link(self, link): # type: (Link) -> Optional[Link]\n # If the archive is already a wheel, there is no need to cache it.\n if link.is_wheel:\n pass\n\n archives = self.get_cached_archives_for_link(link)\n\n if not archives:\n return link\n\n candidates = []\n for archive in archives:\n if not archive.is_wheel:\n candidates.append((float(\"inf\"), archive))\n continue\n\n try:\n wheel = Wheel(archive.filename)\n except InvalidWheelName:\n continue\n\n if not wheel.is_supported_by_environment(self._env):\n continue\n\n candidates.append(\n (wheel.get_minimum_supported_index(self._env.supported_tags), archive),\n )\n\n if not candidates:\n return link\n\n return min(candidates)[1]\n\n def get_cached_archives_for_link(self, link): # type: (Link) -> List[Link]\n cache_dir = self.get_cache_directory_for_link(link)\n\n archive_types = [\"whl\", \"tar.gz\", \"tar.bz2\", \"bz2\", \"zip\"]\n links = []\n for archive_type in archive_types:\n for archive in cache_dir.glob(\"*.{}\".format(archive_type)):\n links.append(Link(\"file://{}\".format(str(archive))))\n\n return links\n\n def get_cache_directory_for_link(self, link): # type: (Link) -> Path\n key_parts = {\"url\": link.url_without_fragment}\n\n if link.hash_name is not None and link.hash is not None:\n key_parts[link.hash_name] = link.hash\n\n if link.subdirectory_fragment:\n key_parts[\"subdirectory\"] = link.subdirectory_fragment\n\n key_parts[\"interpreter_name\"] = self._env.marker_env[\"interpreter_name\"]\n key_parts[\"interpreter_version\"] = \"\".join(\n self._env.marker_env[\"interpreter_version\"].split(\".\")[:2]\n )\n\n key = hashlib.sha256(\n json.dumps(\n key_parts, sort_keys=True, separators=(\",\", \":\"), ensure_ascii=True\n ).encode(\"ascii\")\n ).hexdigest()\n\n split_key = [key[:2], key[2:4], key[4:6], key[6:]]\n\n return self._cache_dir.joinpath(*split_key)\n", "path": "poetry/installation/chef.py" } ]
[ { "content": "import hashlib\nimport json\n\nfrom typing import TYPE_CHECKING\n\nfrom poetry.core.packages.utils.link import Link\nfrom poetry.utils._compat import Path\n\nfrom .chooser import InvalidWheelName\nfrom .chooser import Wheel\n\n\nif TYPE_CHECKING:\n from typing import List\n from typing import Optional\n\n from poetry.config.config import Config\n from poetry.utils.env import Env\n\n\nclass Chef:\n def __init__(self, config, env): # type: (Config, Env) -> None\n self._config = config\n self._env = env\n self._cache_dir = (\n Path(config.get(\"cache-dir\")).expanduser().joinpath(\"artifacts\")\n )\n\n def prepare(self, archive): # type: (Path) -> Path\n return archive\n\n def prepare_sdist(self, archive): # type: (Path) -> Path\n return archive\n\n def prepare_wheel(self, archive): # type: (Path) -> Path\n return archive\n\n def should_prepare(self, archive): # type: (Path) -> bool\n return not self.is_wheel(archive)\n\n def is_wheel(self, archive): # type: (Path) -> bool\n return archive.suffix == \".whl\"\n\n def get_cached_archive_for_link(self, link): # type: (Link) -> Optional[Link]\n # If the archive is already a wheel, there is no need to cache it.\n if link.is_wheel:\n pass\n\n archives = self.get_cached_archives_for_link(link)\n\n if not archives:\n return link\n\n candidates = []\n for archive in archives:\n if not archive.is_wheel:\n candidates.append((float(\"inf\"), archive))\n continue\n\n try:\n wheel = Wheel(archive.filename)\n except InvalidWheelName:\n continue\n\n if not wheel.is_supported_by_environment(self._env):\n continue\n\n candidates.append(\n (wheel.get_minimum_supported_index(self._env.supported_tags), archive),\n )\n\n if not candidates:\n return link\n\n return min(candidates)[1]\n\n def get_cached_archives_for_link(self, link): # type: (Link) -> List[Link]\n cache_dir = self.get_cache_directory_for_link(link)\n\n archive_types = [\"whl\", \"tar.gz\", \"tar.bz2\", \"bz2\", \"zip\"]\n links = []\n for archive_type in archive_types:\n for archive in cache_dir.glob(\"*.{}\".format(archive_type)):\n links.append(Link(archive.as_uri()))\n\n return links\n\n def get_cache_directory_for_link(self, link): # type: (Link) -> Path\n key_parts = {\"url\": link.url_without_fragment}\n\n if link.hash_name is not None and link.hash is not None:\n key_parts[link.hash_name] = link.hash\n\n if link.subdirectory_fragment:\n key_parts[\"subdirectory\"] = link.subdirectory_fragment\n\n key_parts[\"interpreter_name\"] = self._env.marker_env[\"interpreter_name\"]\n key_parts[\"interpreter_version\"] = \"\".join(\n self._env.marker_env[\"interpreter_version\"].split(\".\")[:2]\n )\n\n key = hashlib.sha256(\n json.dumps(\n key_parts, sort_keys=True, separators=(\",\", \":\"), ensure_ascii=True\n ).encode(\"ascii\")\n ).hexdigest()\n\n split_key = [key[:2], key[2:4], key[4:6], key[6:]]\n\n return self._cache_dir.joinpath(*split_key)\n", "path": "poetry/installation/chef.py" } ]
diff --git a/poetry/installation/chef.py b/poetry/installation/chef.py index 9e556d7690b..669ce17768e 100644 --- a/poetry/installation/chef.py +++ b/poetry/installation/chef.py @@ -81,7 +81,7 @@ def get_cached_archives_for_link(self, link): # type: (Link) -> List[Link] links = [] for archive_type in archive_types: for archive in cache_dir.glob("*.{}".format(archive_type)): - links.append(Link("file://{}".format(str(archive)))) + links.append(Link(archive.as_uri())) return links diff --git a/tests/installation/test_chef.py b/tests/installation/test_chef.py index d44df3b8894..93e1823fdb8 100644 --- a/tests/installation/test_chef.py +++ b/tests/installation/test_chef.py @@ -44,17 +44,19 @@ def test_get_cached_archives_for_link(config, mocker): ), ) + distributions = Path(__file__).parent.parent.joinpath("fixtures/distributions") mocker.patch.object( - chef, - "get_cache_directory_for_link", - return_value=Path(__file__).parent.parent.joinpath("fixtures/distributions"), + chef, "get_cache_directory_for_link", return_value=distributions, ) archives = chef.get_cached_archives_for_link( Link("https://files.python-poetry.org/demo-0.1.0.tar.gz") ) - assert 2 == len(archives) + assert archives + assert set(archives) == { + Link(path.as_uri()) for path in distributions.glob("demo-0.1.0*") + } def test_get_cache_directory_for_link(config):
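The one-line fix above swaps manual `"file://" + str(path)` concatenation for `Path.as_uri()`. A short sketch of the difference, using a made-up Windows cache path (not a real poetry cache entry); `PureWindowsPath` is used here only so the example runs on any OS:

```python
# Sketch of the URL construction difference behind the chef.py fix.
from pathlib import PureWindowsPath

archive = PureWindowsPath(
    r"C:\Users\fredrik\AppData\Local\pypoetry\Cache\artifacts\zipp-3.1.0-py3-none-any.whl"
)

naive = "file://{}".format(archive)  # backslashes and no slash before the drive letter
proper = archive.as_uri()            # 'file:///C:/Users/fredrik/...', a well-formed file URI

print(naive)
print(proper)
```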
akvo__akvo-rsr-3587
Indicators and dimensions randomly re-order themselves when saved When I add an indicator framework and save it, for each result indicators seem to randomly re-order themselves. This is difficult, because there is a certain logic to them (for example outcomes on top, outputs below). The same happen for indicator dimensions (which are not able to move anymore after being saved in the wrong order either). Can someone of the technical team take a look at this, please?
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport datetime\nimport decimal\n\nfrom akvo.rsr.fields import (LatitudeField, LongitudeField,\n ProjectLimitedTextField, ValidXMLCharField,\n ValidXMLTextField)\nfrom akvo.rsr.models import (AdministrativeLocation, BudgetItemLabel, Country,\n CrsAdd, CrsAddOtherFlag, Fss, FssForecast,\n Indicator, IndicatorDimension, IndicatorLabel,\n IndicatorPeriod, IndicatorReference,\n IndicatorPeriodActualDimension,\n IndicatorPeriodActualLocation,\n IndicatorPeriodTargetDimension,\n IndicatorPeriodTargetLocation, Keyword,\n Organisation, Project, OrganisationIndicatorLabel,\n ProjectDocument, ProjectDocumentCategory, ProjectLocation,\n Result, Transaction, TransactionSector)\nfrom akvo.utils import DjangoModel\n\nfrom collections import namedtuple\n\nfrom django.apps import apps\nfrom django.contrib.admin.models import LogEntry, CHANGE\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import MultipleObjectsReturned, ValidationError\nfrom django.db.models import (BooleanField, DateField, DecimalField, EmailField,\n ForeignKey, ManyToManyField, NullBooleanField, PositiveIntegerField,\n PositiveSmallIntegerField, URLField)\nfrom django.utils.translation import ugettext_lazy as _\n\n\nRELATED_OBJECTS_MAPPING = {\n # Special mapping for related objects without a 'project' field\n Indicator: (Result, 'result'),\n IndicatorLabel: (Indicator, 'indicator'),\n IndicatorPeriod: (Indicator, 'indicator'),\n IndicatorReference: (Indicator, 'indicator'),\n IndicatorDimension: (Indicator, 'indicator'),\n IndicatorPeriodActualDimension: (IndicatorPeriod, 'period'),\n IndicatorPeriodActualLocation: (IndicatorPeriod, 'period'),\n IndicatorPeriodTargetDimension: (IndicatorPeriod, 'period'),\n IndicatorPeriodTargetLocation: (IndicatorPeriod, 'period'),\n TransactionSector: (Transaction, 'transaction'),\n ProjectLocation: (Project, 'location_target'),\n AdministrativeLocation: (ProjectLocation, 'location'),\n ProjectDocumentCategory: (ProjectDocument, 'document'),\n CrsAddOtherFlag: (CrsAdd, 'crs'),\n FssForecast: (Fss, 'fss'),\n}\n\nMANY_TO_MANY_FIELDS = {\n # Special mapping for many to many fields\n Keyword: 'keywords',\n}\n\n\ndef add_error(errors, message, field_name):\n \"\"\"Appends a new error to the errors list.\"\"\"\n if isinstance(message, BaseException):\n message = message.message\n errors.append(\n {'name': field_name,\n 'error': message.encode('utf8').capitalize()}\n )\n return errors\n\n\ndef log_changes(changes, user, project):\n \"\"\"Logs all changes to Django's LogEntry model.\"\"\"\n if changes:\n change_message = u''\n first_part = u'%s' % _(u'Project editor, changed: ')\n\n for obj_changes in changes:\n obj = obj_changes[0]\n fields = [obj_change[0] for obj_change in obj_changes[1]]\n\n if not isinstance(obj, Project):\n obj_change_message = u''\n\n for field in fields:\n obj_change_message += u'%s, ' % field\n\n LogEntry.objects.log_action(\n user_id=user.pk,\n content_type_id=ContentType.objects.get_for_model(obj).pk,\n object_id=obj.pk,\n object_repr=obj.__unicode__(),\n action_flag=CHANGE,\n change_message=first_part + obj_change_message[:-2] + u'.'\n )\n\n change_message += u'%s (id: %s): ' % (type(obj)._meta.verbose_name.capitalize(),\n 
str(obj.pk)) + obj_change_message\n\n else:\n for field in fields:\n change_message += u'%s, ' % field\n\n change_message = change_message[:-2] + u'.'\n\n LogEntry.objects.log_action(\n user_id=user.pk,\n content_type_id=ContentType.objects.get_for_model(project).pk,\n object_id=project.pk,\n object_repr=project.__unicode__(),\n action_flag=CHANGE,\n change_message=first_part + change_message\n )\n\n field_changes = []\n for change in changes:\n for fields in change[1]:\n if not (fields[1] == 'photo' or 'document-document-' in fields[1]):\n field_changes.append([fields[1], fields[2]])\n\n return field_changes\n\n return []\n\n\ndef split_key(key):\n \"\"\"\n Helper function for splitting the keys of the form data. Key input will be a string like\n 'rsr_relatedproject.relation.1234_new-0' and it will return a KeyInfo namedtuple\n \"\"\"\n KeyParts = namedtuple('KeyParts', 'model, field, ids')\n\n key_parts = KeyParts._make(key.split('.'))\n return KeyParts._make([\n DjangoModel._make([key_parts.model] + key_parts.model.split('_')),\n key_parts.field,\n key_parts.ids.split('_')\n ])\n\n\ndef pre_process_data(key, data, errors):\n \"\"\"\n Pre-process the data. Needed to transform some of the form data to usable data in the Django\n models. Returns the processed data and any errors that have occurred so far.\n \"\"\"\n # Retrieve field information first\n try:\n key_parts = split_key(key)\n except TypeError:\n return data, errors\n\n Model = apps.get_model(key_parts.model.app, key_parts.model.model_name)\n model_field = Model._meta.get_field(key_parts.field)\n\n # Text data does not need pre-processing\n if isinstance(model_field, (EmailField, ProjectLimitedTextField, URLField, ValidXMLCharField,\n ValidXMLTextField)):\n return data.strip(), errors\n\n # Dates should be converted to a datetime object, or None if empty\n if isinstance(model_field, DateField):\n if data:\n try:\n return datetime.datetime.strptime(data, \"%d/%m/%Y\").strftime(\"%Y-%m-%d\"), errors\n except ValueError as e:\n errors = add_error(errors, e, key)\n return None, errors\n else:\n return None, errors\n\n # Integers should be converted to an integer\n if isinstance(model_field, (PositiveIntegerField, PositiveSmallIntegerField)):\n if data:\n try:\n return int(data), errors\n except ValueError as e:\n errors = add_error(errors, e, key)\n return None, errors\n else:\n return None, errors\n\n # Decimals should be converted to a decimal\n if isinstance(model_field, DecimalField):\n if data:\n try:\n return decimal.Decimal(data), errors\n except decimal.InvalidOperation as e:\n if ',' in data:\n # Specific error message for commas\n e = u'%s' % _(u'It is not allowed to use a comma, use a period to denote '\n u'decimals.')\n errors = add_error(errors, e, key)\n return None, errors\n else:\n return None, errors\n\n # Latitude and longitude should be converted to a float\n if isinstance(model_field, (LatitudeField, LongitudeField)):\n if data:\n try:\n return float(data), errors\n except ValueError as e:\n errors = add_error(errors, e, key)\n return None, errors\n else:\n return None, errors\n\n # Booleans should be converted to True or False\n if isinstance(model_field, BooleanField):\n return (True, errors) if data == '1' else (False, errors)\n\n # Booleans should be converted to True, False or None\n if isinstance(model_field, NullBooleanField):\n if data == '1':\n return True, errors\n elif data == '2':\n return False, errors\n else:\n return None, errors\n\n # In case of a foreign key, we first check if this is a project 
or organisation foreign key.\n # Then the data should be converted to the related object.\n if isinstance(model_field, ForeignKey):\n if data:\n try:\n if 'project' in key_parts.field:\n return Project.objects.get(pk=int(data)), errors\n if 'organisation' in key_parts.field:\n return Organisation.objects.get(pk=int(data)), errors\n if key_parts.model.model_name == 'indicatorlabel' and key_parts.field == 'label':\n return OrganisationIndicatorLabel.objects.get(pk=int(data)), errors\n if key_parts.model.model_name == 'budgetitem' and key_parts.field == 'label':\n return BudgetItemLabel.objects.get(pk=int(data)), errors\n if 'country' in key_parts.field:\n return Country.objects.get(pk=int(data)), errors\n return None, errors\n except (Project.DoesNotExist, Organisation.DoesNotExist, BudgetItemLabel.DoesNotExist,\n Country.DoesNotExist, OrganisationIndicatorLabel.DoesNotExist) as e:\n errors = add_error(errors, e, key)\n return None, errors\n else:\n return None, errors\n\n # Keywords is the only ManyToManyField\n if isinstance(model_field, ManyToManyField):\n try:\n return Keyword.objects.get(pk=int(data)), errors\n except Exception as e:\n errors = add_error(errors, e, key)\n return None, errors\n\n return data, errors\n\n\ndef convert_related_objects(rel_objects):\n \"\"\"\n Converts related objects (db_table without 'rsr_') to the API endpoint so that it can be used\n in the template.\n \"\"\"\n\n model_to_api = {\n 'relatedproject': 'related_project',\n 'humanitarianscope': 'humanitarian_scope',\n 'projectcontact': 'project_contact',\n 'indicatorperiod': 'indicator_period',\n 'indicatorperiodactualdimension': 'indicator_period_actual_dimension',\n 'indicatorperiodactuallocation': 'indicator_period_actual_location',\n 'indicatorperiodtargetdimension': 'indicator_period_target_dimension',\n 'indicatorperiodtargetlocation': 'indicator_period_target_location',\n 'indicatorreference': 'indicator_reference',\n 'indicatordimension': 'indicator_dimension',\n 'projectcondition': 'project_condition',\n 'budgetitem': 'budget_item',\n 'countrybudgetitem': 'country_budget_item',\n 'transactionsector': 'transaction_sector',\n 'planneddisbursement': 'planned_disbursement',\n 'projectlocation': 'project_location',\n 'administrativelocation': 'administrative_location',\n 'recipientcountry': 'recipient_country',\n 'recipientregion': 'recipient_region',\n 'policymarker': 'policy_marker',\n 'projectdocument': 'project_document',\n 'projectdocumentcategory': 'project_document_category',\n 'crsadd': 'crs_add',\n 'crsaddotherflag': 'crsadd_other_flag',\n 'fssforecast': 'fss_forecast',\n 'legacydata': 'legacy_data',\n }\n\n new_rel_objects = []\n\n for key in rel_objects:\n # First retrieve the unicode and create a new dict including the unicode\n db_table, old_key = key.split('.')\n Model = apps.get_model(db_table.split('_')[0], db_table.split('_')[1])\n unicode = Model.objects.get(pk=int(rel_objects[key])).__unicode__()\n new_dict_response = {\n 'new_id': rel_objects[key],\n 'unicode': unicode\n }\n\n # remove the 'rsr_' part (e.g. 
a key can be 'rsr_relatedproject') and look up the db_table\n # in the mapping, or take the default otherwise\n db_table = db_table[4:]\n if db_table in model_to_api:\n new_dict_response['old_id'] = '{0}.{1}'.format(model_to_api[db_table], old_key)\n else:\n new_dict_response['old_id'] = '{0}.{1}'.format(db_table, old_key)\n\n new_rel_objects.append(new_dict_response)\n\n return new_rel_objects\n\n\ndef add_changes(changes, obj, field, field_name, orig_data):\n \"\"\"\n Add the changes to the changes list in the required format. The changes list contains a list\n per related object, so we need to check if the object is already in the changes list and\n append the new changes to it.\n \"\"\"\n if obj not in [change[0] for change in changes]:\n # Object not yet in changes list\n changes.append([obj, [[field, field_name, orig_data]]])\n else:\n # Object in changes list, look it up and append the new changes\n for change in changes:\n if obj == change[0]:\n change[1].append([field, field_name, orig_data])\n break\n return changes\n\n\ndef handle_validation_error(e, fields, field_names, errors):\n validation_error_dict = dict(e)\n for field, field_name in zip(fields, field_names):\n if field in validation_error_dict:\n # Since we save the object per field, display the (first) error\n # of this field on the field itself.\n errors = add_error(errors, str(validation_error_dict[field][0]), field_name)\n validation_error_dict.pop(field)\n\n if validation_error_dict:\n for key, value in validation_error_dict.items():\n # Somewhere else in the model a validation error occurred (or a\n # combination of fields). We display this nonetheless and do\n # not save the field.\n field, field_name = fields[0], field_names[0]\n # Guess the correct field_name and display the error\n actual_field_name = field_name.replace('.{}.'.format(field), '.{}.'.format(key))\n errors = add_error(errors, str(value[0]), actual_field_name)\n\n\ndef update_object(Model, obj_id, fields, field_names, values, changes, errors,\n rel_objects, related_obj_id):\n \"\"\"Update an existing object.\n\n Take a list of fields and corresponding values, and update the object. The\n following steps are performed to do the update:\n\n 1. Try to retrieve the object.\n\n 2. Pre-process the data, ignore fields that have obviously incorrect data\n (wrong type and cannot be typecasted, etc.)\n\n 3. Set the attributes corresponding to the fields with the supplied values\n\n 4. Perform object and field validations\n\n 5. 
Return the changes and errors\n\n \"\"\"\n\n # Try to retrieve object with specified ID\n try:\n # Retrieve object and set new value of field\n obj = Model.objects.get(pk=int(obj_id))\n except (Model.DoesNotExist, ValueError) as e:\n # If object does not exist or 'obj_id' is not an integer, add an error\n # and do not process the object\n for field_name in field_names:\n errors = add_error(errors, str(e), field_name)\n return changes, errors, rel_objects\n\n # Set all the attributes with specified values\n for i, (field, field_name, value) in enumerate(zip(fields, field_names, values)):\n obj_data, errors = pre_process_data(field_name, value, errors)\n if field_name in [error['name'] for error in errors]:\n fields.pop(i)\n continue\n setattr(obj, field, obj_data)\n\n try:\n # The object has been retrieved, perform validations\n obj.full_clean(exclude=['primary_location',\n 'primary_organisation',\n 'last_update'])\n except ValidationError as e:\n handle_validation_error(e, fields, field_names, errors)\n except Exception as e:\n for field_name in field_names:\n # Just in case any other error will occur, this will also be\n # displayed underneath the field in the project editor.\n errors = add_error(errors, str(e), field_name)\n else:\n update_fields = fields\n # if the object has a last_modified_at field, include it in the update\n if hasattr(obj, 'last_modified_at'):\n update_fields = fields + ['last_modified_at']\n\n # No validation errors. Save the field and append the changes to the\n # changes list. In case of a non-Project object, add the object to the\n # related objects list, so that the ID will be replaced (in case of a\n # new object) and the unicode will be replaced.\n obj.save(update_fields=update_fields)\n for field, field_name, value in zip(fields, field_names, values):\n changes = add_changes(changes, obj, field, field_name, value)\n\n if not (related_obj_id in rel_objects or isinstance(obj, Project)):\n rel_objects[related_obj_id] = obj.pk\n\n return changes, errors, rel_objects\n\n\ndef update_m2m_object(project, Model, obj_id, field, orig_data, field_name, changes, errors,\n rel_objects, related_obj_id):\n\n m2m_relation = getattr(project, MANY_TO_MANY_FIELDS[Model])\n\n # We pre-process the data first. E.g. dates will be converted to datetime objects\n obj_data, errors = pre_process_data(field_name, orig_data, errors)\n if field_name in [error['name'] for error in errors]:\n return\n\n try:\n m2m_object = Model.objects.get(pk=int(obj_data))\n if obj_id is not None:\n # If there already was an appointed object in the many to many relation,\n # remove the old object first\n old_m2m_object = Model.objects.get(pk=int(obj_id))\n if old_m2m_object in m2m_relation.all():\n m2m_relation.remove(old_m2m_object)\n # Add the new many to many object to the project\n m2m_relation.add(m2m_object)\n changes = add_changes(changes, m2m_object, field, field_name, obj_data)\n if related_obj_id not in rel_objects:\n rel_objects[related_obj_id] = obj_data\n\n except Model.DoesNotExist as e:\n errors = add_error(errors, str(e), field_name)\n\n\ndef create_object(Model, kwargs, fields, field_names, values, changes, errors, rel_objects,\n related_obj_id):\n \"\"\"Create a new object.\n\n Either an error can occur while creating the object, or during the\n full_clean() function. 
In any case, catch the error and display it in the\n project editor.\n\n \"\"\"\n try:\n # Retrieve the object with the new value and perform validations.\n obj = Model.objects.create(**kwargs)\n obj.full_clean()\n except ValidationError as e:\n handle_validation_error(e, fields, field_names, errors)\n obj.delete()\n except MultipleObjectsReturned:\n # Multiple reporting organisations are not allowed and will raise a MultipleObjectsReturned\n # exception. In this case, display a nice error message and delete the created partnership.\n for field_name in field_names:\n message = unicode(_(u'There can be only one reporting organisation'))\n # FIXME: Not sure what the field name should be here...\n errors = add_error(errors, str(message), field_name)\n obj.delete()\n except Exception as e:\n # Just in case any other error will occur, this will also be displayed\n # underneath the field in the project editor.\n for field_name in field_names:\n errors = add_error(errors, str(e), field_name)\n else:\n # No validation errors. Save the field and append the changes to the changes list.\n # Add the object to the related objects list, so that the ID and unicode will be replaced.\n for field, field_name, value in zip(fields, field_names, values):\n changes = add_changes(changes, obj, field, field_name, value)\n rel_objects[related_obj_id] = obj.pk\n\n return changes, errors, rel_objects\n\n\ndef create_related_object(parent_obj_id, Model, fields, field_names, values, changes, errors,\n rel_objects, related_obj_id):\n \"\"\"Create a related object\n\n Create a related object with all the values for all the fields. It is\n called only once per object for each save in the project editor.\n\n Related objects are created \"fully\", and never need to be updated post\n creation, in a single project editor save.\n\n \"\"\"\n\n # Related object has not yet been created (not added to rel_objects dict)\n kwargs = dict()\n\n if Model in RELATED_OBJECTS_MAPPING:\n # Special mapping needed\n RelatedModel, related_field = RELATED_OBJECTS_MAPPING[Model]\n kwargs[related_field] = RelatedModel.objects.get(pk=parent_obj_id)\n else:\n # Project is the related object\n kwargs['project'] = Project.objects.get(pk=parent_obj_id)\n\n # Set all the attributes with specified values\n for field, field_name, value in zip(fields, field_names, values):\n obj_data, errors = pre_process_data(field_name, value, errors)\n if field_name in [error['name'] for error in errors]:\n continue\n # Add field data, create new object and add new id to rel_objects dict\n kwargs[field] = obj_data\n\n changes, errors, rel_objects = create_object(\n Model, kwargs, fields, field_names, values, changes, errors, rel_objects,\n related_obj_id\n )\n\n return changes, errors, rel_objects\n\n\ndef group_data_by_objects(data):\n \"\"\"Group form data by objects (based on model and id)\"\"\"\n grouped_data = {}\n for key, value in data.items():\n key_parts = split_key(key)\n group_key = (key_parts.model.model_name,) + tuple(key_parts.ids)\n grouped_data.setdefault(group_key, []).append((key, value, key_parts))\n return grouped_data\n\n\ndef group_get_all_fields(grouped_data, key_parts):\n group_key = (key_parts.model.model_name,) + tuple(key_parts.ids)\n update_data = grouped_data[group_key]\n keys = [key for key, _, _ in update_data]\n values = [value for _, value, _ in update_data]\n fields = [key_part.field for _, _, key_part in update_data]\n return fields, values, keys\n\n\ndef sort_keys(x):\n \"\"\"Compute a level at which the model corresponding to the 
key occurs.\n\n This function is used to sort keys in the data such that the objects higher\n in the hierarchy appear before the objects that depend on them.\n\n For example, Project -> Result -> Indicator, IndicatorPeriod\n\n The level is computed based on the number of steps we can take in the\n RELATED_OBJECTS_MAPPING hierarchy before we reach the Project.\n\n \"\"\"\n key_parts = split_key(x)\n Model = apps.get_model(key_parts.model.app, key_parts.model.model_name)\n level = 1\n while Model in RELATED_OBJECTS_MAPPING:\n level += 1\n Model, _ = RELATED_OBJECTS_MAPPING[Model]\n if Model in MANY_TO_MANY_FIELDS or Model != Project:\n level += 1\n return level\n\n\ndef create_or_update_objects_from_data(project, data):\n errors, changes, rel_objects = [], [], {}\n\n # Keys like this are possible:\n # 'rsr_indicatorperiod.period_start.1234_new-0_new-0_new-0' Meaning that\n # there is a new indicator period (the last id is 'new-0'), with a new\n # indicator (second last id is also 'new-0'), with a new result (second id\n # is also 'new-0'), on an existing project (project id is '1234'). We sort\n # the keys in such a way that the result appears before the indicator which\n # appears before the indicatorperiod. This ensures that objects higher in\n # the hierarchy, which lower objects depend on, are created first.\n\n grouped_data = group_data_by_objects(data)\n sorted_keys = sorted(data.keys(), key=sort_keys)\n\n for key in sorted_keys:\n\n # When saving all fields on an object, a bunch of fields are\n # removed together. This may cause some keys to not be present,\n # when iterating over the sorted keys.\n if key not in data:\n continue\n\n # The keys in form data are of format \"rsr_project.title.1234\".\n # Separated by .'s, the data contains the model name, field name and object id list\n key_parts = split_key(key)\n\n # Retrieve the model and related object ID (e.g. rsr_project.1234)\n Model = apps.get_model(key_parts.model.app, key_parts.model.model_name)\n related_obj_id = ''.join(\n [key_parts.model.table_name, '.', '_'.join(key_parts.ids)]\n )\n\n if Model in MANY_TO_MANY_FIELDS:\n # This field is a many to many field, which need special handling\n obj_id = None if len(key_parts.ids) != 1 else key_parts.ids[0]\n update_m2m_object(\n project, Model, obj_id, key_parts.field, data[key], key, changes, errors,\n rel_objects, related_obj_id\n )\n data.pop(key, None)\n\n elif len(key_parts.ids) == 1:\n # Already existing object, update it\n fields, values, keys = group_get_all_fields(grouped_data, key_parts)\n changes, errors, rel_objects = update_object(\n Model, key_parts.ids[0], fields, keys, values, changes, errors, rel_objects,\n related_obj_id\n )\n for key in keys:\n data.pop(key, None)\n\n else:\n # New object, with potentially a new parent as well\n parent_id = '_'.join(key_parts.ids[:-1])\n\n if 'new' not in parent_id:\n # New object, but parent is already existing\n parent_obj_id = key_parts.ids[-2]\n\n else:\n # New object, and parent are new according to the key.\n # However, it is possible that the parent was already\n # created earlier in the script. So we also check if\n # the parent object was already created earlier.\n ParentModel, _ = RELATED_OBJECTS_MAPPING[Model]\n parent_obj_rel_obj_key = ParentModel._meta.db_table + '.' 
+ parent_id\n if parent_obj_rel_obj_key in rel_objects:\n parent_obj_id = rel_objects[parent_obj_rel_obj_key]\n else:\n parent_obj_id = None\n\n if parent_obj_id is not None:\n fields, values, keys = group_get_all_fields(grouped_data, key_parts)\n create_related_object(\n parent_obj_id, Model, fields, keys, values, changes, errors, rel_objects,\n related_obj_id\n )\n for key in keys:\n data.pop(key, None)\n\n else:\n # Parent object has not been created yet.\n # We should never get to this state!\n raise RuntimeError('There was a problem walking the hierarchy of objects')\n\n return errors, changes, rel_objects\n", "path": "akvo/rest/views/project_editor_utils.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport datetime\nimport decimal\n\nfrom akvo.rsr.fields import (LatitudeField, LongitudeField,\n ProjectLimitedTextField, ValidXMLCharField,\n ValidXMLTextField)\nfrom akvo.rsr.models import (AdministrativeLocation, BudgetItemLabel, Country,\n CrsAdd, CrsAddOtherFlag, Fss, FssForecast,\n Indicator, IndicatorDimension, IndicatorLabel,\n IndicatorPeriod, IndicatorReference,\n IndicatorPeriodActualDimension,\n IndicatorPeriodActualLocation,\n IndicatorPeriodTargetDimension,\n IndicatorPeriodTargetLocation, Keyword,\n Organisation, Project, OrganisationIndicatorLabel,\n ProjectDocument, ProjectDocumentCategory, ProjectLocation,\n Result, Transaction, TransactionSector)\nfrom akvo.utils import DjangoModel\n\nfrom collections import namedtuple\n\nfrom django.apps import apps\nfrom django.contrib.admin.models import LogEntry, CHANGE\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import MultipleObjectsReturned, ValidationError\nfrom django.db.models import (BooleanField, DateField, DecimalField, EmailField,\n ForeignKey, ManyToManyField, NullBooleanField, PositiveIntegerField,\n PositiveSmallIntegerField, URLField)\nfrom django.utils.translation import ugettext_lazy as _\n\n\nRELATED_OBJECTS_MAPPING = {\n # Special mapping for related objects without a 'project' field\n Indicator: (Result, 'result'),\n IndicatorLabel: (Indicator, 'indicator'),\n IndicatorPeriod: (Indicator, 'indicator'),\n IndicatorReference: (Indicator, 'indicator'),\n IndicatorDimension: (Indicator, 'indicator'),\n IndicatorPeriodActualDimension: (IndicatorPeriod, 'period'),\n IndicatorPeriodActualLocation: (IndicatorPeriod, 'period'),\n IndicatorPeriodTargetDimension: (IndicatorPeriod, 'period'),\n IndicatorPeriodTargetLocation: (IndicatorPeriod, 'period'),\n TransactionSector: (Transaction, 'transaction'),\n ProjectLocation: (Project, 'location_target'),\n AdministrativeLocation: (ProjectLocation, 'location'),\n ProjectDocumentCategory: (ProjectDocument, 'document'),\n CrsAddOtherFlag: (CrsAdd, 'crs'),\n FssForecast: (Fss, 'fss'),\n}\n\nMANY_TO_MANY_FIELDS = {\n # Special mapping for many to many fields\n Keyword: 'keywords',\n}\n\n\ndef add_error(errors, message, field_name):\n \"\"\"Appends a new error to the errors list.\"\"\"\n if isinstance(message, BaseException):\n message = message.message\n errors.append(\n {'name': field_name,\n 'error': message.encode('utf8').capitalize()}\n )\n return errors\n\n\ndef log_changes(changes, user, project):\n \"\"\"Logs all changes to Django's LogEntry model.\"\"\"\n if changes:\n change_message = u''\n first_part = u'%s' % _(u'Project editor, changed: ')\n\n for obj_changes in changes:\n obj = obj_changes[0]\n fields = [obj_change[0] for obj_change in obj_changes[1]]\n\n if not isinstance(obj, Project):\n obj_change_message = u''\n\n for field in fields:\n obj_change_message += u'%s, ' % field\n\n LogEntry.objects.log_action(\n user_id=user.pk,\n content_type_id=ContentType.objects.get_for_model(obj).pk,\n object_id=obj.pk,\n object_repr=obj.__unicode__(),\n action_flag=CHANGE,\n change_message=first_part + obj_change_message[:-2] + u'.'\n )\n\n change_message += u'%s (id: %s): ' % (type(obj)._meta.verbose_name.capitalize(),\n 
str(obj.pk)) + obj_change_message\n\n else:\n for field in fields:\n change_message += u'%s, ' % field\n\n change_message = change_message[:-2] + u'.'\n\n LogEntry.objects.log_action(\n user_id=user.pk,\n content_type_id=ContentType.objects.get_for_model(project).pk,\n object_id=project.pk,\n object_repr=project.__unicode__(),\n action_flag=CHANGE,\n change_message=first_part + change_message\n )\n\n field_changes = []\n for change in changes:\n for fields in change[1]:\n if not (fields[1] == 'photo' or 'document-document-' in fields[1]):\n field_changes.append([fields[1], fields[2]])\n\n return field_changes\n\n return []\n\n\ndef split_key(key):\n \"\"\"\n Helper function for splitting the keys of the form data. Key input will be a string like\n 'rsr_relatedproject.relation.1234_new-0' and it will return a KeyInfo namedtuple\n \"\"\"\n KeyParts = namedtuple('KeyParts', 'model, field, ids')\n\n key_parts = KeyParts._make(key.split('.'))\n return KeyParts._make([\n DjangoModel._make([key_parts.model] + key_parts.model.split('_')),\n key_parts.field,\n key_parts.ids.split('_')\n ])\n\n\ndef pre_process_data(key, data, errors):\n \"\"\"\n Pre-process the data. Needed to transform some of the form data to usable data in the Django\n models. Returns the processed data and any errors that have occurred so far.\n \"\"\"\n # Retrieve field information first\n try:\n key_parts = split_key(key)\n except TypeError:\n return data, errors\n\n Model = apps.get_model(key_parts.model.app, key_parts.model.model_name)\n model_field = Model._meta.get_field(key_parts.field)\n\n # Text data does not need pre-processing\n if isinstance(model_field, (EmailField, ProjectLimitedTextField, URLField, ValidXMLCharField,\n ValidXMLTextField)):\n return data.strip(), errors\n\n # Dates should be converted to a datetime object, or None if empty\n if isinstance(model_field, DateField):\n if data:\n try:\n return datetime.datetime.strptime(data, \"%d/%m/%Y\").strftime(\"%Y-%m-%d\"), errors\n except ValueError as e:\n errors = add_error(errors, e, key)\n return None, errors\n else:\n return None, errors\n\n # Integers should be converted to an integer\n if isinstance(model_field, (PositiveIntegerField, PositiveSmallIntegerField)):\n if data:\n try:\n return int(data), errors\n except ValueError as e:\n errors = add_error(errors, e, key)\n return None, errors\n else:\n return None, errors\n\n # Decimals should be converted to a decimal\n if isinstance(model_field, DecimalField):\n if data:\n try:\n return decimal.Decimal(data), errors\n except decimal.InvalidOperation as e:\n if ',' in data:\n # Specific error message for commas\n e = u'%s' % _(u'It is not allowed to use a comma, use a period to denote '\n u'decimals.')\n errors = add_error(errors, e, key)\n return None, errors\n else:\n return None, errors\n\n # Latitude and longitude should be converted to a float\n if isinstance(model_field, (LatitudeField, LongitudeField)):\n if data:\n try:\n return float(data), errors\n except ValueError as e:\n errors = add_error(errors, e, key)\n return None, errors\n else:\n return None, errors\n\n # Booleans should be converted to True or False\n if isinstance(model_field, BooleanField):\n return (True, errors) if data == '1' else (False, errors)\n\n # Booleans should be converted to True, False or None\n if isinstance(model_field, NullBooleanField):\n if data == '1':\n return True, errors\n elif data == '2':\n return False, errors\n else:\n return None, errors\n\n # In case of a foreign key, we first check if this is a project 
or organisation foreign key.\n # Then the data should be converted to the related object.\n if isinstance(model_field, ForeignKey):\n if data:\n try:\n if 'project' in key_parts.field:\n return Project.objects.get(pk=int(data)), errors\n if 'organisation' in key_parts.field:\n return Organisation.objects.get(pk=int(data)), errors\n if key_parts.model.model_name == 'indicatorlabel' and key_parts.field == 'label':\n return OrganisationIndicatorLabel.objects.get(pk=int(data)), errors\n if key_parts.model.model_name == 'budgetitem' and key_parts.field == 'label':\n return BudgetItemLabel.objects.get(pk=int(data)), errors\n if 'country' in key_parts.field:\n return Country.objects.get(pk=int(data)), errors\n return None, errors\n except (Project.DoesNotExist, Organisation.DoesNotExist, BudgetItemLabel.DoesNotExist,\n Country.DoesNotExist, OrganisationIndicatorLabel.DoesNotExist) as e:\n errors = add_error(errors, e, key)\n return None, errors\n else:\n return None, errors\n\n # Keywords is the only ManyToManyField\n if isinstance(model_field, ManyToManyField):\n try:\n return Keyword.objects.get(pk=int(data)), errors\n except Exception as e:\n errors = add_error(errors, e, key)\n return None, errors\n\n return data, errors\n\n\ndef convert_related_objects(rel_objects):\n \"\"\"\n Converts related objects (db_table without 'rsr_') to the API endpoint so that it can be used\n in the template.\n \"\"\"\n\n model_to_api = {\n 'relatedproject': 'related_project',\n 'humanitarianscope': 'humanitarian_scope',\n 'projectcontact': 'project_contact',\n 'indicatorperiod': 'indicator_period',\n 'indicatorperiodactualdimension': 'indicator_period_actual_dimension',\n 'indicatorperiodactuallocation': 'indicator_period_actual_location',\n 'indicatorperiodtargetdimension': 'indicator_period_target_dimension',\n 'indicatorperiodtargetlocation': 'indicator_period_target_location',\n 'indicatorreference': 'indicator_reference',\n 'indicatordimension': 'indicator_dimension',\n 'projectcondition': 'project_condition',\n 'budgetitem': 'budget_item',\n 'countrybudgetitem': 'country_budget_item',\n 'transactionsector': 'transaction_sector',\n 'planneddisbursement': 'planned_disbursement',\n 'projectlocation': 'project_location',\n 'administrativelocation': 'administrative_location',\n 'recipientcountry': 'recipient_country',\n 'recipientregion': 'recipient_region',\n 'policymarker': 'policy_marker',\n 'projectdocument': 'project_document',\n 'projectdocumentcategory': 'project_document_category',\n 'crsadd': 'crs_add',\n 'crsaddotherflag': 'crsadd_other_flag',\n 'fssforecast': 'fss_forecast',\n 'legacydata': 'legacy_data',\n }\n\n new_rel_objects = []\n\n for key in rel_objects:\n # First retrieve the unicode and create a new dict including the unicode\n db_table, old_key = key.split('.')\n Model = apps.get_model(db_table.split('_')[0], db_table.split('_')[1])\n unicode = Model.objects.get(pk=int(rel_objects[key])).__unicode__()\n new_dict_response = {\n 'new_id': rel_objects[key],\n 'unicode': unicode\n }\n\n # remove the 'rsr_' part (e.g. 
a key can be 'rsr_relatedproject') and look up the db_table\n # in the mapping, or take the default otherwise\n db_table = db_table[4:]\n if db_table in model_to_api:\n new_dict_response['old_id'] = '{0}.{1}'.format(model_to_api[db_table], old_key)\n else:\n new_dict_response['old_id'] = '{0}.{1}'.format(db_table, old_key)\n\n new_rel_objects.append(new_dict_response)\n\n return new_rel_objects\n\n\ndef add_changes(changes, obj, field, field_name, orig_data):\n \"\"\"\n Add the changes to the changes list in the required format. The changes list contains a list\n per related object, so we need to check if the object is already in the changes list and\n append the new changes to it.\n \"\"\"\n if obj not in [change[0] for change in changes]:\n # Object not yet in changes list\n changes.append([obj, [[field, field_name, orig_data]]])\n else:\n # Object in changes list, look it up and append the new changes\n for change in changes:\n if obj == change[0]:\n change[1].append([field, field_name, orig_data])\n break\n return changes\n\n\ndef handle_validation_error(e, fields, field_names, errors):\n validation_error_dict = dict(e)\n for field, field_name in zip(fields, field_names):\n if field in validation_error_dict:\n # Since we save the object per field, display the (first) error\n # of this field on the field itself.\n errors = add_error(errors, str(validation_error_dict[field][0]), field_name)\n validation_error_dict.pop(field)\n\n if validation_error_dict:\n for key, value in validation_error_dict.items():\n # Somewhere else in the model a validation error occurred (or a\n # combination of fields). We display this nonetheless and do\n # not save the field.\n field, field_name = fields[0], field_names[0]\n # Guess the correct field_name and display the error\n actual_field_name = field_name.replace('.{}.'.format(field), '.{}.'.format(key))\n errors = add_error(errors, str(value[0]), actual_field_name)\n\n\ndef update_object(Model, obj_id, fields, field_names, values, changes, errors,\n rel_objects, related_obj_id):\n \"\"\"Update an existing object.\n\n Take a list of fields and corresponding values, and update the object. The\n following steps are performed to do the update:\n\n 1. Try to retrieve the object.\n\n 2. Pre-process the data, ignore fields that have obviously incorrect data\n (wrong type and cannot be typecasted, etc.)\n\n 3. Set the attributes corresponding to the fields with the supplied values\n\n 4. Perform object and field validations\n\n 5. 
Return the changes and errors\n\n \"\"\"\n\n # Try to retrieve object with specified ID\n try:\n # Retrieve object and set new value of field\n obj = Model.objects.get(pk=int(obj_id))\n except (Model.DoesNotExist, ValueError) as e:\n # If object does not exist or 'obj_id' is not an integer, add an error\n # and do not process the object\n for field_name in field_names:\n errors = add_error(errors, str(e), field_name)\n return changes, errors, rel_objects\n\n # Set all the attributes with specified values\n for i, (field, field_name, value) in enumerate(zip(fields, field_names, values)):\n obj_data, errors = pre_process_data(field_name, value, errors)\n if field_name in [error['name'] for error in errors]:\n fields.pop(i)\n continue\n setattr(obj, field, obj_data)\n\n try:\n # The object has been retrieved, perform validations\n obj.full_clean(exclude=['primary_location',\n 'primary_organisation',\n 'last_update'])\n except ValidationError as e:\n handle_validation_error(e, fields, field_names, errors)\n except Exception as e:\n for field_name in field_names:\n # Just in case any other error will occur, this will also be\n # displayed underneath the field in the project editor.\n errors = add_error(errors, str(e), field_name)\n else:\n update_fields = fields\n # if the object has a last_modified_at field, include it in the update\n if hasattr(obj, 'last_modified_at'):\n update_fields = fields + ['last_modified_at']\n\n # No validation errors. Save the field and append the changes to the\n # changes list. In case of a non-Project object, add the object to the\n # related objects list, so that the ID will be replaced (in case of a\n # new object) and the unicode will be replaced.\n obj.save(update_fields=update_fields)\n for field, field_name, value in zip(fields, field_names, values):\n changes = add_changes(changes, obj, field, field_name, value)\n\n if not (related_obj_id in rel_objects or isinstance(obj, Project)):\n rel_objects[related_obj_id] = obj.pk\n\n return changes, errors, rel_objects\n\n\ndef update_m2m_object(project, Model, obj_id, field, orig_data, field_name, changes, errors,\n rel_objects, related_obj_id):\n\n m2m_relation = getattr(project, MANY_TO_MANY_FIELDS[Model])\n\n # We pre-process the data first. E.g. dates will be converted to datetime objects\n obj_data, errors = pre_process_data(field_name, orig_data, errors)\n if field_name in [error['name'] for error in errors]:\n return\n\n try:\n m2m_object = Model.objects.get(pk=int(obj_data))\n if obj_id is not None:\n # If there already was an appointed object in the many to many relation,\n # remove the old object first\n old_m2m_object = Model.objects.get(pk=int(obj_id))\n if old_m2m_object in m2m_relation.all():\n m2m_relation.remove(old_m2m_object)\n # Add the new many to many object to the project\n m2m_relation.add(m2m_object)\n changes = add_changes(changes, m2m_object, field, field_name, obj_data)\n if related_obj_id not in rel_objects:\n rel_objects[related_obj_id] = obj_data\n\n except Model.DoesNotExist as e:\n errors = add_error(errors, str(e), field_name)\n\n\ndef create_object(Model, kwargs, fields, field_names, values, changes, errors, rel_objects,\n related_obj_id):\n \"\"\"Create a new object.\n\n Either an error can occur while creating the object, or during the\n full_clean() function. 
In any case, catch the error and display it in the\n project editor.\n\n \"\"\"\n try:\n # Retrieve the object with the new value and perform validations.\n obj = Model.objects.create(**kwargs)\n obj.full_clean()\n except ValidationError as e:\n handle_validation_error(e, fields, field_names, errors)\n obj.delete()\n except MultipleObjectsReturned:\n # Multiple reporting organisations are not allowed and will raise a MultipleObjectsReturned\n # exception. In this case, display a nice error message and delete the created partnership.\n for field_name in field_names:\n message = unicode(_(u'There can be only one reporting organisation'))\n # FIXME: Not sure what the field name should be here...\n errors = add_error(errors, str(message), field_name)\n obj.delete()\n except Exception as e:\n # Just in case any other error will occur, this will also be displayed\n # underneath the field in the project editor.\n for field_name in field_names:\n errors = add_error(errors, str(e), field_name)\n else:\n # No validation errors. Save the field and append the changes to the changes list.\n # Add the object to the related objects list, so that the ID and unicode will be replaced.\n for field, field_name, value in zip(fields, field_names, values):\n changes = add_changes(changes, obj, field, field_name, value)\n rel_objects[related_obj_id] = obj.pk\n\n return changes, errors, rel_objects\n\n\ndef create_related_object(parent_obj_id, Model, fields, field_names, values, changes, errors,\n rel_objects, related_obj_id):\n \"\"\"Create a related object\n\n Create a related object with all the values for all the fields. It is\n called only once per object for each save in the project editor.\n\n Related objects are created \"fully\", and never need to be updated post\n creation, in a single project editor save.\n\n \"\"\"\n\n # Related object has not yet been created (not added to rel_objects dict)\n kwargs = dict()\n\n if Model in RELATED_OBJECTS_MAPPING:\n # Special mapping needed\n RelatedModel, related_field = RELATED_OBJECTS_MAPPING[Model]\n kwargs[related_field] = RelatedModel.objects.get(pk=parent_obj_id)\n else:\n # Project is the related object\n kwargs['project'] = Project.objects.get(pk=parent_obj_id)\n\n # Set all the attributes with specified values\n for field, field_name, value in zip(fields, field_names, values):\n obj_data, errors = pre_process_data(field_name, value, errors)\n if field_name in [error['name'] for error in errors]:\n continue\n # Add field data, create new object and add new id to rel_objects dict\n kwargs[field] = obj_data\n\n changes, errors, rel_objects = create_object(\n Model, kwargs, fields, field_names, values, changes, errors, rel_objects,\n related_obj_id\n )\n\n return changes, errors, rel_objects\n\n\ndef group_data_by_objects(data):\n \"\"\"Group form data by objects (based on model and id)\"\"\"\n grouped_data = {}\n for key, value in data.items():\n key_parts = split_key(key)\n group_key = (key_parts.model.model_name,) + tuple(key_parts.ids)\n grouped_data.setdefault(group_key, []).append((key, value, key_parts))\n return grouped_data\n\n\ndef group_get_all_fields(grouped_data, key_parts):\n group_key = (key_parts.model.model_name,) + tuple(key_parts.ids)\n update_data = grouped_data[group_key]\n keys = [key for key, _, _ in update_data]\n values = [value for _, value, _ in update_data]\n fields = [key_part.field for _, _, key_part in update_data]\n return fields, values, keys\n\n\ndef sort_keys(x):\n \"\"\"Compute a level at which the model corresponding to the 
key occurs.\n\n This function is used to sort keys in the data such that the objects higher\n in the hierarchy appear before the objects that depend on them.\n\n For example, Project -> Result -> Indicator, IndicatorPeriod\n\n The level is computed based on the number of steps we can take in the\n RELATED_OBJECTS_MAPPING hierarchy before we reach the Project.\n\n \"\"\"\n key_parts = split_key(x)\n Model = apps.get_model(key_parts.model.app, key_parts.model.model_name)\n level = 1\n while Model in RELATED_OBJECTS_MAPPING:\n level += 1\n Model, _ = RELATED_OBJECTS_MAPPING[Model]\n if Model in MANY_TO_MANY_FIELDS or Model != Project:\n level += 1\n return (level, key_parts.ids)\n\n\ndef create_or_update_objects_from_data(project, data):\n errors, changes, rel_objects = [], [], {}\n\n # Keys like this are possible:\n # 'rsr_indicatorperiod.period_start.1234_new-0_new-0_new-0' Meaning that\n # there is a new indicator period (the last id is 'new-0'), with a new\n # indicator (second last id is also 'new-0'), with a new result (second id\n # is also 'new-0'), on an existing project (project id is '1234'). We sort\n # the keys in such a way that the result appears before the indicator which\n # appears before the indicatorperiod. This ensures that objects higher in\n # the hierarchy, which lower objects depend on, are created first.\n\n grouped_data = group_data_by_objects(data)\n sorted_keys = sorted(data.keys(), key=sort_keys)\n\n for key in sorted_keys:\n\n # When saving all fields on an object, a bunch of fields are\n # removed together. This may cause some keys to not be present,\n # when iterating over the sorted keys.\n if key not in data:\n continue\n\n # The keys in form data are of format \"rsr_project.title.1234\".\n # Separated by .'s, the data contains the model name, field name and object id list\n key_parts = split_key(key)\n\n # Retrieve the model and related object ID (e.g. rsr_project.1234)\n Model = apps.get_model(key_parts.model.app, key_parts.model.model_name)\n related_obj_id = ''.join(\n [key_parts.model.table_name, '.', '_'.join(key_parts.ids)]\n )\n\n if Model in MANY_TO_MANY_FIELDS:\n # This field is a many to many field, which need special handling\n obj_id = None if len(key_parts.ids) != 1 else key_parts.ids[0]\n update_m2m_object(\n project, Model, obj_id, key_parts.field, data[key], key, changes, errors,\n rel_objects, related_obj_id\n )\n data.pop(key, None)\n\n elif len(key_parts.ids) == 1:\n # Already existing object, update it\n fields, values, keys = group_get_all_fields(grouped_data, key_parts)\n changes, errors, rel_objects = update_object(\n Model, key_parts.ids[0], fields, keys, values, changes, errors, rel_objects,\n related_obj_id\n )\n for key in keys:\n data.pop(key, None)\n\n else:\n # New object, with potentially a new parent as well\n parent_id = '_'.join(key_parts.ids[:-1])\n\n if 'new' not in parent_id:\n # New object, but parent is already existing\n parent_obj_id = key_parts.ids[-2]\n\n else:\n # New object, and parent are new according to the key.\n # However, it is possible that the parent was already\n # created earlier in the script. So we also check if\n # the parent object was already created earlier.\n ParentModel, _ = RELATED_OBJECTS_MAPPING[Model]\n parent_obj_rel_obj_key = ParentModel._meta.db_table + '.' 
+ parent_id\n if parent_obj_rel_obj_key in rel_objects:\n parent_obj_id = rel_objects[parent_obj_rel_obj_key]\n else:\n parent_obj_id = None\n\n if parent_obj_id is not None:\n fields, values, keys = group_get_all_fields(grouped_data, key_parts)\n create_related_object(\n parent_obj_id, Model, fields, keys, values, changes, errors, rel_objects,\n related_obj_id\n )\n for key in keys:\n data.pop(key, None)\n\n else:\n # Parent object has not been created yet.\n # We should never get to this state!\n raise RuntimeError('There was a problem walking the hierarchy of objects')\n\n return errors, changes, rel_objects\n", "path": "akvo/rest/views/project_editor_utils.py" } ]
diff --git a/akvo/rest/views/project_editor_utils.py b/akvo/rest/views/project_editor_utils.py index a591f41624..a686d6fe41 100644 --- a/akvo/rest/views/project_editor_utils.py +++ b/akvo/rest/views/project_editor_utils.py @@ -569,7 +569,7 @@ def sort_keys(x): Model, _ = RELATED_OBJECTS_MAPPING[Model] if Model in MANY_TO_MANY_FIELDS or Model != Project: level += 1 - return level + return (level, key_parts.ids) def create_or_update_objects_from_data(project, data): diff --git a/akvo/rsr/tests/rest/test_project_editor.py b/akvo/rsr/tests/rest/test_project_editor.py index ddc3c2cd43..885438fb2e 100644 --- a/akvo/rsr/tests/rest/test_project_editor.py +++ b/akvo/rsr/tests/rest/test_project_editor.py @@ -996,6 +996,9 @@ def test_creating_project_attirbute_hierarchy(self): self.assertEqual(result_2.type, result_type_2) self.assertEqual(result_2.aggregation_status, result_aggregation_2 == '1') + # Verify that ordering is maintained + self.assertLess(result.id, result_2.id) + indicator = Indicator.objects.get(result=result) self.assertEqual(indicator.title, indicator_title) self.assertEqual(indicator.description, indicator_description)
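The one-line change in `sort_keys` above (returning `(level, key_parts.ids)` instead of a bare `level`) is what the new `assertLess(result.id, result_2.id)` test exercises: with only the level in the sort key, sibling objects created in the same save can be processed in whatever order the form-data dict happens to iterate, whereas including the ids makes `new-0` keys sort ahead of `new-1` keys, so siblings are created in submission order. A rough, self-contained sketch of that effect (the key strings and level numbers are invented for illustration, not taken from the Akvo code):

```python
# Toy model of the sort-key change: the hierarchy level comes first, the id list second.
form_keys = [
    "rsr_indicator.title.1234_new-0_new-0",
    "rsr_result.title.1234_new-1",
    "rsr_result.title.1234_new-0",
    "rsr_indicator.title.1234_new-1_new-0",
]

# Pretend hierarchy depth: results sit below the project, indicators below results.
LEVELS = {"rsr_result": 2, "rsr_indicator": 3}

def sort_key(key):
    model, _field, ids = key.split(".")
    # Returning (level, ids) rather than just the level keeps 'new-0' siblings
    # ahead of 'new-1' siblings, so they are created in submission order.
    return (LEVELS[model], ids.split("_"))

for key in sorted(form_keys, key=sort_key):
    print(key)
# Prints both results first (new-0 before new-1), then the indicators in the
# same parent order.
```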
translate__translate-3897
Traceback with non-ASCII header key
If a PO header contains a non-ASCII key (left of the colon), we get a traceback. An example file is here: https://gitlab.gnome.org/GNOME/gimp/blob/master/po-python/oc.po#L21

The traceback comes from poheader.gettargetlanguage():

```python-traceback
  File "env/lib/python2.7/site-packages/translate/storage/poheader.py", line 286, in gettargetlanguage
    header = self.parseheader()
  File "env/lib/python2.7/site-packages/translate/storage/poheader.py", line 219, in parseheader
    return parseheaderstring(header.target)
  File "env/lib/python2.7/site-packages/translate/storage/poheader.py", line 52, in parseheaderstring
    key = str(key.strip())
UnicodeEncodeError: 'ascii' codec can't encode character u'\xed' in position 4: ordinal not in range(128)
```

I don't have a usable toolkit environment configured, so anybody is free to fix this if they can/want.

I guess that in translate/storage/poheader.py::parseheaderstring(), line 52, the `str(key.strip())` could probably just drop the str(), but we'll have to check the tests.

Also, I think in some places header dictionaries might be passed as **kwargs, and non-ASCII values might not work in Python 2, so I'm not sure it is a trivial change.
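To make the failure concrete: under Python 2, calling `str()` on a `unicode` header key forces an implicit ASCII encode, which is exactly the `UnicodeEncodeError` in the traceback. A minimal standalone sketch (the crash reproduces only under Python 2; the header key below is made up for illustration, not taken from the GIMP file):

```python
# -*- coding: utf-8 -*-
# Python 2 sketch of the crash described above.
key = u"Rev\xedsion-Data"        # hypothetical header key containing a non-ASCII 'í'

try:
    str(key.strip())             # implicit ASCII encode of a unicode object
except UnicodeEncodeError as err:
    print("crashes as in the report: %s" % err)

# Dropping the str() keeps the key as unicode, and using it as a dict key works fine:
headervalues = {}
headervalues[key.strip()] = u"2019-01-01"
print(headervalues.keys())
```

The diff below does exactly that: it removes the `str()` call in `parseheaderstring()` and adds a test that round-trips a header with a non-ASCII `Zkouška` key.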
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2002-2011 Zuza Software Foundation\n#\n# This file is part of the Translate Toolkit.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"class that handles all header functions for a header in a po file\"\"\"\n\nimport re\nimport six\nimport time\nfrom collections import OrderedDict\n\nfrom translate import __version__\nfrom translate.misc.dictutils import cidict\n\n\nauthor_re = re.compile(r\".*<\\S+@\\S+>.*\\d{4,4}\")\n\ndefault_header = {\n \"Project-Id-Version\": \"PACKAGE VERSION\",\n \"PO-Revision-Date\": \"YEAR-MO-DA HO:MI+ZONE\",\n \"Last-Translator\": \"FULL NAME <EMAIL@ADDRESS>\",\n \"Language-Team\": \"LANGUAGE <[email protected]>\",\n \"Plural-Forms\": \"nplurals=INTEGER; plural=EXPRESSION;\",\n}\n\n\ndef parseheaderstring(input):\n \"\"\"Parses an input string with the definition of a PO header and returns\n the interpreted values as a dictionary.\n \"\"\"\n headervalues = OrderedDict()\n for line in input.split(\"\\n\"):\n if not line or \":\" not in line:\n continue\n key, value = line.split(\":\", 1)\n #We don't want unicode keys\n key = str(key.strip())\n headervalues[key] = value.strip()\n return headervalues\n\n\ndef tzstring():\n \"\"\"Returns the timezone as a string in the format [+-]0000, eg +0200.\n\n :rtype: str\n \"\"\"\n if time.daylight:\n tzoffset = time.altzone\n else:\n tzoffset = time.timezone\n\n hours, minutes = time.gmtime(abs(tzoffset))[3:5]\n if tzoffset > 0:\n hours *= -1\n tz = str(\"%+d\" % hours).zfill(3) + str(minutes).zfill(2)\n return tz\n\n\ndef update(existing, add=False, **kwargs):\n \"\"\"Update an existing header dictionary with the values in kwargs, adding\n new values only if add is true.\n\n :return: Updated dictionary of header entries\n :rtype: dict of strings\n \"\"\"\n headerargs = OrderedDict()\n fixedargs = cidict()\n for key, value in kwargs.items():\n key = key.replace(\"_\", \"-\")\n if key.islower():\n key = key.title()\n fixedargs[key] = value\n removed = []\n for key in poheader.header_order:\n if key in existing:\n if key in fixedargs:\n headerargs[key] = fixedargs.pop(key)\n else:\n headerargs[key] = existing[key]\n removed.append(key)\n elif add and key in fixedargs:\n headerargs[key] = fixedargs.pop(key)\n for key, value in six.iteritems(existing):\n if key not in removed:\n headerargs[key] = value\n if add:\n for key in fixedargs:\n headerargs[key] = fixedargs[key]\n return headerargs\n\n\nclass poheader(object):\n \"\"\"This class implements functionality for manipulation of po file headers.\n This class is a mix-in class and useless on its own. 
It must be used from\n all classes which represent a po file\n \"\"\"\n\n x_generator = \"Translate Toolkit %s\" % __version__.sver\n\n header_order = [\n \"Project-Id-Version\",\n \"Report-Msgid-Bugs-To\",\n \"POT-Creation-Date\",\n \"PO-Revision-Date\",\n \"Last-Translator\",\n \"Language-Team\",\n \"Language\",\n \"MIME-Version\",\n \"Content-Type\",\n \"Content-Transfer-Encoding\",\n \"Plural-Forms\",\n \"X-Accelerator-Marker\",\n \"X-Generator\",\n \"X-Merge-On\",\n ]\n\n def init_headers(self, charset='UTF-8', encoding='8bit', **kwargs):\n \"\"\"sets default values for po headers\"\"\"\n #FIXME: we need to allow at least setting target language, pluralforms and generator\n headerdict = self.makeheaderdict(charset=charset, encoding=encoding, **kwargs)\n self.updateheader(add=True, **headerdict)\n return self.header()\n\n def makeheaderdict(self,\n charset=\"CHARSET\",\n encoding=\"ENCODING\",\n project_id_version=None,\n pot_creation_date=None,\n po_revision_date=None,\n last_translator=None,\n language_team=None,\n mime_version=None,\n plural_forms=None,\n report_msgid_bugs_to=None,\n **kwargs):\n \"\"\"Create a header dictionary with useful defaults.\n\n pot_creation_date can be None (current date) or a value (datetime or string)\n po_revision_date can be None (form), False (=pot_creation_date), True (=now),\n or a value (datetime or string)\n\n :return: Dictionary with the header items\n :rtype: dict of strings\n \"\"\"\n if project_id_version is None:\n project_id_version = \"PACKAGE VERSION\"\n if pot_creation_date is None or pot_creation_date is True:\n pot_creation_date = time.strftime(\"%Y-%m-%d %H:%M\") + tzstring()\n if isinstance(pot_creation_date, time.struct_time):\n pot_creation_date = time.strftime(\"%Y-%m-%d %H:%M\", pot_creation_date) + tzstring()\n if po_revision_date is None:\n po_revision_date = \"YEAR-MO-DA HO:MI+ZONE\"\n elif po_revision_date is False:\n po_revision_date = pot_creation_date\n elif po_revision_date is True:\n po_revision_date = time.strftime(\"%Y-%m-%d %H:%M\") + tzstring()\n if isinstance(po_revision_date, time.struct_time):\n po_revision_date = time.strftime(\"%Y-%m-%d %H:%M\", po_revision_date) + tzstring()\n if last_translator is None:\n last_translator = \"FULL NAME <EMAIL@ADDRESS>\"\n if language_team is None:\n language_team = \"LANGUAGE <[email protected]>\"\n if mime_version is None:\n mime_version = \"1.0\"\n if report_msgid_bugs_to is None:\n report_msgid_bugs_to = \"\"\n\n defaultargs = OrderedDict()\n defaultargs[\"Project-Id-Version\"] = project_id_version\n defaultargs[\"Report-Msgid-Bugs-To\"] = report_msgid_bugs_to\n defaultargs[\"POT-Creation-Date\"] = pot_creation_date\n defaultargs[\"PO-Revision-Date\"] = po_revision_date\n defaultargs[\"Last-Translator\"] = last_translator\n defaultargs[\"Language-Team\"] = language_team\n defaultargs[\"MIME-Version\"] = mime_version\n defaultargs[\"Content-Type\"] = \"text/plain; charset=%s\" % charset\n defaultargs[\"Content-Transfer-Encoding\"] = encoding\n if plural_forms:\n defaultargs[\"Plural-Forms\"] = plural_forms\n defaultargs[\"X-Generator\"] = self.x_generator\n\n return update(defaultargs, add=True, **kwargs)\n\n def header(self):\n \"\"\"Returns the header element, or None. Only the first element is\n allowed to be a header. 
Note that this could still return an empty\n header element, if present.\n \"\"\"\n if len(self.units) == 0:\n return None\n candidate = self.units[0]\n if candidate.isheader():\n return candidate\n else:\n return None\n\n def parseheader(self):\n \"\"\"Parses the PO header and returns the interpreted values as a\n dictionary.\n \"\"\"\n header = self.header()\n if not header:\n return {}\n return parseheaderstring(header.target)\n\n def updateheader(self, add=False, **kwargs):\n \"\"\"Updates the fields in the PO style header.\n\n This will create a header if add == True.\n \"\"\"\n header = self.header()\n if not header:\n if add:\n header = self.makeheader(**kwargs)\n self._insert_header(header)\n else:\n headeritems = update(self.parseheader(), add, **kwargs)\n keys = headeritems.keys()\n if \"Content-Type\" not in keys or \"charset=CHARSET\" in headeritems[\"Content-Type\"]:\n headeritems[\"Content-Type\"] = \"text/plain; charset=UTF-8\"\n if \"Content-Transfer-Encoding\" not in keys or \"ENCODING\" in headeritems[\"Content-Transfer-Encoding\"]:\n headeritems[\"Content-Transfer-Encoding\"] = \"8bit\"\n headerString = \"\"\n for key, value in headeritems.items():\n if value is not None:\n headerString += \"%s: %s\\n\" % (key, value)\n header.target = headerString\n header.markfuzzy(False) # TODO: check why we do this?\n return header\n\n def _insert_header(self, header):\n # we should be using .addunit() or some equivalent in case the\n # unit needs to refer back to the store, etc. This might be\n # subtly broken for POXLIFF, since we don't dupliate the code\n # from lisa::addunit().\n header._store = self\n self.units.insert(0, header)\n\n def getheaderplural(self):\n \"\"\"Returns the nplural and plural values from the header.\"\"\"\n header = self.parseheader()\n pluralformvalue = header.get('Plural-Forms', None)\n if pluralformvalue is None:\n return None, None\n nplural = re.findall(\"nplurals=(.+?);\", pluralformvalue)\n plural = re.findall(\"plural=(.+?);?$\", pluralformvalue)\n if not nplural or nplural[0] == \"INTEGER\":\n nplural = None\n else:\n nplural = nplural[0]\n if not plural or plural[0] == \"EXPRESSION\":\n plural = None\n else:\n plural = plural[0]\n return nplural, plural\n\n def updateheaderplural(self, nplurals, plural):\n \"\"\"Update the Plural-Form PO header.\"\"\"\n if isinstance(nplurals, six.string_types):\n nplurals = int(nplurals)\n self.updateheader(add=True, Plural_Forms=\"nplurals=%d; plural=%s;\" % (nplurals, plural))\n\n def gettargetlanguage(self):\n \"\"\"Return the target language based on information in the header.\n\n The target language is determined in the following sequence:\n 1. Use the 'Language' entry in the header.\n 2. Poedit's custom headers.\n 3. 
Analysing the 'Language-Team' entry.\n \"\"\"\n header = self.parseheader()\n lang = header.get('Language', None)\n if lang is not None:\n from translate.lang.data import langcode_ire\n if langcode_ire.match(lang):\n return lang\n else:\n lang = None\n if 'X-Poedit-Language' in header:\n from translate.lang import poedit\n language = header.get('X-Poedit-Language')\n country = header.get('X-Poedit-Country')\n return poedit.isocode(language, country)\n if 'Language-Code' in header: # Used in Plone files\n return header.get('Language-Code')\n if 'Language-Team' in header:\n from translate.lang.team import guess_language\n return guess_language(header.get('Language-Team'))\n return None\n\n def settargetlanguage(self, lang):\n \"\"\"Set the target language in the header.\n\n This removes any custom Poedit headers if they exist.\n\n :param lang: the new target language code\n :type lang: str\n \"\"\"\n if isinstance(lang, six.string_types) and len(lang) > 1:\n self.updateheader(add=True, Language=lang, X_Poedit_Language=None, X_Poedit_Country=None)\n\n def getprojectstyle(self):\n \"\"\"Return the project based on information in the header.\n\n The project is determined in the following sequence:\n 1. Use the 'X-Project-Style' entry in the header.\n 2. Use 'Report-Msgid-Bug-To' entry\n 3. Use the 'X-Accelerator' entry\n 4. Use the Project ID\n 5. Analyse the file itself (not yet implemented)\n \"\"\"\n header = self.parseheader()\n project = header.get('X-Project-Style', None)\n if project is not None:\n return project\n bug_address = header.get('Report-Msgid-Bugs-To', None)\n if bug_address is not None:\n if 'bugzilla.gnome.org' in bug_address:\n return 'gnome'\n if 'bugs.kde.org' in bug_address:\n return 'kde'\n accelerator = header.get('X-Accelerator-Marker', None)\n if accelerator is not None:\n if accelerator == \"~\":\n return \"openoffice\"\n elif accelerator == \"&\":\n return \"mozilla\"\n project_id = header.get('Project-Id-Version', None)\n if project_id is not None:\n if 'gnome' in project_id.lower():\n return \"gnome\"\n # TODO Call some project guessing code and probably move all of the above there also\n return None\n\n def setprojectstyle(self, project_style):\n \"\"\"Set the project in the header.\n\n :param project_style: the new project\n :type project_style: str\n \"\"\"\n self.updateheader(add=True, X_Project_Style=project_style)\n\n def mergeheaders(self, otherstore):\n \"\"\"Merges another header with this header.\n\n This header is assumed to be the template.\n\n :type otherstore: :class:`~translate.storage.base.TranslationStore`\n \"\"\"\n\n newvalues = otherstore.parseheader()\n retain_list = (\"Project-Id-Version\", \"PO-Revision-Date\", \"Last-Translator\",\n \"Language-Team\", \"Plural-Forms\")\n retain = dict((key, newvalues[key]) for key in retain_list if newvalues.get(key, None) and newvalues[key] != default_header.get(key, None))\n self.updateheader(**retain)\n\n def updatecontributor(self, name, email=None):\n \"\"\"Add contribution comments if necessary.\"\"\"\n header = self.header()\n if not header:\n return\n prelines = []\n contriblines = []\n postlines = []\n contribexists = False\n incontrib = False\n outcontrib = False\n for line in header.getnotes(\"translator\").split('\\n'):\n line = line.strip()\n if line == u\"FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.\":\n incontrib = True\n continue\n if author_re.match(line):\n incontrib = True\n contriblines.append(line)\n continue\n if line == \"\" and incontrib:\n incontrib = False\n outcontrib = True\n if 
incontrib:\n contriblines.append(line)\n elif not outcontrib:\n prelines.append(line)\n else:\n postlines.append(line)\n\n year = time.strftime(\"%Y\")\n contribexists = False\n for i in range(len(contriblines)):\n line = contriblines[i]\n if name in line and (email is None or email in line):\n contribexists = True\n if year in line:\n break\n else:\n #The contributor is there, but not for this year\n if line[-1] == '.':\n line = line[:-1]\n contriblines[i] = \"%s, %s.\" % (line, year)\n\n if not contribexists:\n # Add a new contributor\n if email:\n contriblines.append(\"%s <%s>, %s.\" % (name, email, year))\n else:\n contriblines.append(\"%s, %s.\" % (name, year))\n\n header.removenotes()\n header.addnote(\"\\n\".join(prelines))\n header.addnote(\"\\n\".join(contriblines))\n header.addnote(\"\\n\".join(postlines))\n\n def makeheader(self, **kwargs):\n \"\"\"Create a header for the given filename.\n\n Check .makeheaderdict() for information on parameters.\n \"\"\"\n headerpo = self.UnitClass(\"\", encoding=self._encoding)\n headerpo.markfuzzy()\n headeritems = self.makeheaderdict(**kwargs)\n headervalue = \"\"\n for (key, value) in headeritems.items():\n if value is None:\n continue\n headervalue += \"%s: %s\\n\" % (key, value)\n headerpo.target = headervalue\n return headerpo\n", "path": "translate/storage/poheader.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2002-2011 Zuza Software Foundation\n#\n# This file is part of the Translate Toolkit.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"class that handles all header functions for a header in a po file\"\"\"\n\nimport re\nimport six\nimport time\nfrom collections import OrderedDict\n\nfrom translate import __version__\nfrom translate.misc.dictutils import cidict\n\n\nauthor_re = re.compile(r\".*<\\S+@\\S+>.*\\d{4,4}\")\n\ndefault_header = {\n \"Project-Id-Version\": \"PACKAGE VERSION\",\n \"PO-Revision-Date\": \"YEAR-MO-DA HO:MI+ZONE\",\n \"Last-Translator\": \"FULL NAME <EMAIL@ADDRESS>\",\n \"Language-Team\": \"LANGUAGE <[email protected]>\",\n \"Plural-Forms\": \"nplurals=INTEGER; plural=EXPRESSION;\",\n}\n\n\ndef parseheaderstring(input):\n \"\"\"Parses an input string with the definition of a PO header and returns\n the interpreted values as a dictionary.\n \"\"\"\n headervalues = OrderedDict()\n for line in input.split(\"\\n\"):\n if not line or \":\" not in line:\n continue\n key, value = line.split(\":\", 1)\n key = key.strip()\n headervalues[key] = value.strip()\n return headervalues\n\n\ndef tzstring():\n \"\"\"Returns the timezone as a string in the format [+-]0000, eg +0200.\n\n :rtype: str\n \"\"\"\n if time.daylight:\n tzoffset = time.altzone\n else:\n tzoffset = time.timezone\n\n hours, minutes = time.gmtime(abs(tzoffset))[3:5]\n if tzoffset > 0:\n hours *= -1\n tz = str(\"%+d\" % hours).zfill(3) + str(minutes).zfill(2)\n return tz\n\n\ndef update(existing, add=False, **kwargs):\n \"\"\"Update an existing header dictionary with the values in kwargs, adding\n new values only if add is true.\n\n :return: Updated dictionary of header entries\n :rtype: dict of strings\n \"\"\"\n headerargs = OrderedDict()\n fixedargs = cidict()\n for key, value in kwargs.items():\n key = key.replace(\"_\", \"-\")\n if key.islower():\n key = key.title()\n fixedargs[key] = value\n removed = []\n for key in poheader.header_order:\n if key in existing:\n if key in fixedargs:\n headerargs[key] = fixedargs.pop(key)\n else:\n headerargs[key] = existing[key]\n removed.append(key)\n elif add and key in fixedargs:\n headerargs[key] = fixedargs.pop(key)\n for key, value in six.iteritems(existing):\n if key not in removed:\n headerargs[key] = value\n if add:\n for key in fixedargs:\n headerargs[key] = fixedargs[key]\n return headerargs\n\n\nclass poheader(object):\n \"\"\"This class implements functionality for manipulation of po file headers.\n This class is a mix-in class and useless on its own. 
It must be used from\n all classes which represent a po file\n \"\"\"\n\n x_generator = \"Translate Toolkit %s\" % __version__.sver\n\n header_order = [\n \"Project-Id-Version\",\n \"Report-Msgid-Bugs-To\",\n \"POT-Creation-Date\",\n \"PO-Revision-Date\",\n \"Last-Translator\",\n \"Language-Team\",\n \"Language\",\n \"MIME-Version\",\n \"Content-Type\",\n \"Content-Transfer-Encoding\",\n \"Plural-Forms\",\n \"X-Accelerator-Marker\",\n \"X-Generator\",\n \"X-Merge-On\",\n ]\n\n def init_headers(self, charset='UTF-8', encoding='8bit', **kwargs):\n \"\"\"sets default values for po headers\"\"\"\n #FIXME: we need to allow at least setting target language, pluralforms and generator\n headerdict = self.makeheaderdict(charset=charset, encoding=encoding, **kwargs)\n self.updateheader(add=True, **headerdict)\n return self.header()\n\n def makeheaderdict(self,\n charset=\"CHARSET\",\n encoding=\"ENCODING\",\n project_id_version=None,\n pot_creation_date=None,\n po_revision_date=None,\n last_translator=None,\n language_team=None,\n mime_version=None,\n plural_forms=None,\n report_msgid_bugs_to=None,\n **kwargs):\n \"\"\"Create a header dictionary with useful defaults.\n\n pot_creation_date can be None (current date) or a value (datetime or string)\n po_revision_date can be None (form), False (=pot_creation_date), True (=now),\n or a value (datetime or string)\n\n :return: Dictionary with the header items\n :rtype: dict of strings\n \"\"\"\n if project_id_version is None:\n project_id_version = \"PACKAGE VERSION\"\n if pot_creation_date is None or pot_creation_date is True:\n pot_creation_date = time.strftime(\"%Y-%m-%d %H:%M\") + tzstring()\n if isinstance(pot_creation_date, time.struct_time):\n pot_creation_date = time.strftime(\"%Y-%m-%d %H:%M\", pot_creation_date) + tzstring()\n if po_revision_date is None:\n po_revision_date = \"YEAR-MO-DA HO:MI+ZONE\"\n elif po_revision_date is False:\n po_revision_date = pot_creation_date\n elif po_revision_date is True:\n po_revision_date = time.strftime(\"%Y-%m-%d %H:%M\") + tzstring()\n if isinstance(po_revision_date, time.struct_time):\n po_revision_date = time.strftime(\"%Y-%m-%d %H:%M\", po_revision_date) + tzstring()\n if last_translator is None:\n last_translator = \"FULL NAME <EMAIL@ADDRESS>\"\n if language_team is None:\n language_team = \"LANGUAGE <[email protected]>\"\n if mime_version is None:\n mime_version = \"1.0\"\n if report_msgid_bugs_to is None:\n report_msgid_bugs_to = \"\"\n\n defaultargs = OrderedDict()\n defaultargs[\"Project-Id-Version\"] = project_id_version\n defaultargs[\"Report-Msgid-Bugs-To\"] = report_msgid_bugs_to\n defaultargs[\"POT-Creation-Date\"] = pot_creation_date\n defaultargs[\"PO-Revision-Date\"] = po_revision_date\n defaultargs[\"Last-Translator\"] = last_translator\n defaultargs[\"Language-Team\"] = language_team\n defaultargs[\"MIME-Version\"] = mime_version\n defaultargs[\"Content-Type\"] = \"text/plain; charset=%s\" % charset\n defaultargs[\"Content-Transfer-Encoding\"] = encoding\n if plural_forms:\n defaultargs[\"Plural-Forms\"] = plural_forms\n defaultargs[\"X-Generator\"] = self.x_generator\n\n return update(defaultargs, add=True, **kwargs)\n\n def header(self):\n \"\"\"Returns the header element, or None. Only the first element is\n allowed to be a header. 
Note that this could still return an empty\n header element, if present.\n \"\"\"\n if len(self.units) == 0:\n return None\n candidate = self.units[0]\n if candidate.isheader():\n return candidate\n else:\n return None\n\n def parseheader(self):\n \"\"\"Parses the PO header and returns the interpreted values as a\n dictionary.\n \"\"\"\n header = self.header()\n if not header:\n return {}\n return parseheaderstring(header.target)\n\n def updateheader(self, add=False, **kwargs):\n \"\"\"Updates the fields in the PO style header.\n\n This will create a header if add == True.\n \"\"\"\n header = self.header()\n if not header:\n if add:\n header = self.makeheader(**kwargs)\n self._insert_header(header)\n else:\n headeritems = update(self.parseheader(), add, **kwargs)\n keys = headeritems.keys()\n if \"Content-Type\" not in keys or \"charset=CHARSET\" in headeritems[\"Content-Type\"]:\n headeritems[\"Content-Type\"] = \"text/plain; charset=UTF-8\"\n if \"Content-Transfer-Encoding\" not in keys or \"ENCODING\" in headeritems[\"Content-Transfer-Encoding\"]:\n headeritems[\"Content-Transfer-Encoding\"] = \"8bit\"\n headerString = \"\"\n for key, value in headeritems.items():\n if value is not None:\n headerString += \"%s: %s\\n\" % (key, value)\n header.target = headerString\n header.markfuzzy(False) # TODO: check why we do this?\n return header\n\n def _insert_header(self, header):\n # we should be using .addunit() or some equivalent in case the\n # unit needs to refer back to the store, etc. This might be\n # subtly broken for POXLIFF, since we don't dupliate the code\n # from lisa::addunit().\n header._store = self\n self.units.insert(0, header)\n\n def getheaderplural(self):\n \"\"\"Returns the nplural and plural values from the header.\"\"\"\n header = self.parseheader()\n pluralformvalue = header.get('Plural-Forms', None)\n if pluralformvalue is None:\n return None, None\n nplural = re.findall(\"nplurals=(.+?);\", pluralformvalue)\n plural = re.findall(\"plural=(.+?);?$\", pluralformvalue)\n if not nplural or nplural[0] == \"INTEGER\":\n nplural = None\n else:\n nplural = nplural[0]\n if not plural or plural[0] == \"EXPRESSION\":\n plural = None\n else:\n plural = plural[0]\n return nplural, plural\n\n def updateheaderplural(self, nplurals, plural):\n \"\"\"Update the Plural-Form PO header.\"\"\"\n if isinstance(nplurals, six.string_types):\n nplurals = int(nplurals)\n self.updateheader(add=True, Plural_Forms=\"nplurals=%d; plural=%s;\" % (nplurals, plural))\n\n def gettargetlanguage(self):\n \"\"\"Return the target language based on information in the header.\n\n The target language is determined in the following sequence:\n 1. Use the 'Language' entry in the header.\n 2. Poedit's custom headers.\n 3. 
Analysing the 'Language-Team' entry.\n \"\"\"\n header = self.parseheader()\n lang = header.get('Language', None)\n if lang is not None:\n from translate.lang.data import langcode_ire\n if langcode_ire.match(lang):\n return lang\n else:\n lang = None\n if 'X-Poedit-Language' in header:\n from translate.lang import poedit\n language = header.get('X-Poedit-Language')\n country = header.get('X-Poedit-Country')\n return poedit.isocode(language, country)\n if 'Language-Code' in header: # Used in Plone files\n return header.get('Language-Code')\n if 'Language-Team' in header:\n from translate.lang.team import guess_language\n return guess_language(header.get('Language-Team'))\n return None\n\n def settargetlanguage(self, lang):\n \"\"\"Set the target language in the header.\n\n This removes any custom Poedit headers if they exist.\n\n :param lang: the new target language code\n :type lang: str\n \"\"\"\n if isinstance(lang, six.string_types) and len(lang) > 1:\n self.updateheader(add=True, Language=lang, X_Poedit_Language=None, X_Poedit_Country=None)\n\n def getprojectstyle(self):\n \"\"\"Return the project based on information in the header.\n\n The project is determined in the following sequence:\n 1. Use the 'X-Project-Style' entry in the header.\n 2. Use 'Report-Msgid-Bug-To' entry\n 3. Use the 'X-Accelerator' entry\n 4. Use the Project ID\n 5. Analyse the file itself (not yet implemented)\n \"\"\"\n header = self.parseheader()\n project = header.get('X-Project-Style', None)\n if project is not None:\n return project\n bug_address = header.get('Report-Msgid-Bugs-To', None)\n if bug_address is not None:\n if 'bugzilla.gnome.org' in bug_address:\n return 'gnome'\n if 'bugs.kde.org' in bug_address:\n return 'kde'\n accelerator = header.get('X-Accelerator-Marker', None)\n if accelerator is not None:\n if accelerator == \"~\":\n return \"openoffice\"\n elif accelerator == \"&\":\n return \"mozilla\"\n project_id = header.get('Project-Id-Version', None)\n if project_id is not None:\n if 'gnome' in project_id.lower():\n return \"gnome\"\n # TODO Call some project guessing code and probably move all of the above there also\n return None\n\n def setprojectstyle(self, project_style):\n \"\"\"Set the project in the header.\n\n :param project_style: the new project\n :type project_style: str\n \"\"\"\n self.updateheader(add=True, X_Project_Style=project_style)\n\n def mergeheaders(self, otherstore):\n \"\"\"Merges another header with this header.\n\n This header is assumed to be the template.\n\n :type otherstore: :class:`~translate.storage.base.TranslationStore`\n \"\"\"\n\n newvalues = otherstore.parseheader()\n retain_list = (\"Project-Id-Version\", \"PO-Revision-Date\", \"Last-Translator\",\n \"Language-Team\", \"Plural-Forms\")\n retain = dict((key, newvalues[key]) for key in retain_list if newvalues.get(key, None) and newvalues[key] != default_header.get(key, None))\n self.updateheader(**retain)\n\n def updatecontributor(self, name, email=None):\n \"\"\"Add contribution comments if necessary.\"\"\"\n header = self.header()\n if not header:\n return\n prelines = []\n contriblines = []\n postlines = []\n contribexists = False\n incontrib = False\n outcontrib = False\n for line in header.getnotes(\"translator\").split('\\n'):\n line = line.strip()\n if line == u\"FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.\":\n incontrib = True\n continue\n if author_re.match(line):\n incontrib = True\n contriblines.append(line)\n continue\n if line == \"\" and incontrib:\n incontrib = False\n outcontrib = True\n if 
incontrib:\n contriblines.append(line)\n elif not outcontrib:\n prelines.append(line)\n else:\n postlines.append(line)\n\n year = time.strftime(\"%Y\")\n contribexists = False\n for i in range(len(contriblines)):\n line = contriblines[i]\n if name in line and (email is None or email in line):\n contribexists = True\n if year in line:\n break\n else:\n #The contributor is there, but not for this year\n if line[-1] == '.':\n line = line[:-1]\n contriblines[i] = \"%s, %s.\" % (line, year)\n\n if not contribexists:\n # Add a new contributor\n if email:\n contriblines.append(\"%s <%s>, %s.\" % (name, email, year))\n else:\n contriblines.append(\"%s, %s.\" % (name, year))\n\n header.removenotes()\n header.addnote(\"\\n\".join(prelines))\n header.addnote(\"\\n\".join(contriblines))\n header.addnote(\"\\n\".join(postlines))\n\n def makeheader(self, **kwargs):\n \"\"\"Create a header for the given filename.\n\n Check .makeheaderdict() for information on parameters.\n \"\"\"\n headerpo = self.UnitClass(\"\", encoding=self._encoding)\n headerpo.markfuzzy()\n headeritems = self.makeheaderdict(**kwargs)\n headervalue = \"\"\n for (key, value) in headeritems.items():\n if value is None:\n continue\n headervalue += \"%s: %s\\n\" % (key, value)\n headerpo.target = headervalue\n return headerpo\n", "path": "translate/storage/poheader.py" } ]
diff --git a/translate/storage/poheader.py b/translate/storage/poheader.py index ce89344f42..a229b836b1 100644 --- a/translate/storage/poheader.py +++ b/translate/storage/poheader.py @@ -48,8 +48,7 @@ def parseheaderstring(input): if not line or ":" not in line: continue key, value = line.split(":", 1) - #We don't want unicode keys - key = str(key.strip()) + key = key.strip() headervalues[key] = value.strip() return headervalues diff --git a/translate/storage/test_pypo.py b/translate/storage/test_pypo.py index 4438dbff4d..22bf6b1bc0 100644 --- a/translate/storage/test_pypo.py +++ b/translate/storage/test_pypo.py @@ -269,6 +269,33 @@ def test_unassociated_comments(self): print(bytes(oldfile)) assert len(oldfile.units) == 1 + def test_unicode_header(self): + """checks that unicode header is parsed and saved correctly""" + posource = r'''msgid "" +msgstr "" +"PO-Revision-Date: 2006-02-09 23:33+0200\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8-bit\n" +"Zkouška: něco\n" +'''.encode('utf-8') + pofile = self.poparse(posource) + assert pofile.parseheader() == { + 'Content-Transfer-Encoding': '8-bit', + 'Content-Type': 'text/plain; charset=UTF-8', + 'MIME-Version': '1.0', + 'PO-Revision-Date': '2006-02-09 23:33+0200', + 'Zkouška': 'něco' + } + update = {'zkouška': 'else'} + pofile.updateheader(add=True, **update) + assert pofile.units[0].target == """PO-Revision-Date: 2006-02-09 23:33+0200 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8-bit +Zkouška: else +""" + def test_prevmsgid_parse(self): """checks that prevmsgid (i.e. #|) is parsed and saved correctly""" posource = r'''msgid ""
apluslms__a-plus-1179
Improve access denied messages and 403 page
Improve access denied messages and make them somewhat more explanatory, and also improve the 403 page so it's not quite so aggressive towards would-be learners. See [Juha's comment](https://github.com/apluslms/a-plus/pull/853#discussion_r692139980) on PR #853

Improve access denied messages and 403 page
Fixes #858

# Description

**What?** Improve access denied messages and the 403 page

**Why?** To make it clearer why a user may not access certain pages

**How?** By adding new error texts and changing how the 403 page looks, so the user can more easily navigate a-plus in case they get to the 403 page.

Fixes #858

# Testing

**Remember to add or update unit tests for new features and changes.**

* How to [test your changes in A-plus](https://github.com/apluslms/a-plus/tree/master/doc#running-tests-and-updating-translations)
* How to [test accessibility](https://wiki.aalto.fi/display/EDIT/How+to+check+the+accessibility+of+pull+requests)

**What type of test did you run?**

- [ ] Accessibility test using the [WAVE](https://wave.webaim.org/extension/) extension.
- [ ] Django unit tests.
- [ ] Selenium tests.
- [ ] Other test. *(Add a description below)*
- [x] Manual testing. [ADD A DESCRIPTION ABOUT WHAT YOU TESTED MANUALLY]

**Did you test the changes in**

- [ ] Chrome
- [x] Firefox
- [ ] This pull request cannot be tested in the browser.

**Think of what is affected by these changes and could become broken**

# Translation

- [x] Did you modify or add new strings in the user interface? ([Read about how to create translation](https://github.com/apluslms/a-plus/tree/master/doc#running-tests-and-updating-translations))

# Programming style

- [ ] Did you follow our [style guides](https://apluslms.github.io/contribute/styleguides/)?
- [ ] Did you use Python type hinting in all functions that you added or edited? ([type hints](https://docs.python.org/3/library/typing.html) for function parameters and return values)

# Have you updated the README or other relevant documentation?

- [ ] documents inside the doc directory.
- [ ] README.md.
- [ ] Aplus Manual.
- [ ] Other documentation (mention below which documentation).

# Is it Done?

- [ ] Reviewer has finished the code review
- [ ] After the review, the developer has made changes accordingly
- [ ] Customer/Teacher has accepted the implementation of the feature

*Clean up your git commit history before submitting the pull request!*
[ { "content": "\"\"\"\nBase permission classes.\n\nThese classes use same interface than ones in django-rest-framework and\nare usable with APIViews too.\n\"\"\"\nimport string\n\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework.permissions import BasePermission as Permission\n\nfrom lib.helpers import Enum\n\n\nSAFE_METHODS = ('GET', 'HEAD', 'OPTIONS')\n\n\nclass FilterBackend:\n \"\"\"\n FilterBackend interface\n \"\"\"\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n Return a filtered queryset.\n \"\"\"\n raise NotImplementedError\n\n def get_fields(self, view): # pylint: disable=unused-argument\n return []\n\n\nclass NoPermission(Permission):\n \"\"\"\n Base Permission class that gives no access permission to anyone.\n \"\"\"\n def has_permission(self, request, view):\n return False\n\n def has_object_permission(self, request, view, obj):\n return False\n\n\nclass MessageMixin:\n \"\"\"\n Adds easy way to specify what exactly caused the PermissionDenied\n \"\"\"\n def error_msg(self, message: str, delim=None, format=None, replace=False): # pylint: disable=redefined-builtin\n \"\"\"\n Add extra text to self.message about the reason why permission\n was denied. Uses lazy object so the message string is evaluated\n only when rendered.\n\n If optional argument `format` is given, then it's used with format_lazy\n to format the message with the dictionary arguments from `format` arg.\n\n Optional argument `delim` can be used to change the string used to join\n self.message and `message`.\n\n If optional argument `replace` is true, then self.message is replaced with\n the `message`.\n \"\"\"\n if delim is None:\n delim = ': '\n\n if format:\n message = format_lazy(message, **format)\n\n if replace:\n self.message = message\n else:\n assert 'message' not in self.__dict__, (\n \"You are calling error_msg without replace=True \"\n \"after calling it with it first. 
Fix your code by removing \"\n \"the first method call and add replace=True to the second method call too.\"\n )\n msg_without_end_punctuation = (\n self.message[0:-1] if self.message[-1] in string.punctuation\n else self.message\n )\n self.message = format_lazy(\n '{}{}{}',\n msg_without_end_punctuation,\n delim,\n message,\n )\n\n\n# Access mode\n# ===========\n\n# All access levels\nACCESS = Enum(\n ('ANONYMOUS', 0, _('ACCESS_ANYONE')),\n ('ENROLL', 1, None),\n ('STUDENT', 3, _('ACCESS_ANY_STUDENT')),\n ('ENROLLED', 4, _('ACCESS_ENROLLED_STUDENT')),\n ('ASSISTANT', 5, _('ACCESS_COURSE_ASSISTANT')),\n ('GRADING', 6, _('ACCESS_GRADING')),\n ('TEACHER', 10, _('ACCESS_TEACHER')),\n ('SUPERUSER', 100, _('ACCESS_SUPERUSER')),\n)\n\n\nclass AccessModePermission(MessageMixin, Permission):\n \"\"\"\n If view has access_mode that is not anonymous, then require authentication\n \"\"\"\n message = _('ACCESS_PERMISSION_DENIED_MSG')\n\n def has_permission(self, request, view):\n access_mode = view.get_access_mode()\n\n if access_mode == ACCESS.ANONYMOUS:\n return True\n if not request.user.is_authenticated:\n return False\n\n if access_mode >= ACCESS.SUPERUSER:\n return request.user.is_superuser\n\n if access_mode >= ACCESS.TEACHER:\n if not view.is_teacher:\n self.error_msg(_('ACCESS_ERROR_ONLY_TEACHERS'))\n return False\n\n elif access_mode >= ACCESS.ASSISTANT:\n if not view.is_course_staff:\n self.error_msg(_('ACCESS_ERROR_ONLY_COURSE_STAFF'))\n return False\n\n elif access_mode == ACCESS.ENROLLED:\n if not view.is_course_staff and not view.is_student:\n self.error_msg(_('ACCESS_ERROR_ONLY_ENROLLED_STUDENTS'))\n return False\n\n return True\n\n\n# Object permissions\n# ==================\n\n\nclass ObjectVisibleBasePermission(MessageMixin, Permission):\n model = None\n obj_var = None\n\n def has_permission(self, request, view):\n obj = getattr(view, self.obj_var, None)\n return (\n obj is None or\n self.has_object_permission(request, view, obj)\n )\n\n def has_object_permission(self, request, view, obj):\n user = request.user\n return (\n # skip objects that are not the model in question\n not isinstance(obj, self.model) or # pylint: disable=isinstance-second-argument-not-valid-type\n user.is_staff or\n user.is_superuser or\n self.is_object_visible(request, view, obj)\n )\n\n def is_object_visible(self, request, view, obj):\n raise NotImplementedError\n", "path": "authorization/permissions.py" } ]
[ { "content": "\"\"\"\nBase permission classes.\n\nThese classes use same interface than ones in django-rest-framework and\nare usable with APIViews too.\n\"\"\"\nimport string\n\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework.permissions import BasePermission as Permission\n\nfrom lib.helpers import Enum\n\n\nSAFE_METHODS = ('GET', 'HEAD', 'OPTIONS')\n\n\nclass FilterBackend:\n \"\"\"\n FilterBackend interface\n \"\"\"\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n Return a filtered queryset.\n \"\"\"\n raise NotImplementedError\n\n def get_fields(self, view): # pylint: disable=unused-argument\n return []\n\n\nclass NoPermission(Permission):\n \"\"\"\n Base Permission class that gives no access permission to anyone.\n \"\"\"\n def has_permission(self, request, view):\n return False\n\n def has_object_permission(self, request, view, obj):\n return False\n\n\nclass MessageMixin:\n \"\"\"\n Adds easy way to specify what exactly caused the PermissionDenied\n \"\"\"\n def error_msg(self, message: str, delim=None, format=None, replace=False): # pylint: disable=redefined-builtin\n \"\"\"\n Add extra text to self.message about the reason why permission\n was denied. Uses lazy object so the message string is evaluated\n only when rendered.\n\n If optional argument `format` is given, then it's used with format_lazy\n to format the message with the dictionary arguments from `format` arg.\n\n Optional argument `delim` can be used to change the string used to join\n self.message and `message`.\n\n If optional argument `replace` is true, then self.message is replaced with\n the `message`.\n \"\"\"\n if delim is None:\n delim = ': '\n\n if format:\n message = format_lazy(message, **format)\n\n if replace:\n self.message = message\n else:\n assert 'message' not in self.__dict__, (\n \"You are calling error_msg without replace=True \"\n \"after calling it with it first. 
Fix your code by removing \"\n \"the first method call and add replace=True to the second method call too.\"\n )\n msg_without_end_punctuation = (\n self.message[0:-1] if self.message[-1] in string.punctuation\n else self.message\n )\n self.message = format_lazy(\n '{}{}{}',\n msg_without_end_punctuation,\n delim,\n message,\n )\n\n\n# Access mode\n# ===========\n\n# All access levels\nACCESS = Enum(\n ('ANONYMOUS', 0, _('ACCESS_ANYONE')),\n ('ENROLL', 1, None),\n ('STUDENT', 3, _('ACCESS_ANY_STUDENT')),\n ('ENROLLED', 4, _('ACCESS_ENROLLED_STUDENT')),\n ('ASSISTANT', 5, _('ACCESS_COURSE_ASSISTANT')),\n ('GRADING', 6, _('ACCESS_GRADING')),\n ('TEACHER', 10, _('ACCESS_TEACHER')),\n ('SUPERUSER', 100, _('ACCESS_SUPERUSER')),\n)\n\n\nclass AccessModePermission(MessageMixin, Permission):\n \"\"\"\n If view has access_mode that is not anonymous, then require authentication\n \"\"\"\n message = _('ACCESS_PERMISSION_DENIED_MSG')\n\n def has_permission(self, request, view):\n access_mode = view.get_access_mode()\n\n if access_mode == ACCESS.ANONYMOUS:\n return True\n if not request.user.is_authenticated:\n self.error_msg(_('ACCESS_ERROR_ONLY_AUTHENTICATED'))\n return False\n\n if access_mode >= ACCESS.SUPERUSER:\n return request.user.is_superuser\n\n if access_mode >= ACCESS.TEACHER:\n if not view.is_teacher:\n self.error_msg(_('ACCESS_ERROR_ONLY_TEACHERS'))\n return False\n\n elif access_mode >= ACCESS.ASSISTANT:\n if not view.is_course_staff:\n self.error_msg(_('ACCESS_ERROR_ONLY_COURSE_STAFF'))\n return False\n\n elif access_mode == ACCESS.ENROLLED:\n if not view.is_course_staff and not view.is_student:\n self.error_msg(_('ACCESS_ERROR_ONLY_ENROLLED_STUDENTS'))\n return False\n\n return True\n\n\n# Object permissions\n# ==================\n\n\nclass ObjectVisibleBasePermission(MessageMixin, Permission):\n model = None\n obj_var = None\n\n def has_permission(self, request, view):\n obj = getattr(view, self.obj_var, None)\n return (\n obj is None or\n self.has_object_permission(request, view, obj)\n )\n\n def has_object_permission(self, request, view, obj):\n user = request.user\n return (\n # skip objects that are not the model in question\n not isinstance(obj, self.model) or # pylint: disable=isinstance-second-argument-not-valid-type\n user.is_staff or\n user.is_superuser or\n self.is_object_visible(request, view, obj)\n )\n\n def is_object_visible(self, request, view, obj):\n raise NotImplementedError\n", "path": "authorization/permissions.py" } ]
diff --git a/authorization/permissions.py b/authorization/permissions.py index be2696ce8..2016b5c70 100644 --- a/authorization/permissions.py +++ b/authorization/permissions.py @@ -114,6 +114,7 @@ def has_permission(self, request, view): if access_mode == ACCESS.ANONYMOUS: return True if not request.user.is_authenticated: + self.error_msg(_('ACCESS_ERROR_ONLY_AUTHENTICATED')) return False if access_mode >= ACCESS.SUPERUSER: diff --git a/locale/en/LC_MESSAGES/django.po b/locale/en/LC_MESSAGES/django.po index b0f0d5952..60b38223d 100644 --- a/locale/en/LC_MESSAGES/django.po +++ b/locale/en/LC_MESSAGES/django.po @@ -6,7 +6,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-02-07 16:41+0200\n" +"POT-Creation-Date: 2023-05-10 14:22+0300\n" "PO-Revision-Date: 2021-05-27 14:47+0300\n" "Last-Translator: Jimmy Ihalainen <[email protected]>\n" "Language-Team: English<>\n" @@ -200,6 +200,10 @@ msgstr "A superuser of the service" msgid "ACCESS_PERMISSION_DENIED_MSG" msgstr "Unfortunately you are not permitted to see this content." +#: authorization/permissions.py +msgid "ACCESS_ERROR_ONLY_AUTHENTICATED" +msgstr "Please login." + #: authorization/permissions.py msgid "ACCESS_ERROR_ONLY_TEACHERS" msgstr "Only course teachers shall pass." @@ -372,7 +376,8 @@ msgstr "anonymized ID" msgid "LABEL_ROLE" msgstr "role" -#: course/models.py exercise/exercise_models.py exercise/submission_models.py +#: course/models.py exercise/admin.py exercise/exercise_models.py +#: exercise/submission_models.py msgid "LABEL_STATUS" msgstr "status" @@ -2922,6 +2927,10 @@ msgstr "Submitters" msgid "LABEL_SUBMITTERS" msgstr "Submitters" +#: exercise/admin.py exercise/submission_models.py notification/models.py +msgid "LABEL_SUBMISSION" +msgstr "submission" + #: exercise/api/views.py exercise/views.py msgid "ERROR_SUBMISSION_SAVING_FAILED" msgstr "" @@ -3718,10 +3727,6 @@ msgstr "submission draft" msgid "MODEL_NAME_SUBMISSION_DRAFT_PLURAL" msgstr "submission drafts" -#: exercise/submission_models.py notification/models.py -msgid "LABEL_SUBMISSION" -msgstr "submission" - #: exercise/submission_models.py msgid "LABEL_PARAM_NAME" msgstr "parameter name" @@ -5037,8 +5042,8 @@ msgstr "" msgid "EXTERNAL_SERVICE_SENT_ACCESS_TOKEN_COURSE_STAFF -- %(brand)s" msgstr "" "Moreover, we send this service a so-called <a href=\"https://en.wikipedia." -"org/wiki/Access_token\" rel=\"noopener nofollow\" target=\"_blank\" class=" -"\"alert-link\">access token</a>, which gives the service access to the " +"org/wiki/Access_token\" rel=\"noopener nofollow\" target=\"_blank\" " +"class=\"alert-link\">access token</a>, which gives the service access to the " "%(brand)s API at your privilege level. If you don't want that to happen, " "please contact %(brand)s administration." @@ -5047,8 +5052,8 @@ msgstr "" msgid "EXTERNAL_SERVICE_SENT_ACCESS_TOKEN_STUDENT -- %(brand)s" msgstr "" "Moreover, we send this service a so-called <a href=\"https://en.wikipedia." -"org/wiki/Access_token\" rel=\"noopener nofollow\" target=\"_blank\" class=" -"\"alert-link\">access token</a>, which gives the service access to the " +"org/wiki/Access_token\" rel=\"noopener nofollow\" target=\"_blank\" " +"class=\"alert-link\">access token</a>, which gives the service access to the " "%(brand)s API at your privilege level. This means that the service can, for " "example, access your student ID number and your progress in the course, and " "submit solutions to assignments. 
The course staff has enabled this feature " @@ -5058,8 +5063,8 @@ msgstr "" #, python-format msgid "EXTERNAL_SERVICE_PRIVACY_HOSTED_INTERNALLY -- %(brand)s, %(url)s" msgstr "" -"This service is hosted by us (%(brand)s administrators) and thus our <a href=" -"\"%(url)s\" target=\"_blank\">privacy notice</a> covers it too." +"This service is hosted by us (%(brand)s administrators) and thus our <a " +"href=\"%(url)s\" target=\"_blank\">privacy notice</a> covers it too." #: external_services/templates/external_services/_privacy.html #, python-format @@ -5073,21 +5078,22 @@ msgid "EXTERNAL_SERVICE_PRIVACY_HOSTED_IN_EEA" msgstr "" "This service is hosted within the European Economics Area and is thus " "subject to <a href=\"https://ec.europa.eu/info/law/law-topic/data-protection/" -"reform_en\" rel=\"noopener nofollow\" target=\"_blank\" class=\"alert-link" -"\">the European Union's General Data Protection Regulation</a>." +"reform_en\" rel=\"noopener nofollow\" target=\"_blank\" class=\"alert-" +"link\">the European Union's General Data Protection Regulation</a>." #: external_services/templates/external_services/_privacy.html msgid "EXTERNAL_SERVICE_PRIVACY_PRIVACYSHIELD" msgstr "" "This service is hosted outside of the European Economics Area and is thus " "not covered by <a href=\"https://ec.europa.eu/info/law/law-topic/data-" -"protection/reform_en\" rel=\"noopener nofollow\" target=\"_blank\" class=" -"\"alert-link\">the European Union's General Data Protection Regulation</a> " -"(GDPR). The service used to be certified under <a href=\"https://www." -"privacyshield.gov/Individuals-in-Europe\" rel=\"noopener nofollow\" target=" -"\"_blank\" class=\"alert-link\">the EU-U.S. Privacy Shield</a>, but the " -"European Court of Justice has declared the EU-U.S. Privacy Shield invalid. " -"We are investigating how this affects the use of the service." +"protection/reform_en\" rel=\"noopener nofollow\" target=\"_blank\" " +"class=\"alert-link\">the European Union's General Data Protection " +"Regulation</a> (GDPR). The service used to be certified under <a " +"href=\"https://www.privacyshield.gov/Individuals-in-Europe\" rel=\"noopener " +"nofollow\" target=\"_blank\" class=\"alert-link\">the EU-U.S. Privacy " +"Shield</a>, but the European Court of Justice has declared the EU-U.S. " +"Privacy Shield invalid. We are investigating how this affects the use of the " +"service." #: external_services/templates/external_services/_privacy.html msgid "EXTERNAL_SERVICE_PRIVACY_GLOBAL" diff --git a/locale/fi/LC_MESSAGES/django.po b/locale/fi/LC_MESSAGES/django.po index b9240bb56..30700f779 100644 --- a/locale/fi/LC_MESSAGES/django.po +++ b/locale/fi/LC_MESSAGES/django.po @@ -6,7 +6,7 @@ msgid "" msgstr "" "Project-Id-Version: \n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-02-07 16:41+0200\n" +"POT-Creation-Date: 2023-05-10 14:22+0300\n" "PO-Revision-Date: 2019-08-14 12:16+0200\n" "Last-Translator: Jimmy Ihalainen <[email protected]>\n" "Language-Team: Finnish <>\n" @@ -201,6 +201,10 @@ msgstr "Palvelun hallintokäyttäjä" msgid "ACCESS_PERMISSION_DENIED_MSG" msgstr "Valitettavasti sinulla ei ole oikeutta nähdä tätä sisältöä." +#: authorization/permissions.py +msgid "ACCESS_ERROR_ONLY_AUTHENTICATED" +msgstr "Kirjaudu sisään." + #: authorization/permissions.py msgid "ACCESS_ERROR_ONLY_TEACHERS" msgstr "Vain opettajille." 
@@ -373,7 +377,8 @@ msgstr "anonymisoitu ID" msgid "LABEL_ROLE" msgstr "rooli" -#: course/models.py exercise/exercise_models.py exercise/submission_models.py +#: course/models.py exercise/admin.py exercise/exercise_models.py +#: exercise/submission_models.py msgid "LABEL_STATUS" msgstr "tila" @@ -1174,8 +1179,8 @@ msgstr "[henkilökunnalla ei ole]" #, python-format msgid "GROUP_PERSONAL_JOIN_CODE -- %(code)s" msgstr "" -"Henkilökohtainen ryhmään <strong>liittymiskoodisi</strong> on <strong>" -"%(code)s</strong>. " +"Henkilökohtainen ryhmään <strong>liittymiskoodisi</strong> on " +"<strong>%(code)s</strong>. " #: course/templates/course/groups.html msgid "GROUP_CREATION_INSTRUCTIONS" @@ -2532,7 +2537,8 @@ msgstr "Tuo sisältöasetukset URL-osoitteesta" #: edit_course/templates/edit_course/edit_content.html msgid "APPLY_CHANGES_TOOLTIP" msgstr "" -"Vertaa uusia sisältöasetuksia aikaisempiin, ja päivittää vain muuttuneet osat." +"Vertaa uusia sisältöasetuksia aikaisempiin, ja päivittää vain muuttuneet " +"osat." #: edit_course/templates/edit_course/edit_content.html msgid "APPLY_CHANGES" @@ -2935,6 +2941,10 @@ msgstr "Opiskelijat" msgid "LABEL_SUBMITTERS" msgstr "Palauttajat" +#: exercise/admin.py exercise/submission_models.py notification/models.py +msgid "LABEL_SUBMISSION" +msgstr "palautus" + #: exercise/api/views.py exercise/views.py msgid "ERROR_SUBMISSION_SAVING_FAILED" msgstr "Palautuksen tallentaminen epäonnistui. Palautusta ei ole huomioitu." @@ -3725,10 +3735,6 @@ msgstr "palautusluonnos" msgid "MODEL_NAME_SUBMISSION_DRAFT_PLURAL" msgstr "palautusluonnokset" -#: exercise/submission_models.py notification/models.py -msgid "LABEL_SUBMISSION" -msgstr "palautus" - #: exercise/submission_models.py msgid "LABEL_PARAM_NAME" msgstr "parametrin nimi" @@ -3985,8 +3991,8 @@ msgstr "Palauttajia: %(number)s <small>%(percentage)s%%</small>" #, python-format msgid "USER_LAST_VISITED -- %(name)s, %(link)s" msgstr "" -"Olet ollut oppimateriaalissa viimeksi kohdassa <a href=\"%(link)s\">" -"%(name)s</a>" +"Olet ollut oppimateriaalissa viimeksi kohdassa <a " +"href=\"%(link)s\">%(name)s</a>" #: exercise/templates/exercise/_user_last.html #, python-format @@ -5056,29 +5062,30 @@ msgstr "" msgid "EXTERNAL_SERVICE_SENT_ACCESS_TOKEN_COURSE_STAFF -- %(brand)s" msgstr "" "Lisäksi palvelulle lähetetään <a href=\"https://en.wikipedia.org/wiki/" -"Access_token\" rel=\"noopener nofollow\" target=\"_blank\" class=\"alert-link" -"\">käyttöoikeustietue</a>, joka mahdollistaa palvelun pääsyn %(brand)s:n API:" -"in sinun oikeuksillasi. Mikäli näin ei pitäisi tapahtua, niin ota yhteyttä " -"%(brand)s:n ylläpitoon." +"Access_token\" rel=\"noopener nofollow\" target=\"_blank\" class=\"alert-" +"link\">käyttöoikeustietue</a>, joka mahdollistaa palvelun pääsyn %(brand)s:n " +"API:in sinun oikeuksillasi. Mikäli näin ei pitäisi tapahtua, niin ota " +"yhteyttä %(brand)s:n ylläpitoon." #: external_services/templates/external_services/_privacy.html #, python-format msgid "EXTERNAL_SERVICE_SENT_ACCESS_TOKEN_STUDENT -- %(brand)s" msgstr "" "Lisäksi palvelulle lähetetään <a href=\"https://en.wikipedia.org/wiki/" -"Access_token\" rel=\"noopener nofollow\" target=\"_blank\" class=\"alert-link" -"\">käyttöoikeustietue</a>, joka mahdollistaa palvelun pääsyn %(brand)s:n API:" -"in sinun oikeuksillasi. Tämä tarkoittaa sitä, että palvelu voi mm. selvittää " -"opiskelijanumerosi, nähdä kurssisuorituksiasi ja palauttaa tehtäviä " -"puolestasi. Kurssin henkilökunta on mahdollistanut tämän ja luottaa palvelun " -"toimivan etujesi mukaisesti." 
+"Access_token\" rel=\"noopener nofollow\" target=\"_blank\" class=\"alert-" +"link\">käyttöoikeustietue</a>, joka mahdollistaa palvelun pääsyn %(brand)s:n " +"API:in sinun oikeuksillasi. Tämä tarkoittaa sitä, että palvelu voi mm. " +"selvittää opiskelijanumerosi, nähdä kurssisuorituksiasi ja palauttaa " +"tehtäviä puolestasi. Kurssin henkilökunta on mahdollistanut tämän ja luottaa " +"palvelun toimivan etujesi mukaisesti." #: external_services/templates/external_services/_privacy.html #, python-format msgid "EXTERNAL_SERVICE_PRIVACY_HOSTED_INTERNALLY -- %(brand)s, %(url)s" msgstr "" -"Tämä palvelu on meidän ylläpitämämme (%(brand)s), joten meidän <a href=" -"\"%(url)s\" target=\"_blank\">tietosuojailmoituksemme</a> kattaa myös sen. " +"Tämä palvelu on meidän ylläpitämämme (%(brand)s), joten meidän <a " +"href=\"%(url)s\" target=\"_blank\">tietosuojailmoituksemme</a> kattaa myös " +"sen. " #: external_services/templates/external_services/_privacy.html #, python-format @@ -5090,9 +5097,9 @@ msgstr "" #: external_services/templates/external_services/_privacy.html msgid "EXTERNAL_SERVICE_PRIVACY_HOSTED_IN_EEA" msgstr "" -"Tämä palvelu sijaitsee Euroopan talousalueella, joten sitä koskee <a href=" -"\"https://ec.europa.eu/info/law/law-topic/data-protection/reform_fi\" rel=" -"\"noopener nofollow\" target=\"_blank\" class=\"alert-link\">Euroopan " +"Tämä palvelu sijaitsee Euroopan talousalueella, joten sitä koskee <a " +"href=\"https://ec.europa.eu/info/law/law-topic/data-protection/reform_fi\" " +"rel=\"noopener nofollow\" target=\"_blank\" class=\"alert-link\">Euroopan " "unionin yleinen tietosuoja-asetus</a>." #: external_services/templates/external_services/_privacy.html @@ -5100,12 +5107,12 @@ msgid "EXTERNAL_SERVICE_PRIVACY_PRIVACYSHIELD" msgstr "" "Tämä palvelu sijaitsee Euroopan talousalueen ulkopuolella, joten sitä ei " "koske <a href=\"https://ec.europa.eu/info/law/law-topic/data-protection/" -"reform_fi\" rel=\"noopener nofollow\" target=\"_blank\" class=\"alert-link" -"\">Euroopan unionin yleinen tietosuoja-asetus</a> (GDPR). Palvelu oli " -"aiemmin <a href=\"https://www.privacyshield.gov/Individuals-in-Europe\" rel=" -"\"noopener nofollow\" target=\"_blank\" class=\"alert-link\">EU-U.S. Privacy " -"Shield</a> -sertifioitu, mutta Euroopan unionin tuomioistuin on kumonnut " -"päätöksen EU-U.S. Privacy Shield -järjestelyn tietosuojan tason " +"reform_fi\" rel=\"noopener nofollow\" target=\"_blank\" class=\"alert-" +"link\">Euroopan unionin yleinen tietosuoja-asetus</a> (GDPR). Palvelu oli " +"aiemmin <a href=\"https://www.privacyshield.gov/Individuals-in-Europe\" " +"rel=\"noopener nofollow\" target=\"_blank\" class=\"alert-link\">EU-U.S. " +"Privacy Shield</a> -sertifioitu, mutta Euroopan unionin tuomioistuin on " +"kumonnut päätöksen EU-U.S. Privacy Shield -järjestelyn tietosuojan tason " "riittävyydestä. Selvitämme edelleen, kuinka uusi päätös vaikuttaa palvelun " "käyttöön. " @@ -5127,8 +5134,8 @@ msgstr "" #, python-format msgid "EXTERNAL_SERVICE_LOOK_WITHOUT_SENDING_DATA -- %(url)s" msgstr "" -"Voit vierailla palvelussa lähettämättä tietoja: <a href=\"%(url)s\" rel=" -"\"noopener nofollow\" target=\"_blank\">palvelun etusivu</a>." +"Voit vierailla palvelussa lähettämättä tietoja: <a href=\"%(url)s\" " +"rel=\"noopener nofollow\" target=\"_blank\">palvelun etusivu</a>." 
#: external_services/templates/external_services/edit_menu.html msgid "ADD_MENU_ITEM" diff --git a/selenium_test/test/locators.py b/selenium_test/test/locators.py index 7c86a27a3..7e69945e2 100644 --- a/selenium_test/test/locators.py +++ b/selenium_test/test/locators.py @@ -1,7 +1,11 @@ from selenium.webdriver.common.by import By class CommonLocators: - FORBIDDEN_PAGE = (By.XPATH, "//div[@class='page-header']/h1[contains(text(), '403 Forbidden')]") + FORBIDDEN_PAGE = ( + By.XPATH, + "//div[@class='message'][contains(text(), " + "'Unfortunately you are not permitted to see this content')]" + ) PERMISSION_DENIED_ACCESS_MODE = ( By.XPATH, "//main[@id='content']//div[@class='message'][contains(text(), " diff --git a/selenium_test/test/teacher_list_test.py b/selenium_test/test/teacher_list_test.py index 586a88f73..27cc43e42 100644 --- a/selenium_test/test/teacher_list_test.py +++ b/selenium_test/test/teacher_list_test.py @@ -26,7 +26,7 @@ def __init__( query.append("end_date=" + end_date.isoformat()) query.append("with_assistants=" + ("true" if with_assistants else "false")) - self.load("/accounts/teachers/?" + "&".join(query), (By.CSS_SELECTOR, '.page-header')) + self.load("/accounts/teachers/?" + "&".join(query), (By.CSS_SELECTOR, '.container-fluid')) def get_table_data(self) -> List[List[str]]: rows = self.getElements((By.CSS_SELECTOR, "table tbody tr")) diff --git a/templates/403.html b/templates/403.html index bc1d9a5d7..888f557fe 100644 --- a/templates/403.html +++ b/templates/403.html @@ -1,34 +1,12 @@ +{% extends "base.html" %} {% load i18n %} {% load static %} {% load base %} -<!DOCTYPE html> -<html> - <head> - <meta charset="utf-8" /> - <meta name="viewport" content="width=device-width, initial-scale=1"> - <title>403 Forbidden</title> - <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet"> - <link rel="stylesheet" href="{% static 'css/main.css' %}" /> - </head> - <body> - <div class="container-fluid"> - <div class="row"> - <div class="col-md-8"> - <div class="page-header"> - <h1>403 Forbidden</h1> - </div> - {% include "_messages.html" %} - <pre> - | v - | /\ | ,. - ^\_("))_/| ´ . - /{%} \ | ` - /______\ ,' - ,' ^ ^ ,' -; :</pre> - </div> - </div> - </div> - {% tracking_html %} - </body> -</html> +{% block content %} +<div class="container-fluid"> + <div class="row"> + <div class="col-md-8"> + </div> + </div> +</div> +{% endblock %}
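The record above ends with the fix: `AccessModePermission.has_permission` now calls `self.error_msg(_('ACCESS_ERROR_ONLY_AUTHENTICATED'))` before returning `False`, so the 403 page can show "Please login." instead of only the generic denial text. As a rough illustration of the mechanism `error_msg` relies on, here is a minimal, self-contained sketch that mimics its message-joining logic in plain Python. It is not the project's code: the real mixin builds a lazy string with `django.utils.text.format_lazy`/`gettext_lazy` so rendering is deferred, and it also supports the `format=` and `replace=` arguments omitted here.

```python
# Simplified stand-in for MessageMixin.error_msg from the record above.
# It only shows how the extra reason is appended to the class-level message;
# the real implementation composes a lazy string with format_lazy.
import string

class DeniedWithReason:
    message = "Unfortunately you are not permitted to see this content."

    def error_msg(self, extra, delim=": ", replace=False):
        if replace:
            self.message = extra
            return
        # Drop trailing punctuation from the base message before joining,
        # mirroring msg_without_end_punctuation in the original.
        base = self.message[:-1] if self.message[-1] in string.punctuation else self.message
        self.message = f"{base}{delim}{extra}"

perm = DeniedWithReason()
perm.error_msg("Please login.")
print(perm.message)
# Unfortunately you are not permitted to see this content: Please login.
```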
paperless-ngx__paperless-ngx-6608
[BUG] Server Error 500 when granting History All ### Description I just tried to create a user group, granting History All to that group. Pressing save in the dialog results in an Internal Server error. I am running a slightly customized ARM64 image, which includes zxing-cpp. I don't believe this is related to the issue. ### Steps to reproduce 1. Go To "Users & Groups" 2. Create a new group 3. Grant "History All" to that Group 4. Press Save ### Webserver logs ```bash [2024-05-07 13:25:27,270] [ERROR] [django.request] Internal Server Error: /api/groups/34/ Traceback (most recent call last): File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 518, in thread_handler raise exc_info[1] File "/usr/local/lib/python3.11/site-packages/django/core/handlers/exception.py", line 42, in inner response = await get_response(request) ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 518, in thread_handler raise exc_info[1] File "/usr/local/lib/python3.11/site-packages/django/core/handlers/base.py", line 253, in _get_response_async response = await wrapped_callback( ^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 468, in __call__ ret = await asyncio.shield(exec_coro) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/asgiref/current_thread_executor.py", line 40, in run result = self.fn(*self.args, **self.kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 522, in thread_handler return func(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/django/views/decorators/csrf.py", line 56, in wrapper_view return view_func(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/viewsets.py", line 125, in view return self.dispatch(request, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 509, in dispatch response = self.handle_exception(exc) ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 469, in handle_exception self.raise_uncaught_exception(exc) File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception raise exc File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 506, in dispatch response = handler(request, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/mixins.py", line 67, in update serializer.is_valid(raise_exception=True) File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 227, in is_valid self._validated_data = self.run_validation(self.initial_data) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 426, in run_validation value = self.to_internal_value(data) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 483, in to_internal_value validated_value = field.run_validation(primitive_value) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/fields.py", line 547, in run_validation value = self.to_internal_value(data) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/relations.py", line 528, in to_internal_value 
return [ ^ File "/usr/local/lib/python3.11/site-packages/rest_framework/relations.py", line 529, in <listcomp> self.child_relation.to_internal_value(item) File "/usr/local/lib/python3.11/site-packages/rest_framework/relations.py", line 462, in to_internal_value return queryset.get(**{self.slug_field: data}) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/django/db/models/query.py", line 640, in get raise self.model.MultipleObjectsReturned( django.contrib.auth.models.Permission.MultipleObjectsReturned: get() returned more than one Permission -- it returned 2! [2024-05-07 13:25:27,425] [ERROR] [django.request] Internal Server Error: /api/groups/34/ Traceback (most recent call last): File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 518, in thread_handler raise exc_info[1] File "/usr/local/lib/python3.11/site-packages/django/core/handlers/exception.py", line 42, in inner response = await get_response(request) ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 518, in thread_handler raise exc_info[1] File "/usr/local/lib/python3.11/site-packages/django/core/handlers/base.py", line 253, in _get_response_async response = await wrapped_callback( ^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 468, in __call__ ret = await asyncio.shield(exec_coro) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/asgiref/current_thread_executor.py", line 40, in run result = self.fn(*self.args, **self.kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/asgiref/sync.py", line 522, in thread_handler return func(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/django/views/decorators/csrf.py", line 56, in wrapper_view return view_func(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/viewsets.py", line 125, in view return self.dispatch(request, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 509, in dispatch response = self.handle_exception(exc) ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 469, in handle_exception self.raise_uncaught_exception(exc) File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception raise exc File "/usr/local/lib/python3.11/site-packages/rest_framework/views.py", line 506, in dispatch response = handler(request, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/mixins.py", line 67, in update serializer.is_valid(raise_exception=True) File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 227, in is_valid self._validated_data = self.run_validation(self.initial_data) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 426, in run_validation value = self.to_internal_value(data) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/serializers.py", line 483, in to_internal_value validated_value = field.run_validation(primitive_value) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/fields.py", line 547, in run_validation value = self.to_internal_value(data) 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/rest_framework/relations.py", line 528, in to_internal_value return [ ^ File "/usr/local/lib/python3.11/site-packages/rest_framework/relations.py", line 529, in <listcomp> self.child_relation.to_internal_value(item) File "/usr/local/lib/python3.11/site-packages/rest_framework/relations.py", line 462, in to_internal_value return queryset.get(**{self.slug_field: data}) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/django/db/models/query.py", line 640, in get raise self.model.MultipleObjectsReturned( django.contrib.auth.models.Permission.MultipleObjectsReturned: get() returned more than one Permission -- it returned 2! ``` ### Browser logs _No response_ ### Paperless-ngx version 2.8.1 ### Host OS Kubernetes ### Installation method Other (please describe above) ### Browser Arc ### Configuration changes _No response_ ### Other _No response_ ### Please confirm the following - [X] I believe this issue is a bug that affects all users of Paperless-ngx, not something specific to my installation. - [X] I have already searched for relevant existing issues and discussions before opening this report. - [X] I have updated the title field above with a concise description.
[ { "content": "import logging\n\nfrom allauth.socialaccount.models import SocialAccount\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.auth.models import User\nfrom rest_framework import serializers\n\nfrom paperless.models import ApplicationConfiguration\n\nlogger = logging.getLogger(\"paperless.settings\")\n\n\nclass ObfuscatedUserPasswordField(serializers.Field):\n \"\"\"\n Sends *** string instead of password in the clear\n \"\"\"\n\n def to_representation(self, value):\n return \"**********\" if len(value) > 0 else \"\"\n\n def to_internal_value(self, data):\n return data\n\n\nclass UserSerializer(serializers.ModelSerializer):\n password = ObfuscatedUserPasswordField(required=False)\n user_permissions = serializers.SlugRelatedField(\n many=True,\n queryset=Permission.objects.exclude(content_type__app_label=\"admin\"),\n slug_field=\"codename\",\n required=False,\n )\n inherited_permissions = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = (\n \"id\",\n \"username\",\n \"email\",\n \"password\",\n \"first_name\",\n \"last_name\",\n \"date_joined\",\n \"is_staff\",\n \"is_active\",\n \"is_superuser\",\n \"groups\",\n \"user_permissions\",\n \"inherited_permissions\",\n )\n\n def get_inherited_permissions(self, obj):\n return obj.get_group_permissions()\n\n def update(self, instance, validated_data):\n if \"password\" in validated_data:\n if len(validated_data.get(\"password\").replace(\"*\", \"\")) > 0:\n instance.set_password(validated_data.get(\"password\"))\n instance.save()\n validated_data.pop(\"password\")\n super().update(instance, validated_data)\n return instance\n\n def create(self, validated_data):\n groups = None\n if \"groups\" in validated_data:\n groups = validated_data.pop(\"groups\")\n user_permissions = None\n if \"user_permissions\" in validated_data:\n user_permissions = validated_data.pop(\"user_permissions\")\n password = None\n if (\n \"password\" in validated_data\n and len(validated_data.get(\"password\").replace(\"*\", \"\")) > 0\n ):\n password = validated_data.pop(\"password\")\n user = User.objects.create(**validated_data)\n # set groups\n if groups:\n user.groups.set(groups)\n # set permissions\n if user_permissions:\n user.user_permissions.set(user_permissions)\n # set password\n if password:\n user.set_password(password)\n user.save()\n return user\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n permissions = serializers.SlugRelatedField(\n many=True,\n queryset=Permission.objects.all(),\n slug_field=\"codename\",\n )\n\n class Meta:\n model = Group\n fields = (\n \"id\",\n \"name\",\n \"permissions\",\n )\n\n\nclass SocialAccountSerializer(serializers.ModelSerializer):\n name = serializers.SerializerMethodField()\n\n class Meta:\n model = SocialAccount\n fields = (\n \"id\",\n \"provider\",\n \"name\",\n )\n\n def get_name(self, obj):\n return obj.get_provider_account().to_str()\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n email = serializers.EmailField(allow_null=False)\n password = ObfuscatedUserPasswordField(required=False, allow_null=False)\n auth_token = serializers.SlugRelatedField(read_only=True, slug_field=\"key\")\n social_accounts = SocialAccountSerializer(\n many=True,\n read_only=True,\n source=\"socialaccount_set\",\n )\n\n class Meta:\n model = User\n fields = (\n \"email\",\n \"password\",\n \"first_name\",\n \"last_name\",\n \"auth_token\",\n \"social_accounts\",\n \"has_usable_password\",\n )\n\n\nclass 
ApplicationConfigurationSerializer(serializers.ModelSerializer):\n user_args = serializers.JSONField(binary=True, allow_null=True)\n\n def run_validation(self, data):\n # Empty strings treated as None to avoid unexpected behavior\n if \"user_args\" in data and data[\"user_args\"] == \"\":\n data[\"user_args\"] = None\n if \"language\" in data and data[\"language\"] == \"\":\n data[\"language\"] = None\n return super().run_validation(data)\n\n def update(self, instance, validated_data):\n if instance.app_logo and \"app_logo\" in validated_data:\n instance.app_logo.delete()\n return super().update(instance, validated_data)\n\n class Meta:\n model = ApplicationConfiguration\n fields = \"__all__\"\n", "path": "src/paperless/serialisers.py" } ]
[ { "content": "import logging\n\nfrom allauth.socialaccount.models import SocialAccount\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.auth.models import User\nfrom rest_framework import serializers\n\nfrom paperless.models import ApplicationConfiguration\n\nlogger = logging.getLogger(\"paperless.settings\")\n\n\nclass ObfuscatedUserPasswordField(serializers.Field):\n \"\"\"\n Sends *** string instead of password in the clear\n \"\"\"\n\n def to_representation(self, value):\n return \"**********\" if len(value) > 0 else \"\"\n\n def to_internal_value(self, data):\n return data\n\n\nclass UserSerializer(serializers.ModelSerializer):\n password = ObfuscatedUserPasswordField(required=False)\n user_permissions = serializers.SlugRelatedField(\n many=True,\n queryset=Permission.objects.exclude(content_type__app_label=\"admin\"),\n slug_field=\"codename\",\n required=False,\n )\n inherited_permissions = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = (\n \"id\",\n \"username\",\n \"email\",\n \"password\",\n \"first_name\",\n \"last_name\",\n \"date_joined\",\n \"is_staff\",\n \"is_active\",\n \"is_superuser\",\n \"groups\",\n \"user_permissions\",\n \"inherited_permissions\",\n )\n\n def get_inherited_permissions(self, obj):\n return obj.get_group_permissions()\n\n def update(self, instance, validated_data):\n if \"password\" in validated_data:\n if len(validated_data.get(\"password\").replace(\"*\", \"\")) > 0:\n instance.set_password(validated_data.get(\"password\"))\n instance.save()\n validated_data.pop(\"password\")\n super().update(instance, validated_data)\n return instance\n\n def create(self, validated_data):\n groups = None\n if \"groups\" in validated_data:\n groups = validated_data.pop(\"groups\")\n user_permissions = None\n if \"user_permissions\" in validated_data:\n user_permissions = validated_data.pop(\"user_permissions\")\n password = None\n if (\n \"password\" in validated_data\n and len(validated_data.get(\"password\").replace(\"*\", \"\")) > 0\n ):\n password = validated_data.pop(\"password\")\n user = User.objects.create(**validated_data)\n # set groups\n if groups:\n user.groups.set(groups)\n # set permissions\n if user_permissions:\n user.user_permissions.set(user_permissions)\n # set password\n if password:\n user.set_password(password)\n user.save()\n return user\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n permissions = serializers.SlugRelatedField(\n many=True,\n queryset=Permission.objects.exclude(content_type__app_label=\"admin\"),\n slug_field=\"codename\",\n )\n\n class Meta:\n model = Group\n fields = (\n \"id\",\n \"name\",\n \"permissions\",\n )\n\n\nclass SocialAccountSerializer(serializers.ModelSerializer):\n name = serializers.SerializerMethodField()\n\n class Meta:\n model = SocialAccount\n fields = (\n \"id\",\n \"provider\",\n \"name\",\n )\n\n def get_name(self, obj):\n return obj.get_provider_account().to_str()\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n email = serializers.EmailField(allow_null=False)\n password = ObfuscatedUserPasswordField(required=False, allow_null=False)\n auth_token = serializers.SlugRelatedField(read_only=True, slug_field=\"key\")\n social_accounts = SocialAccountSerializer(\n many=True,\n read_only=True,\n source=\"socialaccount_set\",\n )\n\n class Meta:\n model = User\n fields = (\n \"email\",\n \"password\",\n \"first_name\",\n \"last_name\",\n \"auth_token\",\n \"social_accounts\",\n 
\"has_usable_password\",\n )\n\n\nclass ApplicationConfigurationSerializer(serializers.ModelSerializer):\n user_args = serializers.JSONField(binary=True, allow_null=True)\n\n def run_validation(self, data):\n # Empty strings treated as None to avoid unexpected behavior\n if \"user_args\" in data and data[\"user_args\"] == \"\":\n data[\"user_args\"] = None\n if \"language\" in data and data[\"language\"] == \"\":\n data[\"language\"] = None\n return super().run_validation(data)\n\n def update(self, instance, validated_data):\n if instance.app_logo and \"app_logo\" in validated_data:\n instance.app_logo.delete()\n return super().update(instance, validated_data)\n\n class Meta:\n model = ApplicationConfiguration\n fields = \"__all__\"\n", "path": "src/paperless/serialisers.py" } ]
diff --git a/src/paperless/serialisers.py b/src/paperless/serialisers.py index 8af19d53011..52f9e2b33d1 100644 --- a/src/paperless/serialisers.py +++ b/src/paperless/serialisers.py @@ -93,7 +93,7 @@ def create(self, validated_data): class GroupSerializer(serializers.ModelSerializer): permissions = serializers.SlugRelatedField( many=True, - queryset=Permission.objects.all(), + queryset=Permission.objects.exclude(content_type__app_label="admin"), slug_field="codename", )
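The one-line diff above mirrors what `UserSerializer.user_permissions` already did: it restricts the `SlugRelatedField` queryset so the `codename` slug becomes unique again. `Permission.codename` is only unique together with its `content_type`, and paperless's "History" permissions (django-auditlog's `LogEntry` model, assuming a stock install) collide with `django.contrib.admin`'s own `LogEntry` permissions, which is why `queryset.get(codename=...)` raised `MultipleObjectsReturned`. A hedged, shell-style sketch of the failure and the fix; it is meant for a Django shell inside a paperless-ngx-like project, not as a standalone script, and the specific codename is an assumption based on the stack trace:

```python
# Run inside `python manage.py shell` of a project that has both
# django.contrib.admin and auditlog installed, as the report implies.
from django.contrib.auth.models import Permission

# The codename alone is ambiguous: admin.LogEntry and auditlog.LogEntry both define it.
Permission.objects.filter(codename="view_logentry").count()   # -> 2 (assumed)

# This is effectively what SlugRelatedField.to_internal_value did before the fix:
# Permission.objects.get(codename="view_logentry")             # MultipleObjectsReturned

# After the fix the serializer's queryset excludes the admin app, so the slug
# lookup resolves to the auditlog ("History") permission unambiguously.
Permission.objects.exclude(content_type__app_label="admin").get(codename="view_logentry")
```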
saulpw__visidata-1722
[sheets] cursorDisplay for the sheet's own cursorDisplay crashes in Python 3.8.0 through 3.9.9 **Small description** In SheetsSheet, viewing the cursorDisplay cell for the SheetsSheet leads to infinite recursion, which crashes on versions of Python >= 3.8.0 and <= 3.9.9 **Expected result** The cursorDisplay should be blank or show a null. **Actual result with screenshot** ``` Fatal Python error: _Py_CheckRecursiveCall: Cannot recover from stack overflow. Python runtime state: initialized Current thread 0x00007f5f79c5fb80 (most recent call first): File "/home/midichef/.pyenv/versions/3.9.9/lib/python3.9/site-packages/visidata/column.py", line 308 in getValue File "/home/midichef/.pyenv/versions/3.9.9/lib/python3.9/site-packages/visidata/wrappers.py", line 108 in wrapply File "/home/midichef/.pyenv/versions/3.9.9/lib/python3.9/site-packages/visidata/column.py", line 327 in getCell File "/home/midichef/.pyenv/versions/3.9.9/lib/python3.9/site-packages/visidata/column.py", line 388 in getDisplayValue File "/home/midichef/.pyenv/versions/3.9.9/lib/python3.9/site-packages/visidata/sheets.py", line 453 in cursorDisplay File "/home/midichef/.pyenv/versions/3.9.9/lib/python3.9/site-packages/visidata/column.py", line 460 in getattrdeep File "/home/midichef/.pyenv/versions/3.9.9/lib/python3.9/site-packages/visidata/column.py", line 497 in <lambda> File "/home/midichef/.pyenv/versions/3.9.9/lib/python3.9/site-packages/visidata/column.py", line 270 in calcValue File "/home/midichef/.pyenv/versions/3.9.9/lib/python3.9/site-packages/visidata/column.py", line 308 in getValue File "/home/midichef/.pyenv/versions/3.9.9/lib/python3.9/site-packages/visidata/wrappers.py", line 108 in wrapply ... ``` **Steps to reproduce with sample data and a .vd** Open the SheetsSheet and move to the cursorDisplay column, or run: `vd -p cursordisplay.vdj` [cursordisplay.vdj.txt](https://github.com/saulpw/visidata/files/10484256/cursordisplay.vdj.txt) **Additional context** saul.pw/VisiData v2.11 I tested the behavior in different versions of Python: 3.7: no crashes in any version tested: 3.7.0, 3.7.4, 3.7.8, 3.7.12 or 3.7.16. 3.8: crashes in all versions tested: 3.8.0, 3.8.4, 3.8.8, 3.8.10, 3.8.16 3.9: crashes all minor versions from 3.9.0 through 3.9.9 (except 3.9.3 which I did not test), does not crash any from 3.9.10 through 3.9.16 3.10: no crash in any version tested: 3.10.0, 3.10.9 3.11: no crash in 3.11.0 Judging by the [Python 3.9.10 changelog](https://docs.python.org/release/3.9.10/whatsnew/changelog.html#changelog) the change in behavior for 3.9.10 is is likely related to [a Python bugfix allowing recovery from stack overflow](https://bugs.python.org/issue45806). This bug will affect people running the 20.04 Ubuntu LTS release, where the default Python version is 3.8.10.
[ { "content": "from copy import copy\nimport collections\nimport string\nimport itertools\nimport threading\nimport re\nimport time\nimport json\n\nfrom visidata import options, anytype, stacktrace, vd\nfrom visidata import asyncthread, dispwidth, clipstr, iterchars\nfrom visidata import wrapply, TypedWrapper, TypedExceptionWrapper\nfrom visidata import Extensible, AttrDict, undoAttrFunc\n\nclass InProgress(Exception):\n @property\n def stacktrace(self):\n return ['calculation in progress']\n\nINPROGRESS = TypedExceptionWrapper(None, exception=InProgress()) # sentinel\n\nvd.option('col_cache_size', 0, 'max number of cache entries in each cached column')\nvd.option('clean_names', False, 'clean column/sheet names to be valid Python identifiers', replay=True)\nvd.option('disp_formatter', 'generic', 'formatter to use for display and saving', replay=True)\n\n__all__ = [\n 'clean_to_id',\n 'Column',\n 'setitem',\n 'getattrdeep',\n 'setattrdeep',\n 'getitemdef',\n 'ColumnAttr', 'AttrColumn',\n 'ColumnItem', 'ItemColumn',\n 'SettableColumn',\n 'SubColumnFunc',\n 'SubColumnItem',\n 'SubColumnAttr',\n 'ColumnExpr', 'ExprColumn',\n 'DisplayWrapper',\n]\n\n\nclass DisplayWrapper:\n def __init__(self, value=None, *, display=None, note=None, notecolor=None, error=None):\n self.value = value # actual value (any type)\n self.display = display # displayed string\n self.note = note # single unicode character displayed in cell far right\n self.notecolor = notecolor # configurable color name (like 'color_warning')\n self.error = error # list of strings for stacktrace\n\n def __bool__(self):\n return bool(self.value)\n\n def __eq__(self, other):\n return self.value == other\n\n\ndef clean_to_id(s): # [Nas Banov] https://stackoverflow.com/a/3305731\n return re.sub(r'\\W|^(?=\\d)', '_', str(s)).strip('_')\n\n\ndef _default_colnames():\n 'A B C .. Z AA AB .. ZZ AAA .. 
to infinity'\n i=0\n while True:\n i += 1\n for x in itertools.product(string.ascii_uppercase, repeat=i):\n yield ''.join(x)\n\ndefault_colnames = _default_colnames()\n\n\nclass Column(Extensible):\n '''Base class for all column types.\n\n - *name*: name of this column.\n - *type*: ``anytype str int float date`` or other type-like conversion function.\n - *cache*: cache behavior\n\n - ``False`` (default): getValue never caches; calcValue is always called.\n - ``True``: getValue maintains a cache of ``options.col_cache_size``.\n - ``\"async\"``: ``getValue`` launches thread for every uncached result, returns invalid value until cache entry available.\n - *width*: == 0 if hidden, None if auto-compute next time.\n - *height*: max height, None/0 to auto-compute for each row.\n - *fmtstr*: format string as applied by column type.\n - *getter*: default calcValue calls ``getter(col, row)``.\n - *setter*: default putValue calls ``setter(col, row, val)``.\n - *kwargs*: other attributes to be set on this column.\n '''\n def __init__(self, name=None, *, type=anytype, cache=False, **kwargs):\n self.sheet = None # owning Sheet, set in .recalc() via Sheet.addColumn\n if name is None:\n name = next(default_colnames)\n self.name = str(name) # display visible name\n self.fmtstr = '' # by default, use str()\n self._type = type # anytype/str/int/float/date/func\n self.getter = lambda col, row: row\n self.setter = lambda col, row, value: vd.fail(col.name+' column cannot be changed')\n self._width = None # == 0 if hidden, None if auto-compute next time\n self.hoffset = 0 # starting horizontal (char) offset of displayed column value\n self.voffset = 0 # starting vertical (line) offset of displayed column value\n self.height = 1 # max height, None/0 to auto-compute for each row\n self.keycol = 0 # keycol index (or 0 if not key column)\n self.expr = None # Column-type-dependent parameter\n self.formatter = ''\n self.defer = False\n\n self.setCache(cache)\n for k, v in kwargs.items():\n setattr(self, k, v) # instead of __dict__.update(kwargs) to invoke property.setters\n\n def __copy__(self):\n cls = self.__class__\n ret = cls.__new__(cls)\n ret.__dict__.update(self.__dict__)\n ret.keycol = 0 # column copies lose their key status\n if self._cachedValues is not None:\n ret._cachedValues = collections.OrderedDict() # an unrelated cache for copied columns\n return ret\n\n def __deepcopy__(self, memo):\n return self.__copy__() # no separate deepcopy\n\n def __getstate__(self):\n d = {k:getattr(self, k) for k in 'name width height expr keycol formatter fmtstr voffset hoffset aggstr'.split() if hasattr(self, k)}\n d['type'] = self.type.__name__\n return d\n\n def __setstate__(self, d):\n for attr, v in d.items():\n setattr(self, attr, v)\n\n def recalc(self, sheet=None):\n 'Reset column cache, attach column to *sheet*, and reify column name.'\n if self._cachedValues:\n self._cachedValues.clear()\n if sheet:\n self.sheet = sheet\n self.name = self._name\n\n @property\n def name(self):\n 'Name of this column.'\n return self._name or ''\n\n @name.setter\n def name(self, name):\n if name is None:\n name = ''\n if isinstance(name, str):\n name = name.strip()\n else:\n name = str(name)\n\n if self.sheet:\n name = self.sheet.maybeClean(name)\n\n self._name = name\n\n @property\n def typestr(self):\n 'Type of this column as string.'\n return self._type.__name__\n\n @typestr.setter\n def typestr(self, v):\n self.type = vd.getGlobals()[v or 'anytype']\n\n @property\n def type(self):\n 'Type of this column.'\n return 
self._type\n\n @type.setter\n def type(self, t):\n if self._type != t:\n vd.addUndo(setattr, self, '_type', self.type)\n if not t:\n self._type = anytype\n elif isinstance(t, str):\n self.typestr = t\n else:\n self._type = t\n\n @property\n def width(self):\n 'Width of this column in characters. 0 or negative means hidden. None means not-yet-autocomputed.'\n return self._width\n\n @width.setter\n def width(self, w):\n if self.width != w:\n if self.width == 0 or w == 0: # hide/unhide\n vd.addUndo(setattr, self, '_width', self.width)\n self._width = w\n\n @property\n def _formatdict(col):\n if '=' in col.fmtstr:\n return dict(val.split('=', maxsplit=1) for val in col.fmtstr.split())\n return {}\n\n @property\n def fmtstr(self):\n 'Format string to use to display this column.'\n return self._fmtstr or vd.getType(self.type).fmtstr\n\n @fmtstr.setter\n def fmtstr(self, v):\n self._fmtstr = v\n\n def _format_len(self, typedval, **kwargs):\n if isinstance(typedval, dict):\n return f'{{{len(typedval)}}}'\n elif isinstance(typedval, (list, tuple)):\n return f'[{len(typedval)}]'\n\n return self.formatValue(typedval, **kwargs)\n\n def formatter_len(self, fmtstr):\n return self._format_len\n\n def formatter_generic(self, fmtstr):\n return self.formatValue\n\n def formatter_json(self, fmtstr):\n return lambda v,*args,**kwargs: json.dumps(v)\n\n def formatter_python(self, fmtstr):\n return lambda v,*args,**kwargs: str(v)\n\n def make_formatter(self):\n 'Return function for format(v) from the current formatter and fmtstr'\n _formatMaker = getattr(self, 'formatter_'+(self.formatter or self.sheet.options.disp_formatter))\n return _formatMaker(self._formatdict)\n\n def format(self, *args, **kwargs):\n return self.make_formatter()(*args, **kwargs)\n\n def formatValue(self, typedval, width=None):\n 'Return displayable string of *typedval* according to ``Column.fmtstr``.'\n if typedval is None:\n return None\n\n if self.type is anytype:\n if isinstance(typedval, (dict, list, tuple)):\n dispval, dispw = clipstr(iterchars(typedval), width)\n return dispval\n\n if isinstance(typedval, bytes):\n typedval = typedval.decode(options.encoding, options.encoding_errors)\n\n return vd.getType(self.type).formatter(self.fmtstr, typedval)\n\n def hide(self, hide=True):\n if hide:\n self.setWidth(0)\n else:\n self.setWidth(abs(self.width or self.getMaxWidth(self.sheet.visibleRows)))\n\n @property\n def hidden(self):\n 'Return True if width of this column is 0 or negative.'\n if self.width is None:\n return False\n return self.width <= 0\n\n def calcValue(self, row):\n 'Calculate and return value for *row* in this column.'\n return (self.getter)(self, row)\n\n def getTypedValue(self, row):\n 'Return the properly-typed value for the given row at this column, or a TypedWrapper object in case of null or error.'\n return wrapply(self.type, wrapply(self.getValue, row))\n\n def setCache(self, cache):\n '''Set cache behavior for this column to *cache*:\n\n - ``False`` (default): getValue never caches; calcValue is always called.\n - ``True``: getValue maintains a cache of ``options.col_cache_size``.\n - ``\"async\"``: ``getValue`` launches thread for every uncached result, maintains cache of infinite size. 
Returns invalid value until cache entry available.'''\n self.cache = cache\n self._cachedValues = collections.OrderedDict() if self.cache else None\n\n @asyncthread\n def _calcIntoCacheAsync(self, row):\n # causes isues when moved into _calcIntoCache gen case\n self._cachedValues[self.sheet.rowid(row)] = INPROGRESS\n self._calcIntoCache(row)\n\n def _calcIntoCache(self, row):\n ret = wrapply(self.calcValue, row)\n if not isinstance(ret, TypedExceptionWrapper) or ret.val is not INPROGRESS:\n self._cachedValues[self.sheet.rowid(row)] = ret\n return ret\n\n def getValue(self, row):\n 'Return value for *row* in this column, calculating if not cached.'\n\n if self.defer:\n try:\n row, rowmods = self.sheet._deferredMods[self.sheet.rowid(row)]\n return rowmods[self]\n except KeyError:\n pass\n\n if self._cachedValues is None:\n return self.calcValue(row)\n\n k = self.sheet.rowid(row)\n if k in self._cachedValues:\n return self._cachedValues[k]\n\n if self.cache == 'async':\n ret = self._calcIntoCacheAsync(row)\n else:\n ret = self._calcIntoCache(row)\n\n cachesize = options.col_cache_size\n if cachesize > 0 and len(self._cachedValues) > cachesize:\n self._cachedValues.popitem(last=False)\n\n return ret\n\n def getCell(self, row):\n 'Return DisplayWrapper for displayable cell value.'\n cellval = wrapply(self.getValue, row)\n typedval = wrapply(self.type, cellval)\n\n if isinstance(typedval, TypedWrapper):\n if isinstance(cellval, TypedExceptionWrapper): # calc failed\n exc = cellval.exception\n if cellval.forwarded:\n dispval = str(cellval) # traceback.format_exception_only(type(exc), exc)[-1].strip()\n else:\n dispval = options.disp_error_val\n return DisplayWrapper(cellval.val, error=exc.stacktrace,\n display=dispval,\n note=options.note_getter_exc,\n notecolor='color_error')\n elif typedval.val is None: # early out for strict None\n return DisplayWrapper(None, display='', # force empty display for None\n note=options.disp_note_none,\n notecolor='color_note_type')\n elif isinstance(typedval, TypedExceptionWrapper): # calc succeeded, type failed\n return DisplayWrapper(typedval.val, display=str(cellval),\n error=typedval.stacktrace,\n note=options.note_type_exc,\n notecolor='color_warning')\n else:\n return DisplayWrapper(typedval.val, display=str(typedval.val),\n error='unknown',\n note=options.note_type_exc,\n notecolor='color_warning')\n\n elif isinstance(typedval, threading.Thread):\n return DisplayWrapper(None,\n display=options.disp_pending,\n note=options.note_pending,\n notecolor='color_note_pending')\n\n dw = DisplayWrapper(cellval)\n\n try:\n dw.display = self.format(typedval, width=(self.width or 0)*2) or ''\n\n # annotate cells with raw value type in anytype columns, except for strings\n if self.type is anytype and type(cellval) is not str:\n typedesc = vd.typemap.get(type(cellval), None)\n if typedesc:\n dw.note = typedesc.icon\n dw.notecolor = 'color_note_type'\n\n except Exception as e: # formatting failure\n e.stacktrace = stacktrace()\n dw.error = e.stacktrace\n try:\n dw.display = str(cellval)\n except Exception as e:\n dw.display = str(e)\n dw.note = options.note_format_exc\n dw.notecolor = 'color_warning'\n\n return dw\n\n def getDisplayValue(self, row):\n 'Return string displayed in this column for given *row*.'\n return self.getCell(row).display\n\n def putValue(self, row, val):\n 'Change value for *row* in this column to *val* immediately. Does not check the type. 
Overridable; by default calls ``.setter(row, val)``.'\n return self.setter(self, row, val)\n\n def setValue(self, row, val):\n 'Change value for *row* in this column to *val*. Call ``putValue`` immediately if not a deferred column (added to deferred parent at load-time); otherwise cache until later ``putChanges``. Caller must add undo function.'\n if self.defer:\n self.cellChanged(row, val)\n else:\n self.putValue(row, val)\n self.sheet.setModified()\n\n def setValueSafe(self, row, value):\n 'setValue and ignore exceptions.'\n try:\n return self.setValue(row, value)\n except Exception as e:\n vd.exceptionCaught(e)\n\n @asyncthread\n def setValues(self, rows, *values):\n 'Set values in this column for *rows* to *values*, recycling values as needed to fill *rows*.'\n vd.addUndoSetValues([self], rows)\n for r, v in zip(rows, itertools.cycle(values)):\n self.setValueSafe(r, v)\n self.recalc()\n return vd.status('set %d cells to %d values' % (len(rows), len(values)))\n\n def setValuesTyped(self, rows, *values):\n 'Set values on this column for *rows* to *values*, coerced to column type, recycling values as needed to fill *rows*. Abort on type exception.'\n vd.addUndoSetValues([self], rows)\n for r, v in zip(rows, itertools.cycle(self.type(val) for val in values)):\n self.setValueSafe(r, v)\n\n self.recalc()\n\n return vd.status('set %d cells to %d values' % (len(rows), len(values)))\n\n def getMaxWidth(self, rows):\n 'Return the maximum length of any cell in column or its header (up to window width).'\n w = 0\n nlen = dispwidth(self.name)\n if len(rows) > 0:\n w = max(max(dispwidth(self.getDisplayValue(r), maxwidth=self.sheet.windowWidth) for r in rows), nlen)+2\n return max(w, nlen)\n\n\n# ---- Column makers\n\ndef setitem(r, i, v): # function needed for use in lambda\n r[i] = v\n return True\n\n\ndef getattrdeep(obj, attr, *default, getter=getattr):\n try:\n 'Return dotted attr (like \"a.b.c\") from obj, or default if any of the components are missing.'\n if not isinstance(attr, str):\n return getter(obj, attr, *default)\n\n try: # if attribute exists, return toplevel value, even if dotted\n if attr in obj:\n return getter(obj, attr)\n except Exception as e:\n pass\n\n attrs = attr.split('.')\n for a in attrs[:-1]:\n obj = getter(obj, a)\n\n return getter(obj, attrs[-1])\n except Exception as e:\n if not default: raise\n return default[0]\n\n\ndef setattrdeep(obj, attr, val, getter=getattr, setter=setattr):\n 'Set dotted attr (like \"a.b.c\") on obj to val.'\n if not isinstance(attr, str):\n return setter(obj, attr, val)\n\n try: # if attribute exists, overwrite toplevel value, even if dotted\n getter(obj, attr)\n return setter(obj, attr, val)\n except Exception as e:\n pass\n\n attrs = attr.split('.')\n for a in attrs[:-1]:\n try:\n obj = getter(obj, a)\n except Exception as e:\n obj = obj[a] = type(obj)() # assume homogeneous nesting\n\n setter(obj, attrs[-1], val)\n\n\ndef getitemdeep(obj, k, *default):\n return getattrdeep(obj, k, *default, getter=getitem)\n\ndef setitemdeep(obj, k, val):\n return setattrdeep(obj, k, val, getter=getitemdef, setter=setitem)\n\ndef AttrColumn(name='', attr=None, **kwargs):\n 'Column using getattr/setattr with *attr*.'\n return Column(name,\n expr=attr if attr is not None else name,\n getter=lambda col,row: getattrdeep(row, col.expr),\n setter=lambda col,row,val: setattrdeep(row, col.expr, val),\n **kwargs)\n\ndef getitem(o, k, default=None):\n return default if o is None else o[k]\n\ndef getitemdef(o, k, default=None):\n try:\n return default if o is None 
else o[k]\n except Exception:\n return default\n\nclass ItemColumn(Column):\n 'Column using getitem/setitem with *key*.'\n def __init__(self, name=None, expr=None, **kwargs):\n super().__init__(name,\n expr=expr if expr is not None else name,\n getter=lambda col,row: getitemdeep(row, col.expr, None),\n setter=lambda col,row,val: setitemdeep(row, col.expr, val),\n **kwargs)\n\n\nclass SubColumnFunc(Column):\n 'Column compositor; preprocess row with *subfunc*(row, *expr*) before passing to *origcol*.getValue and *origcol*.setValue.'\n def __init__(self, name='', origcol=None, expr=None, subfunc=getitemdef, **kwargs):\n super().__init__(name, type=origcol.type, width=origcol.width, expr=expr, **kwargs)\n self.origcol = origcol\n self.subfunc = subfunc\n\n def calcValue(self, row):\n subrow = self.subfunc(row, self.expr)\n if subrow is not None:\n # call getValue to use deferred values from source sheet\n return self.origcol.getValue(subrow)\n\n def putValue(self, row, value):\n subrow = self.subfunc(row, self.expr)\n if subrow is None:\n vd.fail('no source row')\n self.origcol.setValue(subrow, value)\n\n def recalc(self, sheet=None):\n Column.recalc(self, sheet)\n self.origcol.recalc() # reset cache but don't change sheet\n\n\ndef SubColumnAttr(attrname, c, **kwargs):\n if 'name' not in kwargs:\n kwargs['name'] = c.name\n return SubColumnFunc(origcol=c, subfunc=getattrdeep, expr=attrname, **kwargs)\n\ndef SubColumnItem(idx, c, **kwargs):\n if 'name' not in kwargs:\n kwargs['name'] = c.name\n return SubColumnFunc(origcol=c, subfunc=getitemdef, expr=idx, **kwargs)\n\nclass ExprColumn(Column):\n 'Column using *expr* to derive the value from each row.'\n def __init__(self, name, expr=None, **kwargs):\n super().__init__(name, **kwargs)\n self.expr = expr or name\n self.ncalcs = 0\n self.totaltime = 0\n self.maxtime = 0\n\n def calcValue(self, row):\n t0 = time.perf_counter()\n r = self.sheet.evalExpr(self.compiledExpr, row, col=self)\n t1 = time.perf_counter()\n self.ncalcs += 1\n self.maxtime = max(self.maxtime, t1-t0)\n self.totaltime += (t1-t0)\n return r\n\n def putValue(self, row, val):\n a = self.getDisplayValue(row)\n b = self.format(self.type(val))\n if a != b:\n vd.warning('%s calced %s not %s' % (self.name, a, b))\n\n @property\n def expr(self):\n return self._expr\n\n @expr.setter\n def expr(self, expr):\n self.compiledExpr = compile(expr, '<expr>', 'eval') if expr else None\n self._expr = expr\n\n\nclass SettableColumn(Column):\n 'Column using rowid to store and retrieve values internally.'\n def putValue(self, row, value):\n self._store[self.sheet.rowid(row)] = value\n\n def calcValue(self, row):\n return self._store.get(self.sheet.rowid(row), None)\n\n\nSettableColumn.init('_store', dict, copy=True)\n\n\n# synonyms\nColumnItem = ItemColumn\nColumnAttr = AttrColumn\nColumnExpr = ExprColumn\n", "path": "visidata/column.py" } ]
[ { "content": "from copy import copy\nimport collections\nimport string\nimport itertools\nimport threading\nimport re\nimport time\nimport json\n\nfrom visidata import options, anytype, stacktrace, vd\nfrom visidata import asyncthread, dispwidth, clipstr, iterchars\nfrom visidata import wrapply, TypedWrapper, TypedExceptionWrapper\nfrom visidata import Extensible, AttrDict, undoAttrFunc\n\nclass InProgress(Exception):\n @property\n def stacktrace(self):\n return ['calculation in progress']\n\nINPROGRESS = TypedExceptionWrapper(None, exception=InProgress()) # sentinel\n\nvd.option('col_cache_size', 0, 'max number of cache entries in each cached column')\nvd.option('clean_names', False, 'clean column/sheet names to be valid Python identifiers', replay=True)\nvd.option('disp_formatter', 'generic', 'formatter to use for display and saving', replay=True)\n\n__all__ = [\n 'clean_to_id',\n 'Column',\n 'setitem',\n 'getattrdeep',\n 'setattrdeep',\n 'getitemdef',\n 'ColumnAttr', 'AttrColumn',\n 'ColumnItem', 'ItemColumn',\n 'SettableColumn',\n 'SubColumnFunc',\n 'SubColumnItem',\n 'SubColumnAttr',\n 'ColumnExpr', 'ExprColumn',\n 'DisplayWrapper',\n]\n\n\nclass DisplayWrapper:\n def __init__(self, value=None, *, display=None, note=None, notecolor=None, error=None):\n self.value = value # actual value (any type)\n self.display = display # displayed string\n self.note = note # single unicode character displayed in cell far right\n self.notecolor = notecolor # configurable color name (like 'color_warning')\n self.error = error # list of strings for stacktrace\n\n def __bool__(self):\n return bool(self.value)\n\n def __eq__(self, other):\n return self.value == other\n\n\ndef clean_to_id(s): # [Nas Banov] https://stackoverflow.com/a/3305731\n return re.sub(r'\\W|^(?=\\d)', '_', str(s)).strip('_')\n\n\ndef _default_colnames():\n 'A B C .. Z AA AB .. ZZ AAA .. 
to infinity'\n i=0\n while True:\n i += 1\n for x in itertools.product(string.ascii_uppercase, repeat=i):\n yield ''.join(x)\n\ndefault_colnames = _default_colnames()\n\n\nclass Column(Extensible):\n '''Base class for all column types.\n\n - *name*: name of this column.\n - *type*: ``anytype str int float date`` or other type-like conversion function.\n - *cache*: cache behavior\n\n - ``False`` (default): getValue never caches; calcValue is always called.\n - ``True``: getValue maintains a cache of ``options.col_cache_size``.\n - ``\"async\"``: ``getValue`` launches thread for every uncached result, returns invalid value until cache entry available.\n - *width*: == 0 if hidden, None if auto-compute next time.\n - *height*: max height, None/0 to auto-compute for each row.\n - *fmtstr*: format string as applied by column type.\n - *getter*: default calcValue calls ``getter(col, row)``.\n - *setter*: default putValue calls ``setter(col, row, val)``.\n - *kwargs*: other attributes to be set on this column.\n '''\n def __init__(self, name=None, *, type=anytype, cache=False, **kwargs):\n self.sheet = None # owning Sheet, set in .recalc() via Sheet.addColumn\n if name is None:\n name = next(default_colnames)\n self.name = str(name) # display visible name\n self.fmtstr = '' # by default, use str()\n self._type = type # anytype/str/int/float/date/func\n self.getter = lambda col, row: row\n self.setter = lambda col, row, value: vd.fail(col.name+' column cannot be changed')\n self._width = None # == 0 if hidden, None if auto-compute next time\n self.hoffset = 0 # starting horizontal (char) offset of displayed column value\n self.voffset = 0 # starting vertical (line) offset of displayed column value\n self.height = 1 # max height, None/0 to auto-compute for each row\n self.keycol = 0 # keycol index (or 0 if not key column)\n self.expr = None # Column-type-dependent parameter\n self.formatter = ''\n self.defer = False\n\n self.setCache(cache)\n for k, v in kwargs.items():\n setattr(self, k, v) # instead of __dict__.update(kwargs) to invoke property.setters\n\n def __copy__(self):\n cls = self.__class__\n ret = cls.__new__(cls)\n ret.__dict__.update(self.__dict__)\n ret.keycol = 0 # column copies lose their key status\n if self._cachedValues is not None:\n ret._cachedValues = collections.OrderedDict() # an unrelated cache for copied columns\n return ret\n\n def __deepcopy__(self, memo):\n return self.__copy__() # no separate deepcopy\n\n def __getstate__(self):\n d = {k:getattr(self, k) for k in 'name width height expr keycol formatter fmtstr voffset hoffset aggstr'.split() if hasattr(self, k)}\n d['type'] = self.type.__name__\n return d\n\n def __setstate__(self, d):\n for attr, v in d.items():\n setattr(self, attr, v)\n\n def recalc(self, sheet=None):\n 'Reset column cache, attach column to *sheet*, and reify column name.'\n if self._cachedValues:\n self._cachedValues.clear()\n if sheet:\n self.sheet = sheet\n self.name = self._name\n\n @property\n def name(self):\n 'Name of this column.'\n return self._name or ''\n\n @name.setter\n def name(self, name):\n if name is None:\n name = ''\n if isinstance(name, str):\n name = name.strip()\n else:\n name = str(name)\n\n if self.sheet:\n name = self.sheet.maybeClean(name)\n\n self._name = name\n\n @property\n def typestr(self):\n 'Type of this column as string.'\n return self._type.__name__\n\n @typestr.setter\n def typestr(self, v):\n self.type = vd.getGlobals()[v or 'anytype']\n\n @property\n def type(self):\n 'Type of this column.'\n return 
self._type\n\n @type.setter\n def type(self, t):\n if self._type != t:\n vd.addUndo(setattr, self, '_type', self.type)\n if not t:\n self._type = anytype\n elif isinstance(t, str):\n self.typestr = t\n else:\n self._type = t\n\n @property\n def width(self):\n 'Width of this column in characters. 0 or negative means hidden. None means not-yet-autocomputed.'\n return self._width\n\n @width.setter\n def width(self, w):\n if self.width != w:\n if self.width == 0 or w == 0: # hide/unhide\n vd.addUndo(setattr, self, '_width', self.width)\n self._width = w\n\n @property\n def _formatdict(col):\n if '=' in col.fmtstr:\n return dict(val.split('=', maxsplit=1) for val in col.fmtstr.split())\n return {}\n\n @property\n def fmtstr(self):\n 'Format string to use to display this column.'\n return self._fmtstr or vd.getType(self.type).fmtstr\n\n @fmtstr.setter\n def fmtstr(self, v):\n self._fmtstr = v\n\n def _format_len(self, typedval, **kwargs):\n if isinstance(typedval, dict):\n return f'{{{len(typedval)}}}'\n elif isinstance(typedval, (list, tuple)):\n return f'[{len(typedval)}]'\n\n return self.formatValue(typedval, **kwargs)\n\n def formatter_len(self, fmtstr):\n return self._format_len\n\n def formatter_generic(self, fmtstr):\n return self.formatValue\n\n def formatter_json(self, fmtstr):\n return lambda v,*args,**kwargs: json.dumps(v)\n\n def formatter_python(self, fmtstr):\n return lambda v,*args,**kwargs: str(v)\n\n def make_formatter(self):\n 'Return function for format(v) from the current formatter and fmtstr'\n _formatMaker = getattr(self, 'formatter_'+(self.formatter or self.sheet.options.disp_formatter))\n return _formatMaker(self._formatdict)\n\n def format(self, *args, **kwargs):\n return self.make_formatter()(*args, **kwargs)\n\n def formatValue(self, typedval, width=None):\n 'Return displayable string of *typedval* according to ``Column.fmtstr``.'\n if typedval is None:\n return None\n\n if self.type is anytype:\n if isinstance(typedval, (dict, list, tuple)):\n dispval, dispw = clipstr(iterchars(typedval), width)\n return dispval\n\n if isinstance(typedval, bytes):\n typedval = typedval.decode(options.encoding, options.encoding_errors)\n\n return vd.getType(self.type).formatter(self.fmtstr, typedval)\n\n def hide(self, hide=True):\n if hide:\n self.setWidth(0)\n else:\n self.setWidth(abs(self.width or self.getMaxWidth(self.sheet.visibleRows)))\n\n @property\n def hidden(self):\n 'Return True if width of this column is 0 or negative.'\n if self.width is None:\n return False\n return self.width <= 0\n\n def calcValue(self, row):\n 'Calculate and return value for *row* in this column.'\n return (self.getter)(self, row)\n\n def getTypedValue(self, row):\n 'Return the properly-typed value for the given row at this column, or a TypedWrapper object in case of null or error.'\n return wrapply(self.type, wrapply(self.getValue, row))\n\n def setCache(self, cache):\n '''Set cache behavior for this column to *cache*:\n\n - ``False`` (default): getValue never caches; calcValue is always called.\n - ``True``: getValue maintains a cache of ``options.col_cache_size``.\n - ``\"async\"``: ``getValue`` launches thread for every uncached result, maintains cache of infinite size. 
Returns invalid value until cache entry available.'''\n self.cache = cache\n self._cachedValues = collections.OrderedDict() if self.cache else None\n\n @asyncthread\n def _calcIntoCacheAsync(self, row):\n # causes isues when moved into _calcIntoCache gen case\n self._cachedValues[self.sheet.rowid(row)] = INPROGRESS\n self._calcIntoCache(row)\n\n def _calcIntoCache(self, row):\n ret = wrapply(self.calcValue, row)\n if not isinstance(ret, TypedExceptionWrapper) or ret.val is not INPROGRESS:\n self._cachedValues[self.sheet.rowid(row)] = ret\n return ret\n\n def getValue(self, row):\n 'Return value for *row* in this column, calculating if not cached.'\n\n if self.defer:\n try:\n row, rowmods = self.sheet._deferredMods[self.sheet.rowid(row)]\n return rowmods[self]\n except KeyError:\n pass\n\n if self._cachedValues is None:\n return self.calcValue(row)\n\n k = self.sheet.rowid(row)\n if k in self._cachedValues:\n return self._cachedValues[k]\n\n if self.cache == 'async':\n ret = self._calcIntoCacheAsync(row)\n else:\n ret = self._calcIntoCache(row)\n\n cachesize = options.col_cache_size\n if cachesize > 0 and len(self._cachedValues) > cachesize:\n self._cachedValues.popitem(last=False)\n\n return ret\n\n def getCell(self, row):\n 'Return DisplayWrapper for displayable cell value.'\n cellval = wrapply(self.getValue, row)\n typedval = wrapply(self.type, cellval)\n\n if isinstance(typedval, TypedWrapper):\n if isinstance(cellval, TypedExceptionWrapper): # calc failed\n exc = cellval.exception\n if cellval.forwarded:\n dispval = str(cellval) # traceback.format_exception_only(type(exc), exc)[-1].strip()\n else:\n dispval = options.disp_error_val\n return DisplayWrapper(cellval.val, error=exc.stacktrace,\n display=dispval,\n note=options.note_getter_exc,\n notecolor='color_error')\n elif typedval.val is None: # early out for strict None\n return DisplayWrapper(None, display='', # force empty display for None\n note=options.disp_note_none,\n notecolor='color_note_type')\n elif isinstance(typedval, TypedExceptionWrapper): # calc succeeded, type failed\n return DisplayWrapper(typedval.val, display=str(cellval),\n error=typedval.stacktrace,\n note=options.note_type_exc,\n notecolor='color_warning')\n else:\n return DisplayWrapper(typedval.val, display=str(typedval.val),\n error='unknown',\n note=options.note_type_exc,\n notecolor='color_warning')\n\n elif isinstance(typedval, threading.Thread):\n return DisplayWrapper(None,\n display=options.disp_pending,\n note=options.note_pending,\n notecolor='color_note_pending')\n\n dw = DisplayWrapper(cellval)\n\n try:\n dw.display = self.format(typedval, width=(self.width or 0)*2) or ''\n\n # annotate cells with raw value type in anytype columns, except for strings\n if self.type is anytype and type(cellval) is not str:\n typedesc = vd.typemap.get(type(cellval), None)\n if typedesc:\n dw.note = typedesc.icon\n dw.notecolor = 'color_note_type'\n\n except Exception as e: # formatting failure\n e.stacktrace = stacktrace()\n dw.error = e.stacktrace\n try:\n dw.display = str(cellval)\n except Exception as e:\n dw.display = str(e)\n dw.note = options.note_format_exc\n dw.notecolor = 'color_warning'\n\n return dw\n\n def getDisplayValue(self, row):\n 'Return string displayed in this column for given *row*.'\n return self.getCell(row).display\n\n def putValue(self, row, val):\n 'Change value for *row* in this column to *val* immediately. Does not check the type. 
Overridable; by default calls ``.setter(row, val)``.'\n return self.setter(self, row, val)\n\n def setValue(self, row, val):\n 'Change value for *row* in this column to *val*. Call ``putValue`` immediately if not a deferred column (added to deferred parent at load-time); otherwise cache until later ``putChanges``. Caller must add undo function.'\n if self.defer:\n self.cellChanged(row, val)\n else:\n self.putValue(row, val)\n self.sheet.setModified()\n\n def setValueSafe(self, row, value):\n 'setValue and ignore exceptions.'\n try:\n return self.setValue(row, value)\n except Exception as e:\n vd.exceptionCaught(e)\n\n @asyncthread\n def setValues(self, rows, *values):\n 'Set values in this column for *rows* to *values*, recycling values as needed to fill *rows*.'\n vd.addUndoSetValues([self], rows)\n for r, v in zip(rows, itertools.cycle(values)):\n self.setValueSafe(r, v)\n self.recalc()\n return vd.status('set %d cells to %d values' % (len(rows), len(values)))\n\n def setValuesTyped(self, rows, *values):\n 'Set values on this column for *rows* to *values*, coerced to column type, recycling values as needed to fill *rows*. Abort on type exception.'\n vd.addUndoSetValues([self], rows)\n for r, v in zip(rows, itertools.cycle(self.type(val) for val in values)):\n self.setValueSafe(r, v)\n\n self.recalc()\n\n return vd.status('set %d cells to %d values' % (len(rows), len(values)))\n\n def getMaxWidth(self, rows):\n 'Return the maximum length of any cell in column or its header (up to window width).'\n w = 0\n nlen = dispwidth(self.name)\n if len(rows) > 0:\n w = max(max(dispwidth(self.getDisplayValue(r), maxwidth=self.sheet.windowWidth) for r in rows), nlen)+2\n return max(w, nlen)\n\n\n# ---- Column makers\n\ndef setitem(r, i, v): # function needed for use in lambda\n r[i] = v\n return True\n\n\ndef getattrdeep(obj, attr, *default, getter=getattr):\n try:\n 'Return dotted attr (like \"a.b.c\") from obj, or default if any of the components are missing.'\n if not isinstance(attr, str):\n return getter(obj, attr, *default)\n\n try: # if attribute exists, return toplevel value, even if dotted\n if attr in obj:\n return getter(obj, attr)\n except RecursionError: #1696\n raise\n except Exception as e:\n pass\n\n attrs = attr.split('.')\n for a in attrs[:-1]:\n obj = getter(obj, a)\n\n return getter(obj, attrs[-1])\n except Exception as e:\n if not default: raise\n return default[0]\n\n\ndef setattrdeep(obj, attr, val, getter=getattr, setter=setattr):\n 'Set dotted attr (like \"a.b.c\") on obj to val.'\n if not isinstance(attr, str):\n return setter(obj, attr, val)\n\n try: # if attribute exists, overwrite toplevel value, even if dotted\n getter(obj, attr)\n return setter(obj, attr, val)\n except Exception as e:\n pass\n\n attrs = attr.split('.')\n for a in attrs[:-1]:\n try:\n obj = getter(obj, a)\n except Exception as e:\n obj = obj[a] = type(obj)() # assume homogeneous nesting\n\n setter(obj, attrs[-1], val)\n\n\ndef getitemdeep(obj, k, *default):\n return getattrdeep(obj, k, *default, getter=getitem)\n\ndef setitemdeep(obj, k, val):\n return setattrdeep(obj, k, val, getter=getitemdef, setter=setitem)\n\ndef AttrColumn(name='', attr=None, **kwargs):\n 'Column using getattr/setattr with *attr*.'\n return Column(name,\n expr=attr if attr is not None else name,\n getter=lambda col,row: getattrdeep(row, col.expr),\n setter=lambda col,row,val: setattrdeep(row, col.expr, val),\n **kwargs)\n\ndef getitem(o, k, default=None):\n return default if o is None else o[k]\n\ndef getitemdef(o, k, 
default=None):\n try:\n return default if o is None else o[k]\n except Exception:\n return default\n\nclass ItemColumn(Column):\n 'Column using getitem/setitem with *key*.'\n def __init__(self, name=None, expr=None, **kwargs):\n super().__init__(name,\n expr=expr if expr is not None else name,\n getter=lambda col,row: getitemdeep(row, col.expr, None),\n setter=lambda col,row,val: setitemdeep(row, col.expr, val),\n **kwargs)\n\n\nclass SubColumnFunc(Column):\n 'Column compositor; preprocess row with *subfunc*(row, *expr*) before passing to *origcol*.getValue and *origcol*.setValue.'\n def __init__(self, name='', origcol=None, expr=None, subfunc=getitemdef, **kwargs):\n super().__init__(name, type=origcol.type, width=origcol.width, expr=expr, **kwargs)\n self.origcol = origcol\n self.subfunc = subfunc\n\n def calcValue(self, row):\n subrow = self.subfunc(row, self.expr)\n if subrow is not None:\n # call getValue to use deferred values from source sheet\n return self.origcol.getValue(subrow)\n\n def putValue(self, row, value):\n subrow = self.subfunc(row, self.expr)\n if subrow is None:\n vd.fail('no source row')\n self.origcol.setValue(subrow, value)\n\n def recalc(self, sheet=None):\n Column.recalc(self, sheet)\n self.origcol.recalc() # reset cache but don't change sheet\n\n\ndef SubColumnAttr(attrname, c, **kwargs):\n if 'name' not in kwargs:\n kwargs['name'] = c.name\n return SubColumnFunc(origcol=c, subfunc=getattrdeep, expr=attrname, **kwargs)\n\ndef SubColumnItem(idx, c, **kwargs):\n if 'name' not in kwargs:\n kwargs['name'] = c.name\n return SubColumnFunc(origcol=c, subfunc=getitemdef, expr=idx, **kwargs)\n\nclass ExprColumn(Column):\n 'Column using *expr* to derive the value from each row.'\n def __init__(self, name, expr=None, **kwargs):\n super().__init__(name, **kwargs)\n self.expr = expr or name\n self.ncalcs = 0\n self.totaltime = 0\n self.maxtime = 0\n\n def calcValue(self, row):\n t0 = time.perf_counter()\n r = self.sheet.evalExpr(self.compiledExpr, row, col=self)\n t1 = time.perf_counter()\n self.ncalcs += 1\n self.maxtime = max(self.maxtime, t1-t0)\n self.totaltime += (t1-t0)\n return r\n\n def putValue(self, row, val):\n a = self.getDisplayValue(row)\n b = self.format(self.type(val))\n if a != b:\n vd.warning('%s calced %s not %s' % (self.name, a, b))\n\n @property\n def expr(self):\n return self._expr\n\n @expr.setter\n def expr(self, expr):\n self.compiledExpr = compile(expr, '<expr>', 'eval') if expr else None\n self._expr = expr\n\n\nclass SettableColumn(Column):\n 'Column using rowid to store and retrieve values internally.'\n def putValue(self, row, value):\n self._store[self.sheet.rowid(row)] = value\n\n def calcValue(self, row):\n return self._store.get(self.sheet.rowid(row), None)\n\n\nSettableColumn.init('_store', dict, copy=True)\n\n\n# synonyms\nColumnItem = ItemColumn\nColumnAttr = AttrColumn\nColumnExpr = ExprColumn\n", "path": "visidata/column.py" } ]
diff --git a/visidata/column.py b/visidata/column.py index 940050984..3b63954e4 100644 --- a/visidata/column.py +++ b/visidata/column.py @@ -451,6 +451,8 @@ def getattrdeep(obj, attr, *default, getter=getattr): try: # if attribute exists, return toplevel value, even if dotted if attr in obj: return getter(obj, attr) + except RecursionError: #1696 + raise except Exception as e: pass
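For reference, the one-line guard added in the diff above only changes how the `attr in obj` membership probe inside `getattrdeep` handles runaway recursion (the `#1696` issue referenced in the diff comment); the dotted-attribute walk itself is unchanged. Below is a small self-contained sketch of that lookup, mirroring the function shown in `visidata/column.py` above — an illustrative copy for experimentation, not the installed visidata package:

```python
# Illustrative copy of visidata's getattrdeep, including the RecursionError pass-through.
def getattrdeep(obj, attr, *default, getter=getattr):
    'Return dotted attr (like "a.b.c") from obj, or default if any component is missing.'
    try:
        if not isinstance(attr, str):
            return getter(obj, attr, *default)

        try:
            # if the full (possibly dotted) name exists as a literal key, use it directly
            if attr in obj:
                return getter(obj, attr)
        except RecursionError:   # 1696: do not swallow runaway recursion
            raise
        except Exception:
            pass

        attrs = attr.split('.')
        for a in attrs[:-1]:
            obj = getter(obj, a)
        return getter(obj, attrs[-1])
    except Exception:
        if not default:
            raise
        return default[0]


class Row:
    pass

row = Row()
row.inner = Row()
row.inner.value = 42

print(getattrdeep(row, 'inner.value'))       # -> 42: walks the dotted path
print(getattrdeep(row, 'inner.missing', 0))  # -> 0: default returned when a component is missing
```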
liqd__a4-meinberlin-4707
rules/participate in project As you can see in the test, the participate_project rule behaves a bit weird for project group members. I think they should also be allowed to participate. The question is what it is used for. Cool! The participate_project rule is a bit unexpected, so we should check that out. Like where it is used and what for. But anyway, will merge for now and add an issue. _Originally posted by @fuzzylogic2000 in https://github.com/liqd/a4-meinberlin/pull/4077#pullrequestreview-837466549_
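For context, the eventual fix (shown in full in the after_files and pr_diff below) simply adds the project-group predicate to the participation permission, matching what `view_project` already did. A minimal sketch of the resulting django-rules declaration and the kind of check the tests perform, assuming the adhocracy4 predicates already imported in `meinberlin/apps/projects/rules.py`:

```python
# Sketch of the fixed rule, mirroring meinberlin/apps/projects/rules.py as changed below.
import rules
from rules.predicates import is_superuser

from adhocracy4.organisations.predicates import is_initiator
from adhocracy4.projects.predicates import is_live
from adhocracy4.projects.predicates import is_moderator
from adhocracy4.projects.predicates import is_prj_group_member  # now part of participation
from adhocracy4.projects.predicates import is_project_member
from adhocracy4.projects.predicates import is_public

rules.set_perm('a4projects.participate_in_project',
               is_superuser | is_initiator |
               is_moderator | is_prj_group_member |
               ((is_public | is_project_member) & is_live))

# The updated tests then assert, for example:
#   rules.has_perm('a4projects.participate_in_project', group_member_in_pro, project)  # True
```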
[ { "content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom adhocracy4.organisations.predicates import is_initiator\nfrom adhocracy4.projects.predicates import is_live\nfrom adhocracy4.projects.predicates import is_moderator\nfrom adhocracy4.projects.predicates import is_prj_group_member\nfrom adhocracy4.projects.predicates import is_project_member\nfrom adhocracy4.projects.predicates import is_public\nfrom adhocracy4.projects.predicates import is_semipublic\n\nrules.remove_perm('a4projects.view_project')\nrules.add_perm('a4projects.view_project',\n is_superuser | is_initiator |\n is_moderator | is_prj_group_member |\n ((is_public | is_semipublic | is_project_member)\n & is_live))\n\nrules.set_perm('a4projects.participate_in_project',\n is_superuser | is_initiator | is_moderator |\n ((is_public | is_project_member) & is_live))\n", "path": "meinberlin/apps/projects/rules.py" } ]
[ { "content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom adhocracy4.organisations.predicates import is_initiator\nfrom adhocracy4.projects.predicates import is_live\nfrom adhocracy4.projects.predicates import is_moderator\nfrom adhocracy4.projects.predicates import is_prj_group_member\nfrom adhocracy4.projects.predicates import is_project_member\nfrom adhocracy4.projects.predicates import is_public\nfrom adhocracy4.projects.predicates import is_semipublic\n\nrules.remove_perm('a4projects.view_project')\nrules.add_perm('a4projects.view_project',\n is_superuser | is_initiator |\n is_moderator | is_prj_group_member |\n ((is_public | is_semipublic | is_project_member)\n & is_live))\n\nrules.set_perm('a4projects.participate_in_project',\n is_superuser | is_initiator |\n is_moderator | is_prj_group_member |\n ((is_public | is_project_member) & is_live))\n", "path": "meinberlin/apps/projects/rules.py" } ]
diff --git a/meinberlin/apps/projects/rules.py b/meinberlin/apps/projects/rules.py index b69d0d6f27..8f831a3d32 100644 --- a/meinberlin/apps/projects/rules.py +++ b/meinberlin/apps/projects/rules.py @@ -17,5 +17,6 @@ & is_live)) rules.set_perm('a4projects.participate_in_project', - is_superuser | is_initiator | is_moderator | + is_superuser | is_initiator | + is_moderator | is_prj_group_member | ((is_public | is_project_member) & is_live)) diff --git a/meinberlin/test/helpers.py b/meinberlin/test/helpers.py index f253eed1ac..8b2ab0ad2a 100644 --- a/meinberlin/test/helpers.py +++ b/meinberlin/test/helpers.py @@ -6,22 +6,20 @@ @factory.django.mute_signals(post_save) -def setup_group_member(organisation, project, group_factory, - user_factory): - group1 = group_factory() - group_member = user_factory.create(groups=(group1, )) - if organisation: - organisation.groups.add(group1) - else: - organisation = None - if project: - organisation = project.organisation - organisation.groups.add(group1) - project.group = group1 - project.save() - else: - project = None - return group_member, organisation, project +def setup_group_members(project, group_factory, user_factory): + group_org = group_factory() + project.organisation.groups.add(group_org) + group_member_in_org = user_factory.create(groups=(group_org, )) + + group_pro = group_factory() + project.group = group_pro + project.save() + group_member_in_pro = user_factory.create(groups=(group_pro, )) + + group_out = group_factory() + group_member_out = user_factory.create(groups=(group_out, )) + + return project, group_member_in_org, group_member_in_pro, group_member_out def assert_dashboard_form_component_response( diff --git a/tests/bplan/dashboard_components/test_views_bplan_basic.py b/tests/bplan/dashboard_components/test_views_bplan_basic.py index 934a302398..d5ae193294 100644 --- a/tests/bplan/dashboard_components/test_views_bplan_basic.py +++ b/tests/bplan/dashboard_components/test_views_bplan_basic.py @@ -6,7 +6,7 @@ from adhocracy4.test.helpers import redirect_target from meinberlin.apps.bplan.phases import StatementPhase from meinberlin.test.helpers import assert_dashboard_form_component_response -from meinberlin.test.helpers import setup_group_member +from meinberlin.test.helpers import setup_group_members component = components.projects.get('bplan') @@ -62,10 +62,10 @@ def test_edit_view_group_member( user_factory): module = module_factory(project=bplan) phase = phase_factory(phase_content=StatementPhase(), module=module) - group_member, organisation, bplan = setup_group_member( - None, bplan, group_factory, user_factory) + bplan, _, group_member_in_pro, _ = setup_group_members( + bplan, group_factory, user_factory) url = component.get_base_url(bplan) - client.login(username=group_member.email, password='password') + client.login(username=group_member_in_pro.email, password='password') response = client.get(url) assert_dashboard_form_component_response(response, component) assert len(mail.outbox) == 1 diff --git a/tests/bplan/dashboard_components/test_views_bplan_plans.py b/tests/bplan/dashboard_components/test_views_bplan_plans.py index 037112093b..4ee879c146 100644 --- a/tests/bplan/dashboard_components/test_views_bplan_plans.py +++ b/tests/bplan/dashboard_components/test_views_bplan_plans.py @@ -4,7 +4,7 @@ from adhocracy4.test.helpers import redirect_target from meinberlin.apps.bplan.phases import StatementPhase from meinberlin.test.helpers import assert_dashboard_form_component_response -from meinberlin.test.helpers import 
setup_group_member +from meinberlin.test.helpers import setup_group_members component = components.projects.get('plans') @@ -37,11 +37,12 @@ def test_edit_view_group_member(client, bplan, module_factory, phase_factory, plan_factory, group_factory, user_factory): module = module_factory(project=bplan) phase_factory(phase_content=StatementPhase(), module=module) - group_member, organisation, bplan = setup_group_member( - None, bplan, group_factory, user_factory) + bplan, _, group_member_in_pro, _ = \ + setup_group_members(bplan, group_factory, user_factory) + organisation = bplan.organisation plan = plan_factory(organisation=organisation) url = component.get_base_url(bplan) - client.login(username=group_member.email, password='password') + client.login(username=group_member_in_pro.email, password='password') response = client.get(url) assert_dashboard_form_component_response(response, component) diff --git a/tests/bplan/rules/test_rules_add.py b/tests/bplan/rules/test_rules_add.py index af691d211f..af78e97f4a 100644 --- a/tests/bplan/rules/test_rules_add.py +++ b/tests/bplan/rules/test_rules_add.py @@ -2,7 +2,7 @@ import rules from django.contrib.auth.models import AnonymousUser -from meinberlin.test.helpers import setup_group_member +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_bplan.add_bplan' @@ -14,14 +14,18 @@ def test_perm_exists(): @pytest.mark.django_db -def test_add(organisation, user_factory, group_factory): +def test_add(project, user_factory, group_factory, admin): anonymous = AnonymousUser() user = user_factory() + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + organisation = project.organisation initiator = organisation.initiators.first() - group_member, organisation, _ = setup_group_member( - organisation, None, group_factory, user_factory) assert not rules.has_perm(perm_name, anonymous, organisation) assert not rules.has_perm(perm_name, user, organisation) assert rules.has_perm(perm_name, initiator, organisation) - assert not rules.has_perm(perm_name, group_member, organisation) + assert not rules.has_perm(perm_name, group_member_in_pro, organisation) + assert not rules.has_perm(perm_name, group_member_in_org, organisation) + assert not rules.has_perm(perm_name, group_member_out, organisation) + assert not rules.has_perm(perm_name, admin, organisation) diff --git a/tests/bplan/rules/test_rules_add_statement.py b/tests/bplan/rules/test_rules_add_statement.py index 69dca81497..aaddeaa179 100644 --- a/tests/bplan/rules/test_rules_add_statement.py +++ b/tests/bplan/rules/test_rules_add_statement.py @@ -7,6 +7,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.bplan import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_bplan.add_statement' @@ -16,42 +17,60 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, user): +def test_pre_phase(phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.StatementPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + 
assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert not rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert not rules.has_perm(perm_name, initiator, module) + assert not rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_active(phase_factory, user): +def test_active(phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.StatementPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, module) assert rules.has_perm(perm_name, user, module) + assert rules.has_perm(perm_name, group_member_out, module) + assert rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_post_phase(phase_factory, user): +def test_post_phase(phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.StatementPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert not rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert not rules.has_perm(perm_name, initiator, module) + assert not rules.has_perm(perm_name, admin, module) diff --git a/tests/bplan/rules/test_rules_change.py b/tests/bplan/rules/test_rules_change.py index adf0adbcf3..8e66187aa5 100644 --- a/tests/bplan/rules/test_rules_change.py +++ b/tests/bplan/rules/test_rules_change.py @@ -7,7 +7,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.bplan import phases -from meinberlin.test.helpers import setup_group_member +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_bplan.change_bplan' @@ -24,17 +24,20 @@ def test_pre_phase(phase_factory, user_factory, group_factory): phases.StatementPhase) anonymous, moderator, initiator = setup_users(project) user = user_factory() - group_member, _, project = setup_group_member(None, project, - group_factory, - user_factory) + admin = user_factory(is_superuser=True) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, project) assert not rules.has_perm(perm_name, user, project) + assert not rules.has_perm(perm_name, group_member_out, project) + assert not rules.has_perm(perm_name, group_member_in_org, project) + assert not rules.has_perm(perm_name, 
group_member_in_pro, project) assert not rules.has_perm(perm_name, moderator, project) assert rules.has_perm(perm_name, initiator, project) - assert not rules.has_perm(perm_name, group_member, project) + assert not rules.has_perm(perm_name, admin, project) @pytest.mark.django_db @@ -43,17 +46,20 @@ def test_phase_active(phase_factory, user_factory, group_factory): phases.StatementPhase) anonymous, moderator, initiator = setup_users(project) user = user_factory() - group_member, _, project = setup_group_member(None, project, - group_factory, - user_factory) + admin = user_factory(is_superuser=True) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, project) assert not rules.has_perm(perm_name, user, project) + assert not rules.has_perm(perm_name, group_member_out, project) + assert not rules.has_perm(perm_name, group_member_in_org, project) + assert not rules.has_perm(perm_name, group_member_in_pro, project) assert not rules.has_perm(perm_name, moderator, project) assert rules.has_perm(perm_name, initiator, project) - assert not rules.has_perm(perm_name, group_member, project) + assert not rules.has_perm(perm_name, admin, project) @pytest.mark.django_db @@ -64,16 +70,20 @@ def test_phase_active_project_draft(phase_factory, user_factory, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) user = user_factory() - group_member, _, project = setup_group_member(None, project, - group_factory, - user_factory) + admin = user_factory(is_superuser=True) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, project) assert not rules.has_perm(perm_name, user, project) + assert not rules.has_perm(perm_name, group_member_out, project) + assert not rules.has_perm(perm_name, group_member_in_org, project) + assert not rules.has_perm(perm_name, group_member_in_pro, project) assert not rules.has_perm(perm_name, moderator, project) assert rules.has_perm(perm_name, initiator, project) - assert not rules.has_perm(perm_name, group_member, project) + assert not rules.has_perm(perm_name, admin, project) @pytest.mark.django_db @@ -84,14 +94,17 @@ def test_post_phase_project_archived(phase_factory, user_factory, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) user = user_factory() - group_member, _, project = setup_group_member(None, project, - group_factory, - user_factory) + admin = user_factory(is_superuser=True) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, project) assert not rules.has_perm(perm_name, user, project) + assert not rules.has_perm(perm_name, group_member_out, project) + assert not rules.has_perm(perm_name, group_member_in_org, project) + assert not rules.has_perm(perm_name, group_member_in_pro, project) assert not rules.has_perm(perm_name, moderator, project) assert rules.has_perm(perm_name, initiator, project) - assert not rules.has_perm(perm_name, group_member, project) + assert not rules.has_perm(perm_name, admin, project) diff --git a/tests/budgeting/rules/test_rules_add.py 
b/tests/budgeting/rules/test_rules_add.py index 7868dc23ad..6ff2f12124 100644 --- a/tests/budgeting/rules/test_rules_add.py +++ b/tests/budgeting/rules/test_rules_add.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.budgeting import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_budgeting.add_proposal' @@ -17,124 +18,179 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, user): +def test_pre_phase(phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_request_phase_active(phase_factory, user): +def test_request_phase_active(phase_factory, user, admin, user_factory, + group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert rules.has_perm(perm_name, user, module) + assert rules.has_perm(perm_name, group_member_out, module) + assert rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_collect_phase_active(phase_factory, user): +def test_collect_phase_active(phase_factory, user, admin, user_factory, + group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert rules.has_perm(perm_name, user, module) + assert rules.has_perm(perm_name, group_member_out, module) + assert rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_rating_phase_active(phase_factory, user): +def test_rating_phase_active(phase_factory, user, admin, user_factory, + group_factory): phase, module, project, _ = setup_phase(phase_factory, None, 
phases.RatingPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, user, user2): +def test_phase_active_project_private(phase_factory, user, admin, + user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.RequestPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, participant, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, user, user2): +def test_phase_active_project_semipublic(phase_factory, user, admin, + user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.RequestPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, participant, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, user): +def test_phase_active_project_draft(phase_factory, user, admin, user_factory, + group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.RequestPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, 
user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, user): +def test_post_phase_project_archived(phase_factory, user, admin, user_factory, + group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.RequestPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) diff --git a/tests/budgeting/rules/test_rules_change.py b/tests/budgeting/rules/test_rules_change.py index 9f8420f765..6c94e1c01b 100644 --- a/tests/budgeting/rules/test_rules_change.py +++ b/tests/budgeting/rules/test_rules_change.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.budgeting import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_budgeting.change_proposal' @@ -17,79 +18,114 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, proposal_factory, user): +def test_pre_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_request_phase_active(phase_factory, proposal_factory, user): +def test_request_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = 
\ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_collect_phase_active(phase_factory, proposal_factory, user): +def test_collect_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_rating_phase_active(phase_factory, proposal_factory, user): +def test_rating_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RatingPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -98,20 +134,27 @@ def test_phase_active_project_private(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, user, item) 
assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -120,39 +163,59 @@ def test_phase_active_project_semipublic(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, proposal_factory, user): +def test_phase_active_project_draft( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, proposal_factory, user): +def test_post_phase_project_archived( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not 
rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/budgeting/rules/test_rules_comment.py b/tests/budgeting/rules/test_rules_comment.py index cada056e06..21f5fff092 100644 --- a/tests/budgeting/rules/test_rules_comment.py +++ b/tests/budgeting/rules/test_rules_comment.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.budgeting import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_budgeting.comment_proposal' @@ -17,126 +18,204 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, proposal_factory, user): +def test_pre_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_request_phase_active(phase_factory, proposal_factory, user): +def test_request_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_collect_phase_active(phase_factory, proposal_factory, user): +def test_collect_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, 
group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_rating_phase_active(phase_factory, proposal_factory, user): +def test_rating_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RatingPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() 
project.participants.add(participant) assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, proposal_factory, user): +def test_phase_active_project_draft( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, proposal_factory, user): +def test_post_phase_project_archived( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/budgeting/rules/test_rules_moderate.py b/tests/budgeting/rules/test_rules_moderate.py index c0501b64d0..e5245dd14f 100644 --- a/tests/budgeting/rules/test_rules_moderate.py +++ b/tests/budgeting/rules/test_rules_moderate.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.budgeting import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_budgeting.moderate_proposal' @@ -17,79 +18,114 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, proposal_factory, user): +def test_pre_phase( + phase_factory, 
proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_request_phase_active(phase_factory, proposal_factory, user): +def test_request_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_collect_phase_active(phase_factory, proposal_factory, user): +def test_collect_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_rating_phase_active(phase_factory, proposal_factory, user): +def test_rating_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RatingPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, 
user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -98,20 +134,27 @@ def test_phase_active_project_private(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -120,39 +163,59 @@ def test_phase_active_project_semipublic(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, proposal_factory, user): +def test_phase_active_project_draft( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_draft=True) anonymous, moderator, 
initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, proposal_factory, user): +def test_post_phase_project_archived( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) diff --git a/tests/budgeting/rules/test_rules_rate.py b/tests/budgeting/rules/test_rules_rate.py index fa032d4089..def450ad58 100644 --- a/tests/budgeting/rules/test_rules_rate.py +++ b/tests/budgeting/rules/test_rules_rate.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.budgeting import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_budgeting.rate_proposal' @@ -17,132 +18,204 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, proposal_factory, user): +def test_pre_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_request_phase_active(phase_factory, proposal_factory, user): +def 
test_request_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_collect_phase_active(phase_factory, proposal_factory, user): +def test_collect_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_rating_phase_active(phase_factory, proposal_factory, user): +def test_rating_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RatingPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + 
setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, proposal_factory, user): +def test_phase_active_project_draft( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, proposal_factory, user): +def test_post_phase_project_archived( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_archived=True) anonymous, moderator, initiator = 
setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/budgeting/rules/test_rules_support.py b/tests/budgeting/rules/test_rules_support.py index c555c03a58..7276a3aed5 100644 --- a/tests/budgeting/rules/test_rules_support.py +++ b/tests/budgeting/rules/test_rules_support.py @@ -11,6 +11,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.budgeting import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_budgeting.support_proposal' @@ -20,38 +21,57 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, proposal_factory, user, admin): +def test_pre_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, module, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_support_phase_active(phase_factory, proposal_factory, user, admin): +def test_support_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, module, project, item = setup_phase(phase_factory, proposal_factory, phases.SupportPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_between_support_and_voting_phase(phase_factory, proposal_factory, - user, admin): +def 
test_between_support_and_voting_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): support_phase, module, project, item = setup_phase(phase_factory, proposal_factory, phases.SupportPhase) @@ -65,61 +85,94 @@ def test_between_support_and_voting_phase(phase_factory, proposal_factory, between_phases = support_phase.end_date + timedelta(hours=1) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_time(between_phases): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_voting_phase_active(phase_factory, proposal_factory, user, admin): +def test_voting_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, module, project, item = setup_phase(phase_factory, proposal_factory, phases.VotingPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_rating_phase_active(phase_factory, proposal_factory, user, admin): +def test_rating_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, module, project, item = setup_phase(phase_factory, proposal_factory, phases.RatingPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, proposal_factory, - user, user2, admin): +def test_phase_active_project_private( + phase_factory, proposal_factory, user, admin, user_factory, + 
group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.SupportPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, participant, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) @@ -127,20 +180,28 @@ def test_phase_active_project_private(phase_factory, proposal_factory, @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, proposal_factory, - user, user2, admin): +def test_phase_active_project_semipublic( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.SupportPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, participant, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) @@ -148,34 +209,50 @@ def test_phase_active_project_semipublic(phase_factory, proposal_factory, @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, proposal_factory, - user, admin): +def test_phase_active_project_draft( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.SupportPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) assert 
rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, proposal_factory, - user, admin): +def test_post_phase_project_archived( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.SupportPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) assert rules.has_perm(perm_name, admin, item) diff --git a/tests/budgeting/rules/test_rules_view.py b/tests/budgeting/rules/test_rules_view.py index 118ca3477f..9442a13c73 100644 --- a/tests/budgeting/rules/test_rules_view.py +++ b/tests/budgeting/rules/test_rules_view.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.budgeting import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_budgeting.view_proposal' @@ -17,124 +18,202 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, proposal_factory, user): +def test_pre_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_request_phase_active(phase_factory, proposal_factory, user): +def test_request_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, 
group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_collect_phase_active(phase_factory, proposal_factory, user): +def test_collect_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_rating_phase_active(phase_factory, proposal_factory, user): +def test_rating_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RatingPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) - participant = user2 + participant = user_factory() project.participants.add(participant) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PRIVATE with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, participant, item) assert rules.has_perm(perm_name, moderator, item) assert 
rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) - participant = user2 + participant = user_factory() project.participants.add(participant) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, participant, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, proposal_factory, user): +def test_phase_active_project_draft( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, proposal_factory, user): +def test_post_phase_project_archived( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, 
admin, item) diff --git a/tests/budgeting/rules/test_rules_view_support.py b/tests/budgeting/rules/test_rules_view_support.py index 89903bac69..7f15dce13b 100644 --- a/tests/budgeting/rules/test_rules_view_support.py +++ b/tests/budgeting/rules/test_rules_view_support.py @@ -11,6 +11,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.budgeting import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_budgeting.view_support' @@ -20,38 +21,53 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, proposal_factory, user, admin): +def test_pre_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, module, project, item = setup_phase(phase_factory, proposal_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_support_phase_active(phase_factory, proposal_factory, user, admin): +def test_support_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, module, project, item = setup_phase(phase_factory, proposal_factory, phases.SupportPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, module) assert rules.has_perm(perm_name, user, module) + assert rules.has_perm(perm_name, group_member_out, module) + assert rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_between_support_and_voting_phase(phase_factory, proposal_factory, - user, admin): +def test_between_support_and_voting_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): support_phase, module, project, item = setup_phase(phase_factory, proposal_factory, phases.SupportPhase) @@ -65,61 +81,86 @@ def test_between_support_and_voting_phase(phase_factory, proposal_factory, between_phases = support_phase.end_date + timedelta(hours=1) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_time(between_phases): assert rules.has_perm(perm_name, anonymous, module) assert rules.has_perm(perm_name, user, module) + assert rules.has_perm(perm_name, group_member_out, module) + assert rules.has_perm(perm_name, group_member_in_org, module) + assert 
rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_voting_phase_active(phase_factory, proposal_factory, user, admin): +def test_voting_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, module, project, item = setup_phase(phase_factory, proposal_factory, phases.VotingPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_rating_phase_active(phase_factory, proposal_factory, user, admin): +def test_rating_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, module, project, item = setup_phase(phase_factory, proposal_factory, phases.RatingPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, proposal_factory, - user, user2, admin): +def test_phase_active_project_private( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, module, project, item = setup_phase( phase_factory, proposal_factory, phases.SupportPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, participant, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) @@ -127,20 +168,26 @@ def test_phase_active_project_private(phase_factory, proposal_factory, @pytest.mark.django_db -def 
test_phase_active_project_semipublic(phase_factory, proposal_factory, - user, user2, admin): +def test_phase_active_project_semipublic( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, module, project, item = setup_phase( phase_factory, proposal_factory, phases.SupportPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, module) assert rules.has_perm(perm_name, user, module) + assert rules.has_perm(perm_name, group_member_out, module) + assert rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, participant, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) @@ -148,35 +195,47 @@ def test_phase_active_project_semipublic(phase_factory, proposal_factory, @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, proposal_factory, user, - admin): +def test_phase_active_project_draft( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, module, project, item = setup_phase(phase_factory, proposal_factory, phases.SupportPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, proposal_factory, user, - admin): +def test_post_phase_project_archived( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, module, project, item = setup_phase( phase_factory, proposal_factory, phases.SupportPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) assert rules.has_perm(perm_name, admin, module) diff --git a/tests/budgeting/rules/test_rules_vote.py b/tests/budgeting/rules/test_rules_vote.py index c46f4ac52c..e6b6e49612 100644 --- a/tests/budgeting/rules/test_rules_vote.py +++ 
b/tests/budgeting/rules/test_rules_vote.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.budgeting import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_budgeting.vote_proposal' @@ -20,186 +21,257 @@ def test_perm_exists(): # I guess the permissions aren't completely tested by testing the rules here # b/c the existing rule only checks for the phase being active... @pytest.mark.django_db -def test_pre_phase(user_factory, phase_factory, proposal_factory, user): - phase, _, project, proposal = setup_phase(phase_factory, proposal_factory, - phases.RequestPhase) +def test_pre_phase( + user_factory, phase_factory, proposal_factory, user, admin, + group_factory): + phase, _, project, item = setup_phase( + phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) - admin = user_factory(is_superuser=True) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): - assert not rules.has_perm(perm_name, anonymous, proposal) - assert not rules.has_perm(perm_name, user, proposal) - assert not rules.has_perm(perm_name, moderator, proposal) - assert not rules.has_perm(perm_name, initiator, proposal) - assert rules.has_perm(perm_name, admin, proposal) + assert not rules.has_perm(perm_name, anonymous, item) + assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) + assert not rules.has_perm(perm_name, moderator, item) + assert not rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_request_phase_active(user_factory, phase_factory, proposal_factory, - user): - phase, _, project, proposal = setup_phase(phase_factory, proposal_factory, - phases.RequestPhase) +def test_request_phase_active( + user_factory, phase_factory, proposal_factory, user, admin, + group_factory): + phase, _, project, item = setup_phase( + phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) - admin = user_factory(is_superuser=True) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): - assert not rules.has_perm(perm_name, anonymous, proposal) - assert not rules.has_perm(perm_name, user, proposal) - assert not rules.has_perm(perm_name, moderator, proposal) - assert not rules.has_perm(perm_name, initiator, proposal) - assert rules.has_perm(perm_name, admin, proposal) + assert not rules.has_perm(perm_name, anonymous, item) + assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) + assert not rules.has_perm(perm_name, moderator, item) + assert not rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) 
@pytest.mark.django_db -def test_collect_phase_active(user_factory, phase_factory, proposal_factory, - user): - phase, _, project, proposal = setup_phase(phase_factory, proposal_factory, - phases.CollectPhase) +def test_collect_phase_active( + user_factory, phase_factory, proposal_factory, user, admin, + group_factory): + phase, _, project, item = setup_phase( + phase_factory, proposal_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) - admin = user_factory(is_superuser=True) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): - assert not rules.has_perm(perm_name, anonymous, proposal) - assert not rules.has_perm(perm_name, user, proposal) - assert rules.has_perm(perm_name, admin, proposal) - assert not rules.has_perm(perm_name, moderator, proposal) - assert not rules.has_perm(perm_name, initiator, proposal) + assert not rules.has_perm(perm_name, anonymous, item) + assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) + assert not rules.has_perm(perm_name, moderator, item) + assert not rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_rating_phase_active(user_factory, phase_factory, proposal_factory, - user): - phase, _, project, proposal = setup_phase(phase_factory, proposal_factory, - phases.RatingPhase) +def test_rating_phase_active( + user_factory, phase_factory, proposal_factory, user, admin, + group_factory): + phase, _, project, item = setup_phase( + phase_factory, proposal_factory, phases.RatingPhase) anonymous, moderator, initiator = setup_users(project) - admin = user_factory(is_superuser=True) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): - assert not rules.has_perm(perm_name, anonymous, proposal) - assert not rules.has_perm(perm_name, user, proposal) - assert rules.has_perm(perm_name, admin, proposal) - assert not rules.has_perm(perm_name, moderator, proposal) - assert not rules.has_perm(perm_name, initiator, proposal) + assert not rules.has_perm(perm_name, anonymous, item) + assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) + assert not rules.has_perm(perm_name, moderator, item) + assert not rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_voting_phase_active(user_factory, phase_factory, proposal_factory, - user): - phase, _, project, proposal = setup_phase(phase_factory, proposal_factory, - phases.VotingPhase) +def test_voting_phase_active( + user_factory, phase_factory, proposal_factory, user, admin, + group_factory): + phase, _, project, item = setup_phase( + phase_factory, proposal_factory, phases.VotingPhase) anonymous, moderator, initiator = setup_users(project) - admin = 
user_factory(is_superuser=True) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): - assert rules.has_perm(perm_name, anonymous, proposal) - assert rules.has_perm(perm_name, user, proposal) - assert rules.has_perm(perm_name, admin, proposal) - assert rules.has_perm(perm_name, moderator, proposal) - assert rules.has_perm(perm_name, initiator, proposal) + assert rules.has_perm(perm_name, anonymous, item) + assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) + assert rules.has_perm(perm_name, moderator, item) + assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(user_factory, phase_factory, - proposal_factory, user, user2): - phase, _, project, proposal = setup_phase( +def test_phase_active_project_private( + user_factory, phase_factory, proposal_factory, user, admin, + group_factory): + phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.VotingPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) - admin = user_factory(is_superuser=True) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE with freeze_phase(phase): - assert rules.has_perm(perm_name, anonymous, proposal) - assert rules.has_perm(perm_name, user, proposal) - assert rules.has_perm(perm_name, participant, proposal) - assert rules.has_perm(perm_name, moderator, proposal) - assert rules.has_perm(perm_name, initiator, proposal) - assert rules.has_perm(perm_name, admin, proposal) + assert rules.has_perm(perm_name, anonymous, item) + assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, participant, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) + assert rules.has_perm(perm_name, moderator, item) + assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(user_factory, phase_factory, - proposal_factory, user, user2): - phase, _, project, proposal = setup_phase( +def test_phase_active_project_semipublic( + user_factory, phase_factory, proposal_factory, user, admin, + group_factory): + phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.VotingPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) - admin = user_factory(is_superuser=True) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): - 
assert rules.has_perm(perm_name, anonymous, proposal) - assert rules.has_perm(perm_name, user, proposal) - assert rules.has_perm(perm_name, participant, proposal) - assert rules.has_perm(perm_name, moderator, proposal) - assert rules.has_perm(perm_name, initiator, proposal) - assert rules.has_perm(perm_name, admin, proposal) + assert rules.has_perm(perm_name, anonymous, item) + assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) + assert rules.has_perm(perm_name, participant, item) + assert rules.has_perm(perm_name, moderator, item) + assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(user_factory, phase_factory, - proposal_factory, user): - phase, _, project, proposal = setup_phase( +def test_phase_active_project_draft( + user_factory, phase_factory, proposal_factory, user, admin, + group_factory): + phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.VotingPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) - admin = user_factory(is_superuser=True) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): - assert not rules.has_perm(perm_name, anonymous, proposal) - assert not rules.has_perm(perm_name, user, proposal) - assert not rules.has_perm(perm_name, moderator, proposal) - assert not rules.has_perm(perm_name, initiator, proposal) - assert rules.has_perm(perm_name, admin, proposal) + assert not rules.has_perm(perm_name, anonymous, item) + assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) + assert not rules.has_perm(perm_name, moderator, item) + assert not rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(user_factory, phase_factory, - proposal_factory, user): - phase, _, project, proposal = setup_phase( +def test_post_phase_project_archived( + user_factory, phase_factory, proposal_factory, user, admin, + group_factory): + phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.VotingPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) - admin = user_factory(is_superuser=True) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): - assert not rules.has_perm(perm_name, anonymous, proposal) - assert not rules.has_perm(perm_name, user, proposal) - assert not rules.has_perm(perm_name, moderator, proposal) - assert not rules.has_perm(perm_name, initiator, proposal) - assert rules.has_perm(perm_name, admin, proposal) + assert not rules.has_perm(perm_name, anonymous, item) + assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + 
assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) + assert not rules.has_perm(perm_name, moderator, item) + assert not rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_proposal_archived(user_factory, phase_factory, - proposal_factory, user): - phase, _, project, proposal = setup_phase( +def test_phase_active_proposal_archived( + user_factory, phase_factory, proposal_factory, user, admin, + group_factory): + phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.VotingPhase, module__project__is_draft=True) - proposal.is_archived = True - proposal.save() + item.is_archived = True + item.save() anonymous, moderator, initiator = setup_users(project) - admin = user_factory(is_superuser=True) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): - assert not rules.has_perm(perm_name, anonymous, proposal) - assert not rules.has_perm(perm_name, user, proposal) - assert not rules.has_perm(perm_name, moderator, proposal) - assert not rules.has_perm(perm_name, initiator, proposal) - assert rules.has_perm(perm_name, admin, proposal) + assert not rules.has_perm(perm_name, anonymous, item) + assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) + assert not rules.has_perm(perm_name, moderator, item) + assert not rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/budgeting/test_proposals_api_filtering.py b/tests/budgeting/test_proposals_api_filtering.py index 507a043321..21b257d45a 100644 --- a/tests/budgeting/test_proposals_api_filtering.py +++ b/tests/budgeting/test_proposals_api_filtering.py @@ -11,7 +11,6 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.budgeting import phases -from meinberlin.test.helpers import setup_group_member @pytest.mark.django_db @@ -19,16 +18,17 @@ def test_proposal_list_filter_mixin(apiclient, user_factory, group_factory, phase_factory, proposal_factory, category_factory, label_factory, moderation_task_factory): - support_phase, module, project, proposal = setup_phase(phase_factory, - proposal_factory, - phases.SupportPhase) + support_phase, module, project, proposal = setup_phase( + phase_factory, proposal_factory, phases.SupportPhase + ) user = user_factory() - group_member, organisation, project = setup_group_member(None, project, - group_factory, - user_factory) - anonymous, moderator, initiator = setup_users(project) + group_pro = group_factory() + project.group = group_pro + project.save() + group_member = user_factory.create(groups=(group_pro, )) + voting_phase = phase_factory( phase_content=phases.VotingPhase(), module=module, diff --git a/tests/documents/rules/test_rules_add_chapter.py b/tests/documents/rules/test_rules_add_chapter.py index 15d97e6ea9..47a5ee68c1 100644 --- a/tests/documents/rules/test_rules_add_chapter.py +++ b/tests/documents/rules/test_rules_add_chapter.py @@ -8,7 +8,7 @@ from 
adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.documents import phases -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_documents.add_chapter' @@ -24,17 +24,17 @@ def test_pre_phase(phase_factory, user_factory, phases.CommentPhase) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) - assert not rules.has_perm(perm_name, group_member_in_orga, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) assert not rules.has_perm(perm_name, group_member_out, module) assert not rules.has_perm(perm_name, moderator, module) - assert rules.has_perm(perm_name, group_member_in_project, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, initiator, module) @@ -45,17 +45,17 @@ def test_phase_active(phase_factory, user_factory, phases.CommentPhase) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) - assert not rules.has_perm(perm_name, group_member_in_orga, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) assert not rules.has_perm(perm_name, group_member_out, module) assert not rules.has_perm(perm_name, moderator, module) - assert rules.has_perm(perm_name, group_member_in_project, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, initiator, module) @@ -69,18 +69,18 @@ def test_phase_active_project_private(phase_factory, user_factory, anonymous, moderator, initiator = setup_users(project) participant = user_factory() project.participants.add(participant) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PRIVATE with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) assert not rules.has_perm(perm_name, participant, module) - assert not rules.has_perm(perm_name, group_member_in_orga, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) assert not rules.has_perm(perm_name, group_member_out, module) assert not rules.has_perm(perm_name, moderator, module) - assert rules.has_perm(perm_name, group_member_in_project, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, initiator, module) @@ -94,18 +94,18 @@ def 
test_phase_active_project_semipublic(phase_factory, user_factory, anonymous, moderator, initiator = setup_users(project) participant = user_factory() project.participants.add(participant) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) assert not rules.has_perm(perm_name, participant, module) - assert not rules.has_perm(perm_name, group_member_in_orga, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) assert not rules.has_perm(perm_name, group_member_out, module) assert not rules.has_perm(perm_name, moderator, module) - assert rules.has_perm(perm_name, group_member_in_project, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, initiator, module) @@ -117,17 +117,17 @@ def test_phase_active_project_draft(phase_factory, user_factory, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) - assert not rules.has_perm(perm_name, group_member_in_orga, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) assert not rules.has_perm(perm_name, group_member_out, module) assert not rules.has_perm(perm_name, moderator, module) - assert rules.has_perm(perm_name, group_member_in_project, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, initiator, module) @@ -139,15 +139,15 @@ def test_post_phase_project_archived(phase_factory, user_factory, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) - assert not rules.has_perm(perm_name, group_member_in_orga, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) assert not rules.has_perm(perm_name, group_member_out, module) assert not rules.has_perm(perm_name, moderator, module) - assert rules.has_perm(perm_name, group_member_in_project, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, initiator, module) diff --git a/tests/documents/rules/test_rules_change_chapter.py b/tests/documents/rules/test_rules_change_chapter.py index 79aad9b32e..b1eb2e2370 100644 --- a/tests/documents/rules/test_rules_change_chapter.py +++ b/tests/documents/rules/test_rules_change_chapter.py @@ -8,7 +8,7 @@ from adhocracy4.test.helpers import 
setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.documents import phases -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_documents.change_chapter' @@ -27,18 +27,18 @@ def test_pre_phase(phase_factory, chapter_factory, creator = item.creator anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) assert not rules.has_perm(perm_name, moderator, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, initiator, item) @@ -51,18 +51,18 @@ def test_phase_active(phase_factory, chapter_factory, creator = item.creator anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) assert not rules.has_perm(perm_name, moderator, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, initiator, item) @@ -78,8 +78,8 @@ def test_phase_active_project_private(phase_factory, chapter_factory, anonymous, moderator, initiator = setup_users(project) participant = user_factory() project.participants.add(participant) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PRIVATE with freeze_phase(phase): @@ -87,10 +87,10 @@ def test_phase_active_project_private(phase_factory, chapter_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) assert not rules.has_perm(perm_name, moderator, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert 
rules.has_perm(perm_name, initiator, item) @@ -106,8 +106,8 @@ def test_phase_active_project_semipublic(phase_factory, chapter_factory, anonymous, moderator, initiator = setup_users(project) participant = user_factory() project.participants.add(participant) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): @@ -115,10 +115,10 @@ def test_phase_active_project_semipublic(phase_factory, chapter_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) assert not rules.has_perm(perm_name, moderator, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, initiator, item) @@ -132,18 +132,18 @@ def test_phase_active_project_draft(phase_factory, chapter_factory, creator = item.creator anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) assert not rules.has_perm(perm_name, moderator, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, initiator, item) @@ -157,16 +157,16 @@ def test_post_phase_project_archived(phase_factory, chapter_factory, creator = item.creator anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) assert not rules.has_perm(perm_name, moderator, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, initiator, item) diff --git a/tests/documents/rules/test_rules_comment_chapter.py b/tests/documents/rules/test_rules_comment_chapter.py index 
a191d0c022..5a55e6d365 100644 --- a/tests/documents/rules/test_rules_comment_chapter.py +++ b/tests/documents/rules/test_rules_comment_chapter.py @@ -8,7 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.documents import phases -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_documents.comment_chapter' @@ -24,16 +24,18 @@ def test_pre_phase(phase_factory, chapter_factory, user_factory, phases.CommentPhase) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) @@ -45,16 +47,18 @@ def test_phase_active(phase_factory, chapter_factory, user_factory, phases.CommentPhase) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) - assert rules.has_perm(perm_name, group_member_in_orga, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_in_org, item) assert rules.has_perm(perm_name, group_member_out, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) @@ -70,16 +74,18 @@ def test_phase_active_project_private(phase_factory, chapter_factory, anonymous, moderator, initiator = setup_users(project) participant = user_factory() project.participants.add(participant) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.access == Access.PRIVATE with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) - assert 
rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, participant, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) @@ -96,16 +102,18 @@ def test_phase_active_project_semipublic(phase_factory, chapter_factory, anonymous, moderator, initiator = setup_users(project) participant = user_factory() project.participants.add(participant) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, participant, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) @@ -120,16 +128,18 @@ def test_phase_active_project_draft(phase_factory, chapter_factory, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) @@ -144,15 +154,17 @@ def test_post_phase_project_archived(phase_factory, chapter_factory, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert 
rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) diff --git a/tests/documents/rules/test_rules_comment_paragraph.py b/tests/documents/rules/test_rules_comment_paragraph.py index c6a1e3c15e..424b72903e 100644 --- a/tests/documents/rules/test_rules_comment_paragraph.py +++ b/tests/documents/rules/test_rules_comment_paragraph.py @@ -8,7 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.documents import phases -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_documents.comment_paragraph' @@ -24,8 +24,9 @@ def test_pre_phase(phase_factory, chapter_factory, paragraph_factory, phases.CommentPhase) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator paragraph = paragraph_factory(chapter=item) @@ -33,9 +34,10 @@ def test_pre_phase(phase_factory, chapter_factory, paragraph_factory, with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, paragraph) assert not rules.has_perm(perm_name, user, paragraph) - assert not rules.has_perm(perm_name, group_member_in_orga, paragraph) + assert not rules.has_perm(perm_name, creator, paragraph) + assert not rules.has_perm(perm_name, group_member_in_org, paragraph) assert not rules.has_perm(perm_name, group_member_out, paragraph) - assert rules.has_perm(perm_name, group_member_in_project, paragraph) + assert rules.has_perm(perm_name, group_member_in_pro, paragraph) assert rules.has_perm(perm_name, moderator, paragraph) assert rules.has_perm(perm_name, initiator, paragraph) @@ -47,8 +49,9 @@ def test_phase_active(phase_factory, chapter_factory, paragraph_factory, phases.CommentPhase) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator paragraph = paragraph_factory(chapter=item) @@ -56,9 +59,10 @@ def test_phase_active(phase_factory, chapter_factory, paragraph_factory, with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, paragraph) assert rules.has_perm(perm_name, user, paragraph) - assert rules.has_perm(perm_name, group_member_in_orga, paragraph) + assert rules.has_perm(perm_name, creator, paragraph) + assert rules.has_perm(perm_name, group_member_in_org, paragraph) assert rules.has_perm(perm_name, group_member_out, paragraph) - assert rules.has_perm(perm_name, group_member_in_project, paragraph) + assert rules.has_perm(perm_name, group_member_in_pro, paragraph) assert rules.has_perm(perm_name, moderator, paragraph) assert rules.has_perm(perm_name, initiator, paragraph) @@ -74,8 +78,9 @@ def test_phase_active_project_private(phase_factory, chapter_factory, anonymous, moderator, initiator = setup_users(project) participant = user_factory() project.participants.add(participant) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, 
group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator paragraph = paragraph_factory(chapter=item) @@ -83,9 +88,10 @@ def test_phase_active_project_private(phase_factory, chapter_factory, with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, paragraph) assert not rules.has_perm(perm_name, user, paragraph) - assert not rules.has_perm(perm_name, group_member_in_orga, paragraph) + assert not rules.has_perm(perm_name, creator, paragraph) + assert not rules.has_perm(perm_name, group_member_in_org, paragraph) assert not rules.has_perm(perm_name, group_member_out, paragraph) - assert rules.has_perm(perm_name, group_member_in_project, paragraph) + assert rules.has_perm(perm_name, group_member_in_pro, paragraph) assert rules.has_perm(perm_name, participant, paragraph) assert rules.has_perm(perm_name, moderator, paragraph) assert rules.has_perm(perm_name, initiator, paragraph) @@ -102,8 +108,9 @@ def test_phase_active_project_semipublic(phase_factory, chapter_factory, anonymous, moderator, initiator = setup_users(project) participant = user_factory() project.participants.add(participant) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator paragraph = paragraph_factory(chapter=item) @@ -111,9 +118,10 @@ def test_phase_active_project_semipublic(phase_factory, chapter_factory, with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, paragraph) assert not rules.has_perm(perm_name, user, paragraph) - assert not rules.has_perm(perm_name, group_member_in_orga, paragraph) + assert not rules.has_perm(perm_name, creator, paragraph) + assert not rules.has_perm(perm_name, group_member_in_org, paragraph) assert not rules.has_perm(perm_name, group_member_out, paragraph) - assert rules.has_perm(perm_name, group_member_in_project, paragraph) + assert rules.has_perm(perm_name, group_member_in_pro, paragraph) assert rules.has_perm(perm_name, participant, paragraph) assert rules.has_perm(perm_name, moderator, paragraph) assert rules.has_perm(perm_name, initiator, paragraph) @@ -128,8 +136,9 @@ def test_phase_active_project_draft(phase_factory, chapter_factory, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator paragraph = paragraph_factory(chapter=item) @@ -137,9 +146,10 @@ def test_phase_active_project_draft(phase_factory, chapter_factory, with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, paragraph) assert not rules.has_perm(perm_name, user, paragraph) - assert not rules.has_perm(perm_name, group_member_in_orga, paragraph) + assert not rules.has_perm(perm_name, creator, paragraph) + assert not rules.has_perm(perm_name, group_member_in_org, paragraph) assert not rules.has_perm(perm_name, group_member_out, paragraph) - assert rules.has_perm(perm_name, group_member_in_project, paragraph) + assert rules.has_perm(perm_name, group_member_in_pro, paragraph) assert 
rules.has_perm(perm_name, moderator, paragraph) assert rules.has_perm(perm_name, initiator, paragraph) @@ -153,8 +163,9 @@ def test_post_phase_project_archived(phase_factory, chapter_factory, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator paragraph = paragraph_factory(chapter=item) @@ -162,8 +173,9 @@ def test_post_phase_project_archived(phase_factory, chapter_factory, with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, paragraph) assert not rules.has_perm(perm_name, user, paragraph) - assert not rules.has_perm(perm_name, group_member_in_orga, paragraph) + assert not rules.has_perm(perm_name, creator, paragraph) + assert not rules.has_perm(perm_name, group_member_in_org, paragraph) assert not rules.has_perm(perm_name, group_member_out, paragraph) - assert rules.has_perm(perm_name, group_member_in_project, paragraph) + assert rules.has_perm(perm_name, group_member_in_pro, paragraph) assert rules.has_perm(perm_name, moderator, paragraph) assert rules.has_perm(perm_name, initiator, paragraph) diff --git a/tests/documents/rules/test_rules_view_chapter.py b/tests/documents/rules/test_rules_view_chapter.py index 5e152d6011..6bdc12397a 100644 --- a/tests/documents/rules/test_rules_view_chapter.py +++ b/tests/documents/rules/test_rules_view_chapter.py @@ -8,7 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.documents import phases -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_documents.view_chapter' @@ -25,16 +25,18 @@ def test_pre_phase(phase_factory, chapter_factory, phases.CommentPhase) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) - assert rules.has_perm(perm_name, group_member_in_orga, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_in_org, item) assert rules.has_perm(perm_name, group_member_out, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) @@ -46,16 +48,18 @@ def test_phase_active(phase_factory, chapter_factory, phase, _, project, item = setup_phase(phase_factory, chapter_factory, phases.CommentPhase) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.access == 
Access.PUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) - assert rules.has_perm(perm_name, group_member_in_orga, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_in_org, item) assert rules.has_perm(perm_name, group_member_out, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) @@ -71,16 +75,18 @@ def test_phase_active_project_private(phase_factory, chapter_factory, anonymous, moderator, initiator = setup_users(project) participant = user_factory() project.participants.add(participant) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.access == Access.PRIVATE with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, participant, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) @@ -97,16 +103,18 @@ def test_phase_active_project_semipublic(phase_factory, chapter_factory, anonymous, moderator, initiator = setup_users(project) participant = user_factory() project.participants.add(participant) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) - assert rules.has_perm(perm_name, group_member_in_orga, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_in_org, item) assert rules.has_perm(perm_name, group_member_out, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, participant, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) @@ -121,16 +129,18 @@ def test_phase_active_project_draft(phase_factory, chapter_factory, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, 
anonymous, item) assert not rules.has_perm(perm_name, user, item) - assert not rules.has_perm(perm_name, group_member_in_orga, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) assert not rules.has_perm(perm_name, group_member_out, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) @@ -144,15 +154,17 @@ def test_post_phase_project_archived(phase_factory, chapter_factory, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.is_archived with freeze_post_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) - assert rules.has_perm(perm_name, group_member_in_orga, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_in_org, item) assert rules.has_perm(perm_name, group_member_out, item) - assert rules.has_perm(perm_name, group_member_in_project, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) diff --git a/tests/documents/rules/test_rules_view_paragraph.py b/tests/documents/rules/test_rules_view_paragraph.py index ce5c188f14..f0b678be56 100644 --- a/tests/documents/rules/test_rules_view_paragraph.py +++ b/tests/documents/rules/test_rules_view_paragraph.py @@ -8,7 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.documents import phases -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_documents.view_paragraph' @@ -25,8 +25,9 @@ def test_pre_phase(phase_factory, chapter_factory, paragraph_factory, phases.CommentPhase) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator paragraph = paragraph_factory(chapter=item) @@ -34,9 +35,10 @@ def test_pre_phase(phase_factory, chapter_factory, paragraph_factory, with freeze_pre_phase(phase): assert rules.has_perm(perm_name, anonymous, paragraph) assert rules.has_perm(perm_name, user, paragraph) - assert rules.has_perm(perm_name, group_member_in_orga, paragraph) + assert rules.has_perm(perm_name, creator, paragraph) + assert rules.has_perm(perm_name, group_member_in_org, paragraph) assert rules.has_perm(perm_name, group_member_out, paragraph) - assert rules.has_perm(perm_name, group_member_in_project, paragraph) + assert rules.has_perm(perm_name, group_member_in_pro, paragraph) assert rules.has_perm(perm_name, moderator, paragraph) assert rules.has_perm(perm_name, initiator, paragraph) @@ -49,8 +51,9 @@ def test_phase_active(phase_factory, chapter_factory, paragraph_factory, phases.CommentPhase) 
anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator paragraph = paragraph_factory(chapter=item) @@ -58,9 +61,10 @@ def test_phase_active(phase_factory, chapter_factory, paragraph_factory, with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, paragraph) assert rules.has_perm(perm_name, user, paragraph) - assert rules.has_perm(perm_name, group_member_in_orga, paragraph) + assert rules.has_perm(perm_name, creator, paragraph) + assert rules.has_perm(perm_name, group_member_in_org, paragraph) assert rules.has_perm(perm_name, group_member_out, paragraph) - assert rules.has_perm(perm_name, group_member_in_project, paragraph) + assert rules.has_perm(perm_name, group_member_in_pro, paragraph) assert rules.has_perm(perm_name, moderator, paragraph) assert rules.has_perm(perm_name, initiator, paragraph) @@ -77,8 +81,9 @@ def test_phase_active_project_private(phase_factory, chapter_factory, anonymous, moderator, initiator = setup_users(project) participant = user_factory() project.participants.add(participant) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator paragraph = paragraph_factory(chapter=item) @@ -86,9 +91,10 @@ def test_phase_active_project_private(phase_factory, chapter_factory, with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, paragraph) assert not rules.has_perm(perm_name, user, paragraph) - assert not rules.has_perm(perm_name, group_member_in_orga, paragraph) + assert not rules.has_perm(perm_name, creator, paragraph) + assert not rules.has_perm(perm_name, group_member_in_org, paragraph) assert not rules.has_perm(perm_name, group_member_out, paragraph) - assert rules.has_perm(perm_name, group_member_in_project, paragraph) + assert rules.has_perm(perm_name, group_member_in_pro, paragraph) assert rules.has_perm(perm_name, participant, paragraph) assert rules.has_perm(perm_name, moderator, paragraph) assert rules.has_perm(perm_name, initiator, paragraph) @@ -106,8 +112,9 @@ def test_phase_active_project_semipublic(phase_factory, chapter_factory, anonymous, moderator, initiator = setup_users(project) participant = user_factory() project.participants.add(participant) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator paragraph = paragraph_factory(chapter=item) @@ -115,9 +122,10 @@ def test_phase_active_project_semipublic(phase_factory, chapter_factory, with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, paragraph) assert rules.has_perm(perm_name, user, paragraph) - assert rules.has_perm(perm_name, group_member_in_orga, paragraph) + assert rules.has_perm(perm_name, creator, paragraph) + assert rules.has_perm(perm_name, group_member_in_org, paragraph) assert rules.has_perm(perm_name, group_member_out, paragraph) - assert rules.has_perm(perm_name, 
group_member_in_project, paragraph) + assert rules.has_perm(perm_name, group_member_in_pro, paragraph) assert rules.has_perm(perm_name, participant, paragraph) assert rules.has_perm(perm_name, moderator, paragraph) assert rules.has_perm(perm_name, initiator, paragraph) @@ -133,8 +141,9 @@ def test_phase_active_project_draft(phase_factory, chapter_factory, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator paragraph = paragraph_factory(chapter=item) @@ -142,9 +151,10 @@ def test_phase_active_project_draft(phase_factory, chapter_factory, with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, paragraph) assert not rules.has_perm(perm_name, user, paragraph) - assert not rules.has_perm(perm_name, group_member_in_orga, paragraph) + assert not rules.has_perm(perm_name, creator, paragraph) + assert not rules.has_perm(perm_name, group_member_in_org, paragraph) assert not rules.has_perm(perm_name, group_member_out, paragraph) - assert rules.has_perm(perm_name, group_member_in_project, paragraph) + assert rules.has_perm(perm_name, group_member_in_pro, paragraph) assert rules.has_perm(perm_name, moderator, paragraph) assert rules.has_perm(perm_name, initiator, paragraph) @@ -159,8 +169,9 @@ def test_post_phase_project_archived(phase_factory, chapter_factory, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator paragraph = paragraph_factory(chapter=item) @@ -168,8 +179,9 @@ def test_post_phase_project_archived(phase_factory, chapter_factory, with freeze_post_phase(phase): assert rules.has_perm(perm_name, anonymous, paragraph) assert rules.has_perm(perm_name, user, paragraph) - assert rules.has_perm(perm_name, group_member_in_orga, paragraph) + assert rules.has_perm(perm_name, creator, paragraph) + assert rules.has_perm(perm_name, group_member_in_org, paragraph) assert rules.has_perm(perm_name, group_member_out, paragraph) - assert rules.has_perm(perm_name, group_member_in_project, paragraph) + assert rules.has_perm(perm_name, group_member_in_pro, paragraph) assert rules.has_perm(perm_name, moderator, paragraph) assert rules.has_perm(perm_name, initiator, paragraph) diff --git a/tests/extprojects/dashboard_components/test_views_extproject_basic.py b/tests/extprojects/dashboard_components/test_views_extproject_basic.py index 889cbcd226..2e03ac4b6a 100644 --- a/tests/extprojects/dashboard_components/test_views_extproject_basic.py +++ b/tests/extprojects/dashboard_components/test_views_extproject_basic.py @@ -5,7 +5,7 @@ from adhocracy4.test.helpers import redirect_target from meinberlin.apps.extprojects.phases import ExternalPhase from meinberlin.test.helpers import assert_dashboard_form_component_response -from meinberlin.test.helpers import setup_group_member +from meinberlin.test.helpers import setup_group_members component = components.projects.get('external') @@ -55,13 +55,13 @@ def test_edit_view(client, phase_factory, 
external_project, module_factory): def test_edit_view_group_member( client, phase_factory, external_project, module_factory, group_factory, user_factory): - group_member, organisation, external_project = setup_group_member( - None, external_project, group_factory, user_factory) + project, _, group_member_in_pro, _ = \ + setup_group_members(external_project, group_factory, user_factory) module = module_factory(project=external_project) phase = phase_factory(phase_content=ExternalPhase(), module=module) url = component.get_base_url(external_project) - client.login(username=group_member.email, password='password') + client.login(username=group_member_in_pro.email, password='password') response = client.get(url) assert_dashboard_form_component_response(response, component) diff --git a/tests/extprojects/dashboard_components/test_views_extproject_plans.py b/tests/extprojects/dashboard_components/test_views_extproject_plans.py index 3173e1c359..627a2b30bd 100644 --- a/tests/extprojects/dashboard_components/test_views_extproject_plans.py +++ b/tests/extprojects/dashboard_components/test_views_extproject_plans.py @@ -3,7 +3,7 @@ from adhocracy4.dashboard import components from adhocracy4.test.helpers import redirect_target from meinberlin.test.helpers import assert_dashboard_form_component_response -from meinberlin.test.helpers import setup_group_member +from meinberlin.test.helpers import setup_group_members component = components.projects.get('plans') @@ -34,11 +34,12 @@ def test_edit_view(client, external_project, plan_factory): def test_edit_view_group_member( client, external_project, plan_factory, group_factory, user_factory): - group_member, organisation, external_project = setup_group_member( - None, external_project, group_factory, user_factory) + external_project, _, group_member_in_pro, _ = \ + setup_group_members(external_project, group_factory, user_factory) + organisation = external_project.organisation plan = plan_factory(organisation=organisation) url = component.get_base_url(external_project) - client.login(username=group_member.email, password='password') + client.login(username=group_member_in_pro.email, password='password') response = client.get(url) assert_dashboard_form_component_response(response, component) diff --git a/tests/extprojects/dashboard_components/test_views_extproject_point.py b/tests/extprojects/dashboard_components/test_views_extproject_point.py index 2eb1e5f112..6f3bd8c5d2 100644 --- a/tests/extprojects/dashboard_components/test_views_extproject_point.py +++ b/tests/extprojects/dashboard_components/test_views_extproject_point.py @@ -5,7 +5,7 @@ from adhocracy4.dashboard import components from adhocracy4.test.helpers import redirect_target from meinberlin.test.helpers import assert_dashboard_form_component_response -from meinberlin.test.helpers import setup_group_member +from meinberlin.test.helpers import setup_group_members component = components.projects.get('point') @@ -43,10 +43,10 @@ def test_edit_view(client, external_project, administrative_district): def test_edit_view_group_member( client, external_project, administrative_district, group_factory, user_factory): - group_member, _, external_project = setup_group_member( - None, external_project, group_factory, user_factory) + external_project, _, group_member_in_pro, _ = \ + setup_group_members(external_project, group_factory, user_factory) url = component.get_base_url(external_project) - client.login(username=group_member.email, password='password') + client.login(username=group_member_in_pro.email, 
password='password')
     response = client.get(url)
     assert_dashboard_form_component_response(response, component)
diff --git a/tests/extprojects/dashboard_components/test_views_extproject_topics.py b/tests/extprojects/dashboard_components/test_views_extproject_topics.py
index 41d274633c..43bf164aa1 100644
--- a/tests/extprojects/dashboard_components/test_views_extproject_topics.py
+++ b/tests/extprojects/dashboard_components/test_views_extproject_topics.py
@@ -4,7 +4,7 @@
 from adhocracy4.dashboard import components
 from adhocracy4.test.helpers import redirect_target
 from meinberlin.test.helpers import assert_dashboard_form_component_response
-from meinberlin.test.helpers import setup_group_member
+from meinberlin.test.helpers import setup_group_members
 
 component = components.projects.get('topics')
 
@@ -33,10 +33,10 @@ def test_edit_view(client, external_project):
 
 @pytest.mark.django_db
 def test_edit_view_gourp_member(client, external_project, group_factory,
                                 user_factory):
-    group_member, _, external_project = setup_group_member(
-        None, external_project, group_factory, user_factory)
+    external_project, _, group_member_in_pro, _ = \
+        setup_group_members(external_project, group_factory, user_factory)
     url = component.get_base_url(external_project)
-    client.login(username=group_member.email, password='password')
+    client.login(username=group_member_in_pro.email, password='password')
     response = client.get(url)
     assert_dashboard_form_component_response(response, component)
diff --git a/tests/helpers.py b/tests/helpers.py
index 362f7b04f1..90932a6c5d 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -12,19 +12,3 @@ def __eq__(self, actual):
 
     def __repr__(self):
         return self._regex.pattern
-
-
-def setup_group_users(user_factory, group_factory, project):
-    group1 = group_factory()
-    group2 = group_factory()
-    group3 = group_factory()
-    group_member_in_orga = user_factory.create(groups=(group1, group2))
-    group_member_out = user_factory.create(groups=(group2,))
-    group_member_in_project = user_factory.create(groups=(group2, group3))
-
-    project.organisation.groups.add(group1)
-    project.group = group3
-    project.save()
-
-    return group_member_in_orga, group_member_out, group_member_in_project, \
-        project
diff --git a/tests/ideas/rules/test_rules_add.py b/tests/ideas/rules/test_rules_add.py
index 09ea0d846a..717eb8e336 100644
--- a/tests/ideas/rules/test_rules_add.py
+++ b/tests/ideas/rules/test_rules_add.py
@@ -8,6 +8,7 @@
 from adhocracy4.test.helpers import setup_phase
 from adhocracy4.test.helpers import setup_users
 from meinberlin.apps.ideas import phases
+from meinberlin.test.helpers import setup_group_members
 
 perm_name = 'meinberlin_ideas.add_idea'
 
@@ -17,41 +18,58 @@ def test_perm_exists():
 
 
 @pytest.mark.django_db
-def test_pre_phase(phase_factory, user):
+def test_pre_phase(
+        phase_factory, user, admin, user_factory, group_factory):
     phase, module, project, _ = setup_phase(phase_factory, None,
                                             phases.CollectPhase)
     anonymous, moderator, initiator = setup_users(project)
+    project, group_member_in_org, group_member_in_pro, group_member_out = \
+        setup_group_members(project, group_factory, user_factory)
 
     assert project.access == Access.PUBLIC
     with freeze_pre_phase(phase):
         assert not rules.has_perm(perm_name, anonymous, module)
         assert not rules.has_perm(perm_name, user, module)
+        assert not rules.has_perm(perm_name, group_member_out, module)
+        assert not rules.has_perm(perm_name, group_member_in_org, module)
+        assert rules.has_perm(perm_name, group_member_in_pro, module)
         assert
rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active(phase_factory, user): +def test_phase_active( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert rules.has_perm(perm_name, user, module) + assert rules.has_perm(perm_name, group_member_out, module) + assert rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, user, user2): +def test_phase_active_project_private( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.CollectPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -59,18 +77,25 @@ def test_phase_active_project_private(phase_factory, user, user2): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) assert rules.has_perm(perm_name, participant, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, user, user2): +def test_phase_active_project_semipublic( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.CollectPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -78,35 +103,53 @@ def test_phase_active_project_semipublic(phase_factory, user, user2): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) assert rules.has_perm(perm_name, participant, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, 
module) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, user): +def test_phase_active_project_draft( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.CollectPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, user): +def test_post_phase_project_archived( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.CollectPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) diff --git a/tests/ideas/rules/test_rules_change.py b/tests/ideas/rules/test_rules_change.py index 7e5c57afc0..0d1967121a 100644 --- a/tests/ideas/rules/test_rules_change.py +++ b/tests/ideas/rules/test_rules_change.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.ideas import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_ideas.change_idea' @@ -17,47 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, idea_factory, user): +def test_pre_phase( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) 
@pytest.mark.django_db -def test_phase_active(phase_factory, idea_factory, user): +def test_phase_active( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, idea_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, idea_factory, phases.CollectPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -66,20 +86,27 @@ def test_phase_active_project_private(phase_factory, idea_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, idea_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, idea_factory, phases.CollectPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -88,39 +115,59 @@ def test_phase_active_project_semipublic(phase_factory, idea_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + 
assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, idea_factory, user): +def test_phase_active_project_draft( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, idea_factory, user): +def test_post_phase_project_archived( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/ideas/rules/test_rules_comment.py b/tests/ideas/rules/test_rules_comment.py index b9b77a02cd..09aae1ce52 100644 --- a/tests/ideas/rules/test_rules_comment.py +++ b/tests/ideas/rules/test_rules_comment.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.ideas import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_ideas.comment_idea' @@ -17,42 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, idea_factory, user): +def test_pre_phase( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, 
group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, idea_factory, user): +def test_phase_active( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, idea_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, idea_factory, phases.CollectPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -60,19 +85,28 @@ def test_phase_active_project_private(phase_factory, idea_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, idea_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, idea_factory, phases.CollectPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -80,35 +114,60 @@ def test_phase_active_project_semipublic(phase_factory, idea_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not 
rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, idea_factory, user): +def test_phase_active_project_draft( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, idea_factory, user): +def test_post_phase_project_archived( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/ideas/rules/test_rules_moderate.py b/tests/ideas/rules/test_rules_moderate.py index 5df72b8c55..bd1366dc68 100644 --- a/tests/ideas/rules/test_rules_moderate.py +++ b/tests/ideas/rules/test_rules_moderate.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.ideas import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_ideas.moderate_idea' @@ -17,47 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, idea_factory, user): +def test_pre_phase( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, 
group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, idea_factory, user): +def test_phase_active( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, idea_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, idea_factory, phases.CollectPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -66,20 +86,27 @@ def test_phase_active_project_private(phase_factory, idea_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, idea_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, idea_factory, phases.CollectPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() 
project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -88,39 +115,59 @@ def test_phase_active_project_semipublic(phase_factory, idea_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, idea_factory, user): +def test_phase_active_project_draft( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, idea_factory, user): +def test_post_phase_project_archived( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) diff --git a/tests/ideas/rules/test_rules_view.py b/tests/ideas/rules/test_rules_view.py index 161c025c97..df31f756fb 100644 --- a/tests/ideas/rules/test_rules_view.py +++ b/tests/ideas/rules/test_rules_view.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.ideas import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_ideas.view_idea' @@ -17,41 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, idea_factory, user): +def test_pre_phase( + phase_factory, idea_factory, user, admin, user_factory, + 
group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, idea_factory, user): +def test_phase_active( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, idea_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, idea_factory, phases.CollectPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -59,18 +85,28 @@ def test_phase_active_project_private(phase_factory, idea_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, idea_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, idea_factory, phases.CollectPhase, 
module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -78,35 +114,60 @@ def test_phase_active_project_semipublic(phase_factory, idea_factory, assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, idea_factory, user): +def test_phase_active_project_draft( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, idea_factory, user): +def test_post_phase_project_archived( + phase_factory, idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, idea_factory, phases.CollectPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/kiezkasse/rules/test_rules_add.py b/tests/kiezkasse/rules/test_rules_add.py index 0c5a7abc1c..5dbbd380b2 100644 --- a/tests/kiezkasse/rules/test_rules_add.py +++ b/tests/kiezkasse/rules/test_rules_add.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.kiezkasse 
import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_kiezkasse.add_proposal' @@ -17,60 +18,84 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, user): +def test_pre_phase( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active(phase_factory, user): +def test_phase_active( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert rules.has_perm(perm_name, user, module) + assert rules.has_perm(perm_name, group_member_out, module) + assert rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, user, user2): +def test_phase_active_project_private( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.RequestPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, participant, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, user, user2): +def test_phase_active_project_semipublic( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.RequestPhase, module__project__access=Access.SEMIPUBLIC) anonymous, 
moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -78,35 +103,53 @@ def test_phase_active_project_semipublic(phase_factory, user, user2): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) assert rules.has_perm(perm_name, participant, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, user): +def test_phase_active_project_draft( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.RequestPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, user): +def test_post_phase_project_archived( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.RequestPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) diff --git a/tests/kiezkasse/rules/test_rules_change.py b/tests/kiezkasse/rules/test_rules_change.py index 98fb1f16db..2b4ed0dc24 100644 --- a/tests/kiezkasse/rules/test_rules_change.py +++ b/tests/kiezkasse/rules/test_rules_change.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.kiezkasse import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_kiezkasse.change_proposal' @@ -17,47 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, proposal_factory, user): +def 
test_pre_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, proposal_factory, user): +def test_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -66,20 +86,27 @@ def test_phase_active_project_private(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( 
phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -88,39 +115,59 @@ def test_phase_active_project_semipublic(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, proposal_factory, user): +def test_phase_active_project_draft( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, proposal_factory, user): +def test_post_phase_project_archived( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/kiezkasse/rules/test_rules_comment.py b/tests/kiezkasse/rules/test_rules_comment.py index 26fcfdcd29..cc5e5b7f30 100644 --- a/tests/kiezkasse/rules/test_rules_comment.py +++ b/tests/kiezkasse/rules/test_rules_comment.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from 
adhocracy4.test.helpers import setup_users from meinberlin.apps.kiezkasse import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_kiezkasse.comment_proposal' @@ -17,42 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, proposal_factory, user): +def test_pre_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, proposal_factory, user): +def test_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -60,19 +85,28 @@ def test_phase_active_project_private(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, 
item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -80,35 +114,60 @@ def test_phase_active_project_semipublic(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, proposal_factory, user): +def test_phase_active_project_draft( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, proposal_factory, user): +def test_post_phase_project_archived( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert 
rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/kiezkasse/rules/test_rules_moderate.py b/tests/kiezkasse/rules/test_rules_moderate.py index 9e77b4dc5c..acdf68e5c0 100644 --- a/tests/kiezkasse/rules/test_rules_moderate.py +++ b/tests/kiezkasse/rules/test_rules_moderate.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.kiezkasse import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_kiezkasse.moderate_proposal' @@ -17,47 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, proposal_factory, user): +def test_pre_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, proposal_factory, user): +def test_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -66,20 +86,27 @@ def 
test_phase_active_project_private(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -88,39 +115,59 @@ def test_phase_active_project_semipublic(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, proposal_factory, user): +def test_phase_active_project_draft( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, proposal_factory, user): +def test_post_phase_project_archived( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) 
assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) diff --git a/tests/kiezkasse/rules/test_rules_rate.py b/tests/kiezkasse/rules/test_rules_rate.py index ee5b0ca61e..edeb7ce0b3 100644 --- a/tests/kiezkasse/rules/test_rules_rate.py +++ b/tests/kiezkasse/rules/test_rules_rate.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.kiezkasse import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_kiezkasse.rate_proposal' @@ -17,42 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, proposal_factory, user): +def test_pre_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.FeedbackPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, proposal_factory, user): +def test_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.FeedbackPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.FeedbackPhase, module__project__access=Access.PRIVATE) anonymous, moderator, 
initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -60,19 +85,28 @@ def test_phase_active_project_private(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.FeedbackPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -80,35 +114,60 @@ def test_phase_active_project_semipublic(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, proposal_factory, user): +def test_phase_active_project_draft( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.FeedbackPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, proposal_factory, user): +def 
test_post_phase_project_archived( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.FeedbackPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/kiezkasse/rules/test_rules_view.py b/tests/kiezkasse/rules/test_rules_view.py index fa9372340f..ad6f2b1122 100644 --- a/tests/kiezkasse/rules/test_rules_view.py +++ b/tests/kiezkasse/rules/test_rules_view.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.kiezkasse import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_kiezkasse.view_proposal' @@ -17,41 +18,65 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, proposal_factory, user): +def test_pre_phase( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, proposal_factory, user): +def test_phase_active( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, 
initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -59,18 +84,27 @@ def test_phase_active_project_private(phase_factory, proposal_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, proposal_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, proposal_factory, phases.RequestPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -78,35 +112,60 @@ def test_phase_active_project_semipublic(phase_factory, proposal_factory, assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, proposal_factory, user): +def test_phase_active_project_draft( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not 
rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, proposal_factory, user): +def test_post_phase_project_archived( + phase_factory, proposal_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, proposal_factory, phases.RequestPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/livequestions/rules/test_rules_add.py b/tests/livequestions/rules/test_rules_add.py index 9fba3c30a6..1aaa9051d0 100644 --- a/tests/livequestions/rules/test_rules_add.py +++ b/tests/livequestions/rules/test_rules_add.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.livequestions import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_livequestions.add_livequestion' @@ -17,41 +18,58 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, user): +def test_pre_phase( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.IssuePhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert not rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert not rules.has_perm(perm_name, initiator, module) + assert not rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active(phase_factory, user): +def test_phase_active( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.IssuePhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, module) assert rules.has_perm(perm_name, user, module) + assert 
rules.has_perm(perm_name, group_member_out, module) + assert rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, user, user2): +def test_phase_active_project_private( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.IssuePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -59,18 +77,25 @@ def test_phase_active_project_private(phase_factory, user, user2): assert rules.has_perm(perm_name, anonymous, module) assert rules.has_perm(perm_name, user, module) assert rules.has_perm(perm_name, participant, module) + assert rules.has_perm(perm_name, group_member_out, module) + assert rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, user, user2): +def test_phase_active_project_semipublic( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.IssuePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -78,20 +103,31 @@ def test_phase_active_project_semipublic(phase_factory, user, user2): assert rules.has_perm(perm_name, anonymous, module) assert rules.has_perm(perm_name, user, module) assert rules.has_perm(perm_name, participant, module) + assert rules.has_perm(perm_name, group_member_out, module) + assert rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, user): +def test_post_phase_project_archived( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.IssuePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, 
group_member_in_org, module) + assert not rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert not rules.has_perm(perm_name, initiator, module) + assert not rules.has_perm(perm_name, admin, module) diff --git a/tests/livequestions/rules/test_rules_change.py b/tests/livequestions/rules/test_rules_change.py index ab1cc538ad..d0137a8fad 100644 --- a/tests/livequestions/rules/test_rules_change.py +++ b/tests/livequestions/rules/test_rules_change.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.livequestions import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_livequestions.change_livequestion' @@ -17,100 +18,152 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, live_question_factory, user): +def test_pre_phase( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, live_question_factory, phases.IssuePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, live_question_factory, user): +def test_phase_active( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, live_question_factory, phases.IssuePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, - live_question_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, live_question_factory, phases.IssuePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, 
group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, - live_question_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, live_question_factory, phases.IssuePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, - live_question_factory, - user): +def test_phase_active_project_draft( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, live_question_factory, phases.IssuePhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, - live_question_factory, - user): +def test_post_phase_project_archived( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, live_question_factory, phases.IssuePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, 
group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/livequestions/rules/test_rules_moderate.py b/tests/livequestions/rules/test_rules_moderate.py index a54acaee45..a6688797af 100644 --- a/tests/livequestions/rules/test_rules_moderate.py +++ b/tests/livequestions/rules/test_rules_moderate.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.livequestions import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_livequestions.moderate_livequestions' @@ -17,42 +18,65 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, live_question_factory, user): +def test_pre_phase( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, live_question_factory, phases.IssuePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, live_question_factory, user): +def test_phase_active( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, live_question_factory, phases.IssuePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, - live_question_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, live_question_factory, user, admin, 
user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, live_question_factory, phases.IssuePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -60,19 +84,27 @@ def test_phase_active_project_private(phase_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, - live_question_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, live_question_factory, phases.IssuePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -80,39 +112,60 @@ def test_phase_active_project_semipublic(phase_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, - live_question_factory, - user): +def test_phase_active_project_draft( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, live_question_factory, phases.IssuePhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert 
rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, - live_question_factory, - user): +def test_post_phase_project_archived( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, live_question_factory, phases.IssuePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/livequestions/rules/test_rules_view.py b/tests/livequestions/rules/test_rules_view.py index 1e6922bac1..c571e03f21 100644 --- a/tests/livequestions/rules/test_rules_view.py +++ b/tests/livequestions/rules/test_rules_view.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.livequestions import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_livequestions.view_livequestion' @@ -17,41 +18,65 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, live_question_factory, user): +def test_pre_phase( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, live_question_factory, phases.IssuePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, live_question_factory, user): +def test_phase_active( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, live_question_factory, phases.IssuePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, 
item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, live_question_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, live_question_factory, phases.IssuePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -59,18 +84,27 @@ def test_phase_active_project_private(phase_factory, live_question_factory, assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, live_question_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, live_question_factory, phases.IssuePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -78,22 +112,35 @@ def test_phase_active_project_semipublic(phase_factory, live_question_factory, assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, - live_question_factory, - user): +def test_post_phase_project_archived( + phase_factory, live_question_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, live_question_factory, phases.IssuePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, 
group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/mapideas/rules/test_rules_add.py b/tests/mapideas/rules/test_rules_add.py index fbc6fd0a1e..fa23bb29ed 100644 --- a/tests/mapideas/rules/test_rules_add.py +++ b/tests/mapideas/rules/test_rules_add.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.mapideas import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_mapideas.add_mapidea' @@ -17,41 +18,58 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, user): +def test_pre_phase( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active(phase_factory, user): +def test_phase_active( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert rules.has_perm(perm_name, user, module) + assert rules.has_perm(perm_name, group_member_out, module) + assert rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, user, user2): +def test_phase_active_project_private( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.CollectPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant 
= user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -59,18 +77,25 @@ def test_phase_active_project_private(phase_factory, user, user2): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) assert rules.has_perm(perm_name, participant, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, user, user2): +def test_phase_active_project_semipublic( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.CollectPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -78,35 +103,53 @@ def test_phase_active_project_semipublic(phase_factory, user, user2): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) assert rules.has_perm(perm_name, participant, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, user): +def test_phase_active_project_draft( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.CollectPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, user): +def test_post_phase_project_archived( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.CollectPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not 
rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) diff --git a/tests/mapideas/rules/test_rules_change.py b/tests/mapideas/rules/test_rules_change.py index 324f679f33..72d51ef641 100644 --- a/tests/mapideas/rules/test_rules_change.py +++ b/tests/mapideas/rules/test_rules_change.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.mapideas import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_mapideas.change_mapidea' @@ -17,47 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, map_idea_factory, user): +def test_pre_phase( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, map_idea_factory, user): +def test_phase_active( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, map_idea_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, map_idea_factory, phases.CollectPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, 
group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -66,20 +86,27 @@ def test_phase_active_project_private(phase_factory, map_idea_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, map_idea_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, map_idea_factory, phases.CollectPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -88,39 +115,59 @@ def test_phase_active_project_semipublic(phase_factory, map_idea_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, map_idea_factory, user): +def test_phase_active_project_draft( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, map_idea_factory, user): +def test_post_phase_project_archived( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase, module__project__is_archived=True) anonymous, moderator, initiator = 
setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/mapideas/rules/test_rules_comment.py b/tests/mapideas/rules/test_rules_comment.py index 388641e968..827c84bd81 100644 --- a/tests/mapideas/rules/test_rules_comment.py +++ b/tests/mapideas/rules/test_rules_comment.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.mapideas import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_mapideas.comment_mapidea' @@ -17,42 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, map_idea_factory, user): +def test_pre_phase( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, map_idea_factory, user): +def test_phase_active( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, map_idea_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, map_idea_factory, user, admin, user_factory, + 
group_factory): phase, _, project, item = setup_phase( phase_factory, map_idea_factory, phases.CollectPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -60,19 +85,28 @@ def test_phase_active_project_private(phase_factory, map_idea_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, map_idea_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, map_idea_factory, phases.CollectPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -80,35 +114,60 @@ def test_phase_active_project_semipublic(phase_factory, map_idea_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, map_idea_factory, user): +def test_phase_active_project_draft( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + 
assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, map_idea_factory, user): +def test_post_phase_project_archived( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/mapideas/rules/test_rules_moderate.py b/tests/mapideas/rules/test_rules_moderate.py index cc5096ac16..811b65e68e 100644 --- a/tests/mapideas/rules/test_rules_moderate.py +++ b/tests/mapideas/rules/test_rules_moderate.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.mapideas import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_mapideas.moderate_mapidea' @@ -17,47 +18,69 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, map_idea_factory, user): +def test_pre_phase( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, map_idea_factory, user): +def test_phase_active( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, 
group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, map_idea_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, map_idea_factory, phases.CollectPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -66,20 +89,28 @@ def test_phase_active_project_private(phase_factory, map_idea_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, map_idea_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, map_idea_factory, phases.CollectPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -88,39 +119,61 @@ def test_phase_active_project_semipublic(phase_factory, map_idea_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, map_idea_factory, user): +def test_phase_active_project_draft( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = 
item.creator assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, map_idea_factory, user): +def test_post_phase_project_archived( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + creator = item.creator assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert not rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert not rules.has_perm(perm_name, admin, item) diff --git a/tests/mapideas/rules/test_rules_rate.py b/tests/mapideas/rules/test_rules_rate.py index 9e6ae84ef2..35f1be7d0b 100644 --- a/tests/mapideas/rules/test_rules_rate.py +++ b/tests/mapideas/rules/test_rules_rate.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.mapideas import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_mapideas.rate_mapidea' @@ -17,42 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, map_idea_factory, user): +def test_pre_phase( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.RatingPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, map_idea_factory, user): +def test_phase_active( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, 
phases.RatingPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, map_idea_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, map_idea_factory, phases.RatingPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -60,19 +85,28 @@ def test_phase_active_project_private(phase_factory, map_idea_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, map_idea_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, map_idea_factory, phases.RatingPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -80,35 +114,60 @@ def test_phase_active_project_semipublic(phase_factory, map_idea_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, 
map_idea_factory, user): +def test_phase_active_project_draft( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.RatingPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, map_idea_factory, user): +def test_post_phase_project_archived( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.RatingPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/mapideas/rules/test_rules_view.py b/tests/mapideas/rules/test_rules_view.py index d5f066216f..e1f1c0ea59 100644 --- a/tests/mapideas/rules/test_rules_view.py +++ b/tests/mapideas/rules/test_rules_view.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.mapideas import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_mapideas.view_mapidea' @@ -17,41 +18,65 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, map_idea_factory, user): +def test_pre_phase( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert 
rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, map_idea_factory, user): +def test_phase_active( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, map_idea_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, map_idea_factory, phases.CollectPhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -59,18 +84,27 @@ def test_phase_active_project_private(phase_factory, map_idea_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, map_idea_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, map_idea_factory, phases.CollectPhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -78,35 +112,60 @@ def test_phase_active_project_semipublic(phase_factory, map_idea_factory, assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert rules.has_perm(perm_name, creator, item) + assert 
rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, map_idea_factory, user): +def test_phase_active_project_draft( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, map_idea_factory, user): +def test_post_phase_project_archived( + phase_factory, map_idea_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, map_idea_factory, phases.CollectPhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/maptopicprio/rules/test_rules_add.py b/tests/maptopicprio/rules/test_rules_add.py index f19259a1a3..63dab93c0f 100644 --- a/tests/maptopicprio/rules/test_rules_add.py +++ b/tests/maptopicprio/rules/test_rules_add.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.maptopicprio import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_maptopicprio.add_maptopic' @@ -17,41 +18,58 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, user): +def test_pre_phase( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): 
assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active(phase_factory, user): +def test_phase_active( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, user, user2): +def test_phase_active_project_private( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.PrioritizePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -59,18 +77,25 @@ def test_phase_active_project_private(phase_factory, user, user2): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) assert not rules.has_perm(perm_name, participant, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, user, user2): +def test_phase_active_project_semipublic( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.PrioritizePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -78,35 +103,53 @@ def test_phase_active_project_semipublic(phase_factory, user, user2): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) assert not rules.has_perm(perm_name, 
participant, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, user): +def test_phase_active_project_draft( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.PrioritizePhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, user): +def test_post_phase_project_archived( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.PrioritizePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) diff --git a/tests/maptopicprio/rules/test_rules_change.py b/tests/maptopicprio/rules/test_rules_change.py index ae704fba00..26499cc8c9 100644 --- a/tests/maptopicprio/rules/test_rules_change.py +++ b/tests/maptopicprio/rules/test_rules_change.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.maptopicprio import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_maptopicprio.change_maptopic' @@ -17,46 +18,65 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, maptopic_factory, user): +def test_pre_phase( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert 
not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, maptopic_factory, user): +def test_phase_active( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, maptopic_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) creator = item.creator - participant = user2 + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -65,19 +85,26 @@ def test_phase_active_project_private(phase_factory, maptopic_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, maptopic_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) creator = item.creator - participant = user2 + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -86,39 +113,59 @@ def 
test_phase_active_project_semipublic(phase_factory, maptopic_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, maptopic_factory, user): +def test_phase_active_project_draft( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, maptopic_factory, user): +def test_post_phase_project_archived( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/maptopicprio/rules/test_rules_comment.py b/tests/maptopicprio/rules/test_rules_comment.py index d5ca03b81d..ceb796c14d 100644 --- a/tests/maptopicprio/rules/test_rules_comment.py +++ b/tests/maptopicprio/rules/test_rules_comment.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.maptopicprio import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_maptopicprio.comment_maptopic' @@ -17,41 +18,65 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, maptopic_factory, user): +def test_pre_phase( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, 
project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, maptopic_factory, user): +def test_phase_active( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, maptopic_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -59,18 +84,27 @@ def test_phase_active_project_private(phase_factory, maptopic_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, maptopic_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, maptopic_factory, 
phases.PrioritizePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -78,35 +112,60 @@ def test_phase_active_project_semipublic(phase_factory, maptopic_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, maptopic_factory, user): +def test_phase_active_project_draft( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, maptopic_factory, user): +def test_post_phase_project_archived( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/maptopicprio/rules/test_rules_rate.py b/tests/maptopicprio/rules/test_rules_rate.py index 4ce517c2e5..f16febfe65 100644 --- a/tests/maptopicprio/rules/test_rules_rate.py +++ b/tests/maptopicprio/rules/test_rules_rate.py @@ -8,6 +8,7 @@ from 
adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.maptopicprio import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_maptopicprio.rate_maptopic' @@ -17,41 +18,65 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, maptopic_factory, user): +def test_pre_phase( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, maptopic_factory, user): +def test_phase_active( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, maptopic_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -59,18 +84,27 @@ def test_phase_active_project_private(phase_factory, maptopic_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, 
group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, maptopic_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -78,35 +112,60 @@ def test_phase_active_project_semipublic(phase_factory, maptopic_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, maptopic_factory, user): +def test_phase_active_project_draft( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, maptopic_factory, user): +def test_post_phase_project_archived( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) 
+ assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/maptopicprio/rules/test_rules_view.py b/tests/maptopicprio/rules/test_rules_view.py index d49ccbc7d6..9ff66a5a00 100644 --- a/tests/maptopicprio/rules/test_rules_view.py +++ b/tests/maptopicprio/rules/test_rules_view.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.maptopicprio import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_maptopicprio.view_maptopic' @@ -17,41 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, maptopic_factory, user): +def test_pre_phase( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, maptopic_factory, user): +def test_phase_active( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, maptopic_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE 
@@ -59,18 +85,28 @@ def test_phase_active_project_private(phase_factory, maptopic_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, maptopic_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -78,35 +114,60 @@ def test_phase_active_project_semipublic(phase_factory, maptopic_factory, assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, maptopic_factory, user): +def test_phase_active_project_draft( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, maptopic_factory, user): +def test_post_phase_project_archived( + phase_factory, maptopic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, maptopic_factory, phases.PrioritizePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, 
group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/offlineevents/rules/test_rules_add.py b/tests/offlineevents/rules/test_rules_add.py index cb8d975527..8033852919 100644 --- a/tests/offlineevents/rules/test_rules_add.py +++ b/tests/offlineevents/rules/test_rules_add.py @@ -2,7 +2,7 @@ import rules from adhocracy4.test.helpers import setup_users -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_offlineevents.add_offlineevent' @@ -18,13 +18,13 @@ def test_rule(offline_event, user_factory, group_factory, project = offline_event.project anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert not rules.has_perm(perm_name, anonymous, project) assert not rules.has_perm(perm_name, user, project) - assert not rules.has_perm(perm_name, group_member_in_orga, project) + assert not rules.has_perm(perm_name, group_member_in_org, project) assert not rules.has_perm(perm_name, group_member_out, project) assert not rules.has_perm(perm_name, moderator, project) - assert rules.has_perm(perm_name, group_member_in_project, project) + assert rules.has_perm(perm_name, group_member_in_pro, project) assert rules.has_perm(perm_name, initiator, project) diff --git a/tests/offlineevents/rules/test_rules_change.py b/tests/offlineevents/rules/test_rules_change.py index 616629767f..760e5c3627 100644 --- a/tests/offlineevents/rules/test_rules_change.py +++ b/tests/offlineevents/rules/test_rules_change.py @@ -2,7 +2,7 @@ import rules from adhocracy4.test.helpers import setup_users -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_offlineevents.change_offlineevent' @@ -18,13 +18,13 @@ def test_rule(offline_event, user_factory, group_factory, project = offline_event.project anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert not rules.has_perm(perm_name, anonymous, project) assert not rules.has_perm(perm_name, user, project) - assert not rules.has_perm(perm_name, group_member_in_orga, project) + assert not rules.has_perm(perm_name, group_member_in_org, project) assert not rules.has_perm(perm_name, group_member_out, project) assert not rules.has_perm(perm_name, moderator, project) - assert rules.has_perm(perm_name, group_member_in_project, project) + assert rules.has_perm(perm_name, group_member_in_pro, project) assert 
rules.has_perm(perm_name, initiator, project) diff --git a/tests/offlineevents/rules/test_rules_list.py b/tests/offlineevents/rules/test_rules_list.py index ba9b58117a..f0641b3bff 100644 --- a/tests/offlineevents/rules/test_rules_list.py +++ b/tests/offlineevents/rules/test_rules_list.py @@ -2,7 +2,7 @@ import rules from adhocracy4.test.helpers import setup_users -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_offlineevents.list_offlineevent' @@ -18,13 +18,13 @@ def test_rule(offline_event, user_factory, group_factory, project = offline_event.project anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert not rules.has_perm(perm_name, anonymous, project) assert not rules.has_perm(perm_name, user, project) - assert not rules.has_perm(perm_name, group_member_in_orga, project) + assert not rules.has_perm(perm_name, group_member_in_org, project) assert not rules.has_perm(perm_name, group_member_out, project) assert not rules.has_perm(perm_name, moderator, project) - assert rules.has_perm(perm_name, group_member_in_project, project) + assert rules.has_perm(perm_name, group_member_in_pro, project) assert rules.has_perm(perm_name, initiator, project) diff --git a/tests/offlineevents/rules/test_rules_view.py b/tests/offlineevents/rules/test_rules_view.py index 49492e374e..283437d4df 100644 --- a/tests/offlineevents/rules/test_rules_view.py +++ b/tests/offlineevents/rules/test_rules_view.py @@ -2,7 +2,7 @@ import rules from adhocracy4.test.helpers import setup_users -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_offlineevents.view_offlineevent' @@ -18,16 +18,16 @@ def test_rule(offline_event, user_factory, group_factory, project = offline_event.project anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert rules.has_perm(perm_name, anonymous, offline_event) assert rules.has_perm(perm_name, user, offline_event) assert rules.has_perm(perm_name, moderator, offline_event) assert rules.has_perm(perm_name, initiator, offline_event) - assert rules.has_perm(perm_name, group_member_in_orga, offline_event) + assert rules.has_perm(perm_name, group_member_in_org, offline_event) assert rules.has_perm(perm_name, group_member_out, offline_event) - assert rules.has_perm(perm_name, group_member_in_project, offline_event) + assert rules.has_perm(perm_name, group_member_in_pro, offline_event) @pytest.mark.django_db @@ -38,15 +38,15 @@ def test_rule_project_draft(offline_event_factory, user_factory, group_factory, project = offline_event.project anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft assert not 
rules.has_perm(perm_name, anonymous, offline_event) assert not rules.has_perm(perm_name, user, offline_event) - assert not rules.has_perm(perm_name, group_member_in_orga, offline_event) + assert not rules.has_perm(perm_name, group_member_in_org, offline_event) assert not rules.has_perm(perm_name, group_member_out, offline_event) - assert rules.has_perm(perm_name, group_member_in_project, offline_event) + assert rules.has_perm(perm_name, group_member_in_pro, offline_event) assert rules.has_perm(perm_name, moderator, offline_event) assert rules.has_perm(perm_name, initiator, offline_event) @@ -59,14 +59,14 @@ def test_rule_project_archived(offline_event_factory, user_factory, project = offline_event.project anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived assert rules.has_perm(perm_name, anonymous, offline_event) assert rules.has_perm(perm_name, user, offline_event) - assert rules.has_perm(perm_name, group_member_in_orga, offline_event) + assert rules.has_perm(perm_name, group_member_in_org, offline_event) assert rules.has_perm(perm_name, group_member_out, offline_event) - assert rules.has_perm(perm_name, group_member_in_project, offline_event) + assert rules.has_perm(perm_name, group_member_in_pro, offline_event) assert rules.has_perm(perm_name, moderator, offline_event) assert rules.has_perm(perm_name, initiator, offline_event) diff --git a/tests/plans/rules/test_rules_add.py b/tests/plans/rules/test_rules_add.py index d817ef4557..b535beca1b 100644 --- a/tests/plans/rules/test_rules_add.py +++ b/tests/plans/rules/test_rules_add.py @@ -2,7 +2,7 @@ import rules from adhocracy4.test.helpers import setup_users -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_plans.add_plan' @@ -18,14 +18,14 @@ def test_rule(plan, user_factory, group_factory, plan.projects.add(project) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert not rules.has_perm(perm_name, anonymous, plan.organisation) assert not rules.has_perm(perm_name, user, plan.organisation) assert not rules.has_perm(perm_name, moderator, plan.organisation) assert not rules.has_perm(perm_name, group_member_out, plan.organisation) - assert not rules.has_perm(perm_name, group_member_in_project, + assert not rules.has_perm(perm_name, group_member_in_pro, plan.organisation) - assert rules.has_perm(perm_name, group_member_in_orga, plan.organisation) + assert rules.has_perm(perm_name, group_member_in_org, plan.organisation) assert rules.has_perm(perm_name, initiator, plan.organisation) diff --git a/tests/plans/rules/test_rules_change.py b/tests/plans/rules/test_rules_change.py index 73583b71fc..ac741f6758 100644 --- a/tests/plans/rules/test_rules_change.py +++ b/tests/plans/rules/test_rules_change.py @@ -2,7 +2,7 @@ import rules from adhocracy4.test.helpers import setup_users -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 
'meinberlin_plans.change_plan' @@ -18,13 +18,13 @@ def test_rule(plan, user_factory, group_factory, plan.projects.add(project) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert not rules.has_perm(perm_name, anonymous, plan) assert not rules.has_perm(perm_name, user, plan) assert not rules.has_perm(perm_name, moderator, plan) - assert not rules.has_perm(perm_name, group_member_in_orga, plan) + assert not rules.has_perm(perm_name, group_member_in_org, plan) assert not rules.has_perm(perm_name, group_member_out, plan) - assert not rules.has_perm(perm_name, group_member_in_project, plan) + assert not rules.has_perm(perm_name, group_member_in_pro, plan) assert rules.has_perm(perm_name, initiator, plan) diff --git a/tests/plans/rules/test_rules_export.py b/tests/plans/rules/test_rules_export.py index b4d4800577..332c7728a4 100644 --- a/tests/plans/rules/test_rules_export.py +++ b/tests/plans/rules/test_rules_export.py @@ -2,7 +2,7 @@ import rules from adhocracy4.test.helpers import setup_users -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_plans.export_plan' @@ -18,14 +18,14 @@ def test_rule(plan, user_factory, group_factory, plan.projects.add(project) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert not rules.has_perm(perm_name, anonymous, plan.organisation) assert not rules.has_perm(perm_name, user, plan.organisation) assert not rules.has_perm(perm_name, moderator, plan.organisation) assert not rules.has_perm(perm_name, group_member_out, plan.organisation) - assert not rules.has_perm(perm_name, group_member_in_project, + assert not rules.has_perm(perm_name, group_member_in_pro, plan.organisation) - assert rules.has_perm(perm_name, group_member_in_orga, plan.organisation) + assert rules.has_perm(perm_name, group_member_in_org, plan.organisation) assert rules.has_perm(perm_name, initiator, plan.organisation) diff --git a/tests/plans/rules/test_rules_list.py b/tests/plans/rules/test_rules_list.py index a7ff05cbd8..8e72d6b607 100644 --- a/tests/plans/rules/test_rules_list.py +++ b/tests/plans/rules/test_rules_list.py @@ -2,7 +2,7 @@ import rules from adhocracy4.test.helpers import setup_users -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_plans.list_plan' @@ -18,13 +18,13 @@ def test_rule(plan, user_factory, group_factory, plan.projects.add(project) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert rules.has_perm(perm_name, anonymous, None) assert rules.has_perm(perm_name, user, None) assert rules.has_perm(perm_name, moderator, None) assert rules.has_perm(perm_name, initiator, None) - assert 
rules.has_perm(perm_name, group_member_in_orga, None) + assert rules.has_perm(perm_name, group_member_in_org, None) assert rules.has_perm(perm_name, group_member_out, None) - assert rules.has_perm(perm_name, group_member_in_project, None) + assert rules.has_perm(perm_name, group_member_in_pro, None) diff --git a/tests/plans/rules/test_rules_view.py b/tests/plans/rules/test_rules_view.py index ae843d9266..f761d0f131 100644 --- a/tests/plans/rules/test_rules_view.py +++ b/tests/plans/rules/test_rules_view.py @@ -2,7 +2,7 @@ import rules from adhocracy4.test.helpers import setup_users -from tests.helpers import setup_group_users +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_plans.view_plan' @@ -18,16 +18,16 @@ def test_rule(plan, user_factory, group_factory, plan.projects.add(project) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert rules.has_perm(perm_name, anonymous, plan) assert rules.has_perm(perm_name, user, plan) assert rules.has_perm(perm_name, moderator, plan) assert rules.has_perm(perm_name, initiator, plan) - assert rules.has_perm(perm_name, group_member_in_orga, plan) + assert rules.has_perm(perm_name, group_member_in_org, plan) assert rules.has_perm(perm_name, group_member_out, plan) - assert rules.has_perm(perm_name, group_member_in_project, plan) + assert rules.has_perm(perm_name, group_member_in_pro, plan) @pytest.mark.django_db @@ -38,13 +38,13 @@ def test_rule_plan_draft(plan_factory, user_factory, group_factory, plan.projects.add(project) anonymous, moderator, initiator = setup_users(project) - group_member_in_orga, group_member_out, group_member_in_project, project \ - = setup_group_users(user_factory, group_factory, project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert not rules.has_perm(perm_name, anonymous, plan) assert not rules.has_perm(perm_name, user, plan) assert not rules.has_perm(perm_name, moderator, plan) - assert not rules.has_perm(perm_name, group_member_in_orga, plan) + assert not rules.has_perm(perm_name, group_member_in_org, plan) assert not rules.has_perm(perm_name, group_member_out, plan) - assert not rules.has_perm(perm_name, group_member_in_project, plan) + assert not rules.has_perm(perm_name, group_member_in_pro, plan) assert rules.has_perm(perm_name, initiator, plan) diff --git a/tests/projectcontainers/dashboard_components/test_views_container_basic.py b/tests/projectcontainers/dashboard_components/test_views_container_basic.py index 3e11c2f1d5..8e2ba342b8 100644 --- a/tests/projectcontainers/dashboard_components/test_views_container_basic.py +++ b/tests/projectcontainers/dashboard_components/test_views_container_basic.py @@ -3,7 +3,7 @@ from adhocracy4.dashboard import components from meinberlin.test.helpers import assert_dashboard_form_component_edited from meinberlin.test.helpers import assert_dashboard_form_component_response -from meinberlin.test.helpers import setup_group_member +from meinberlin.test.helpers import setup_group_members component = components.projects.get('container-basic') @@ -34,10 +34,10 @@ def test_edit_view(client, project, project_container_factory): def test_edit_view_group_member(client, project, 
project_container_factory, group_factory, user_factory): project_container = project_container_factory(projects=[project]) - group_member, _, project_container = setup_group_member( - None, project_container, group_factory, user_factory) + project_container, _, group_member_in_pro, _ = \ + setup_group_members(project_container, group_factory, user_factory) url = component.get_base_url(project_container) - client.login(username=group_member.email, password='password') + client.login(username=group_member_in_pro.email, password='password') response = client.get(url) assert_dashboard_form_component_response(response, component) diff --git a/tests/projectcontainers/dashboard_components/test_views_container_information.py b/tests/projectcontainers/dashboard_components/test_views_container_information.py index b415487a94..8674aed51f 100644 --- a/tests/projectcontainers/dashboard_components/test_views_container_information.py +++ b/tests/projectcontainers/dashboard_components/test_views_container_information.py @@ -3,7 +3,7 @@ from adhocracy4.dashboard import components from meinberlin.test.helpers import assert_dashboard_form_component_edited from meinberlin.test.helpers import assert_dashboard_form_component_response -from meinberlin.test.helpers import setup_group_member +from meinberlin.test.helpers import setup_group_members component = components.projects.get('container-information') @@ -31,10 +31,10 @@ def test_edit_view(client, project, project_container_factory): def test_edit_view_group_member(client, project, project_container_factory, group_factory, user_factory): project_container = project_container_factory(projects=[project]) - group_member, _, project_container = setup_group_member( - None, project_container, group_factory, user_factory) + project_container, _, group_member_in_pro, _ = \ + setup_group_members(project_container, group_factory, user_factory) url = component.get_base_url(project_container) - client.login(username=group_member.email, password='password') + client.login(username=group_member_in_pro.email, password='password') response = client.get(url) assert_dashboard_form_component_response(response, component) diff --git a/tests/projectcontainers/dashboard_components/test_views_container_plans.py b/tests/projectcontainers/dashboard_components/test_views_container_plans.py index ddd931a4ed..0197434c12 100644 --- a/tests/projectcontainers/dashboard_components/test_views_container_plans.py +++ b/tests/projectcontainers/dashboard_components/test_views_container_plans.py @@ -3,7 +3,7 @@ from adhocracy4.dashboard import components from adhocracy4.test.helpers import redirect_target from meinberlin.test.helpers import assert_dashboard_form_component_response -from meinberlin.test.helpers import setup_group_member +from meinberlin.test.helpers import setup_group_members component = components.projects.get('plans') @@ -32,11 +32,12 @@ def test_edit_view(client, plan_factory, project_container): @pytest.mark.django_db def test_edit_view_group_member(client, plan_factory, project_container, group_factory, user_factory): - group_member, organisation, project_container = setup_group_member( - None, project_container, group_factory, user_factory) + project_container, _, group_member_in_pro, _ = \ + setup_group_members(project_container, group_factory, user_factory) + organisation = project_container.organisation plan = plan_factory(organisation=organisation) url = component.get_base_url(project_container) - client.login(username=group_member.email, password='password') + 
client.login(username=group_member_in_pro.email, password='password') response = client.get(url) assert_dashboard_form_component_response(response, component) diff --git a/tests/projectcontainers/dashboard_components/test_views_container_point.py b/tests/projectcontainers/dashboard_components/test_views_container_point.py index 7ef3dd483a..fb88388c22 100644 --- a/tests/projectcontainers/dashboard_components/test_views_container_point.py +++ b/tests/projectcontainers/dashboard_components/test_views_container_point.py @@ -4,7 +4,7 @@ from adhocracy4.dashboard import components from meinberlin.test.helpers import assert_dashboard_form_component_response -from meinberlin.test.helpers import setup_group_member +from meinberlin.test.helpers import setup_group_members component = components.projects.get('point') @@ -41,10 +41,10 @@ def test_edit_view(client, project_container, administrative_district): def test_edit_view_group_member( client, project_container, administrative_district, group_factory, user_factory): - group_member, _, project_container = setup_group_member( - None, project_container, group_factory, user_factory) + project_container, _, group_member_in_pro, _ = \ + setup_group_members(project_container, group_factory, user_factory) url = component.get_base_url(project_container) - client.login(username=group_member.email, password='password') + client.login(username=group_member_in_pro.email, password='password') response = client.get(url) assert_dashboard_form_component_response(response, component) diff --git a/tests/projectcontainers/dashboard_components/test_views_container_topics.py b/tests/projectcontainers/dashboard_components/test_views_container_topics.py index 8c3419ae72..ca3b8f85f5 100644 --- a/tests/projectcontainers/dashboard_components/test_views_container_topics.py +++ b/tests/projectcontainers/dashboard_components/test_views_container_topics.py @@ -4,7 +4,7 @@ from adhocracy4.dashboard import components from adhocracy4.test.helpers import redirect_target from meinberlin.test.helpers import assert_dashboard_form_component_response -from meinberlin.test.helpers import setup_group_member +from meinberlin.test.helpers import setup_group_members component = components.projects.get('topics') @@ -33,10 +33,10 @@ def test_edit_view(client, project_container): @pytest.mark.django_db def test_edit_view_group_member(client, project_container, group_factory, user_factory): - group_member, _, project_container = setup_group_member( - None, project_container, group_factory, user_factory) + project_container, _, group_member_in_pro, _ = \ + setup_group_members(project_container, group_factory, user_factory) url = component.get_base_url(project_container) - client.login(username=group_member.email, password='password') + client.login(username=group_member_in_pro.email, password='password') response = client.get(url) assert_dashboard_form_component_response(response, component) diff --git a/tests/projects/rules/test_rules_participate.py b/tests/projects/rules/test_rules_participate.py index a649c5f741..776ee01152 100644 --- a/tests/projects/rules/test_rules_participate.py +++ b/tests/projects/rules/test_rules_participate.py @@ -40,8 +40,8 @@ def test_participate_project_draft(user_factory, group_factory, organisation, assert not rules.has_perm(perm_name_participate, group_member_in_orga, project) assert not rules.has_perm(perm_name_participate, group_member_out, project) - assert not rules.has_perm(perm_name_participate, group_member_in_project, - project) + assert 
rules.has_perm(perm_name_participate, group_member_in_project, + project) assert rules.has_perm(perm_name_participate, moderator, project) assert rules.has_perm(perm_name_participate, initiator, project) assert rules.has_perm(perm_name_participate, admin, project) @@ -150,8 +150,8 @@ def test_participate_private_project(user_factory, group_factory, assert not rules.has_perm(perm_name_participate, group_member_in_orga, project) assert not rules.has_perm(perm_name_participate, group_member_out, project) - assert not rules.has_perm(perm_name_participate, group_member_in_project, - project) + assert rules.has_perm(perm_name_participate, group_member_in_project, + project) assert rules.has_perm(perm_name_participate, participant, project) assert rules.has_perm(perm_name_participate, moderator, project) assert rules.has_perm(perm_name_participate, initiator, project) @@ -190,8 +190,8 @@ def test_participate_semiprivate_project(user_factory, group_factory, assert not rules.has_perm(perm_name_participate, group_member_in_orga, project) assert not rules.has_perm(perm_name_participate, group_member_out, project) - assert not rules.has_perm(perm_name_participate, group_member_in_project, - project) + assert rules.has_perm(perm_name_participate, group_member_in_project, + project) assert rules.has_perm(perm_name_participate, participant, project) assert rules.has_perm(perm_name_participate, moderator, project) assert rules.has_perm(perm_name_participate, initiator, project) diff --git a/tests/topicprio/rules/test_rules_add.py b/tests/topicprio/rules/test_rules_add.py index ab0933557d..6df053105f 100644 --- a/tests/topicprio/rules/test_rules_add.py +++ b/tests/topicprio/rules/test_rules_add.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.topicprio import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_topicprio.add_topic' @@ -17,96 +18,138 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, user): +def test_pre_phase( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active(phase_factory, user): +def test_phase_active( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, 
module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, user, user2): +def test_phase_active_project_private( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.PrioritizePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, participant, module) assert not rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, user, user2): +def test_phase_active_project_semipublic( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase( phase_factory, None, phases.PrioritizePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, participant, module) assert not rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, user): +def test_phase_active_project_draft( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.PrioritizePhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, 
group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, user): +def test_post_phase_project_archived( + phase_factory, user, admin, user_factory, group_factory): phase, module, project, _ = setup_phase(phase_factory, None, phases.PrioritizePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, module) assert not rules.has_perm(perm_name, user, module) + assert not rules.has_perm(perm_name, group_member_out, module) + assert not rules.has_perm(perm_name, group_member_in_org, module) + assert rules.has_perm(perm_name, group_member_in_pro, module) assert not rules.has_perm(perm_name, moderator, module) assert rules.has_perm(perm_name, initiator, module) + assert rules.has_perm(perm_name, admin, module) diff --git a/tests/topicprio/rules/test_rules_change.py b/tests/topicprio/rules/test_rules_change.py index 29443412c4..75311e6242 100644 --- a/tests/topicprio/rules/test_rules_change.py +++ b/tests/topicprio/rules/test_rules_change.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.topicprio import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_topicprio.change_topic' @@ -17,47 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, topic_factory, user): +def test_pre_phase( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, topic_factory, user): +def test_phase_active( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not 
rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, topic_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, topic_factory, phases.PrioritizePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -66,20 +86,28 @@ def test_phase_active_project_private(phase_factory, topic_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, topic_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, topic_factory, phases.PrioritizePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -88,39 +116,60 @@ def test_phase_active_project_semipublic(phase_factory, topic_factory, assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) assert not rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, topic_factory, user): +def test_phase_active_project_draft( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with 
freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, topic_factory, user): +def test_post_phase_project_archived( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert not rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/topicprio/rules/test_rules_comment.py b/tests/topicprio/rules/test_rules_comment.py index a387bcd426..2104d4ba0f 100644 --- a/tests/topicprio/rules/test_rules_comment.py +++ b/tests/topicprio/rules/test_rules_comment.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.topicprio import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_topicprio.comment_topic' @@ -17,42 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, topic_factory, user): +def test_pre_phase( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, topic_factory, user): +def test_phase_active( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + creator = 
item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, topic_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, topic_factory, phases.PrioritizePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -60,19 +85,28 @@ def test_phase_active_project_private(phase_factory, topic_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, topic_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, topic_factory, phases.PrioritizePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -80,35 +114,60 @@ def test_phase_active_project_semipublic(phase_factory, topic_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, topic_factory, user): +def test_phase_active_project_draft( + phase_factory, topic_factory, user, admin, 
user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, topic_factory, user): +def test_post_phase_project_archived( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/topicprio/rules/test_rules_rate.py b/tests/topicprio/rules/test_rules_rate.py index 02caa0d823..d5ec874850 100644 --- a/tests/topicprio/rules/test_rules_rate.py +++ b/tests/topicprio/rules/test_rules_rate.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.topicprio import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_topicprio.rate_topic' @@ -17,42 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, topic_factory, user): +def test_pre_phase( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_pre_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert 
rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, topic_factory, user): +def test_phase_active( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_public with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, topic_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, topic_factory, phases.PrioritizePhase, module__project__access=Access.PRIVATE) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -60,19 +85,28 @@ def test_phase_active_project_private(phase_factory, topic_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, topic_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, topic_factory, phases.PrioritizePhase, module__project__access=Access.SEMIPUBLIC) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) - participant = user2 + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -80,35 +114,60 @@ def test_phase_active_project_semipublic(phase_factory, topic_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert 
rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, topic_factory, user): +def test_phase_active_project_draft( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, topic_factory, user): +def test_post_phase_project_archived( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) diff --git a/tests/topicprio/rules/test_rules_view.py b/tests/topicprio/rules/test_rules_view.py index c699747a5d..e55e4a364b 100644 --- a/tests/topicprio/rules/test_rules_view.py +++ b/tests/topicprio/rules/test_rules_view.py @@ -8,6 +8,7 @@ from adhocracy4.test.helpers import setup_phase from adhocracy4.test.helpers import setup_users from meinberlin.apps.topicprio import phases +from meinberlin.test.helpers import setup_group_members perm_name = 'meinberlin_topicprio.view_topic' @@ -17,42 +18,66 @@ def test_perm_exists(): @pytest.mark.django_db -def test_pre_phase(phase_factory, topic_factory, user): +def test_pre_phase( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_pre_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert 
rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active(phase_factory, topic_factory, user): +def test_phase_active( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.access == Access.PUBLIC with freeze_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_private(phase_factory, topic_factory, - user, user2): +def test_phase_active_project_private( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, topic_factory, phases.PrioritizePhase, module__project__access=Access.PRIVATE ) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.PRIVATE @@ -60,19 +85,28 @@ def test_phase_active_project_private(phase_factory, topic_factory, assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_semipublic(phase_factory, topic_factory, - user, user2): +def test_phase_active_project_semipublic( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase( phase_factory, topic_factory, phases.PrioritizePhase, module__project__access=Access.SEMIPUBLIC ) anonymous, moderator, initiator = setup_users(project) - participant = user2 + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) + participant = user_factory() project.participants.add(participant) assert project.access == Access.SEMIPUBLIC @@ -80,35 +114,60 @@ def 
test_phase_active_project_semipublic(phase_factory, topic_factory, assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) assert rules.has_perm(perm_name, participant, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_phase_active_project_draft(phase_factory, topic_factory, user): +def test_phase_active_project_draft( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase, module__project__is_draft=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_draft with freeze_phase(phase): assert not rules.has_perm(perm_name, anonymous, item) assert not rules.has_perm(perm_name, user, item) + assert not rules.has_perm(perm_name, creator, item) + assert not rules.has_perm(perm_name, group_member_out, item) + assert not rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item) @pytest.mark.django_db -def test_post_phase_project_archived(phase_factory, topic_factory, user): +def test_post_phase_project_archived( + phase_factory, topic_factory, user, admin, user_factory, + group_factory): phase, _, project, item = setup_phase(phase_factory, topic_factory, phases.PrioritizePhase, module__project__is_archived=True) anonymous, moderator, initiator = setup_users(project) + creator = item.creator + project, group_member_in_org, group_member_in_pro, group_member_out = \ + setup_group_members(project, group_factory, user_factory) assert project.is_archived with freeze_post_phase(phase): assert rules.has_perm(perm_name, anonymous, item) assert rules.has_perm(perm_name, user, item) + assert rules.has_perm(perm_name, creator, item) + assert rules.has_perm(perm_name, group_member_out, item) + assert rules.has_perm(perm_name, group_member_in_org, item) + assert rules.has_perm(perm_name, group_member_in_pro, item) assert rules.has_perm(perm_name, moderator, item) assert rules.has_perm(perm_name, initiator, item) + assert rules.has_perm(perm_name, admin, item)
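These permission tests all funnel through django-rules: every `rules.has_perm(perm_name, user, item)` assertion above evaluates a named predicate expression that the app registers. A minimal, self-contained sketch of that machinery — the predicate, permission name, and objects below are toy stand-ins, not meinberlin's real ones:

```python
import rules
from types import SimpleNamespace


@rules.predicate
def is_listed_reviewer(user, doc):
    # Object-level predicates receive (user, obj), just as the predicates
    # behind 'meinberlin_topicprio.rate_topic' receive (user, topic).
    return doc is not None and user.name in doc.reviewers


@rules.predicate
def is_owner(user, doc):
    return doc is not None and doc.owner == user.name


# A permission is a named predicate expression; has_perm() simply evaluates it.
rules.add_perm('demo.review_document', is_listed_reviewer | is_owner)

doc = SimpleNamespace(owner='ada', reviewers=['bob'])
assert rules.has_perm('demo.review_document', SimpleNamespace(name='bob'), doc)
assert not rules.has_perm('demo.review_document', SimpleNamespace(name='eve'), doc)
```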
comic__grand-challenge.org-827
Update to Django 2.2

- [ ] Check that all Django apps support 2.2
[ { "content": "import os\nimport posixpath\nimport re\n\nfrom django.conf import settings\nfrom django.core.files import File\nfrom django.core.files.storage import DefaultStorage\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.utils._os import safe_join\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.exceptions import AuthenticationFailed\n\nfrom grandchallenge.cases.models import Image\nfrom grandchallenge.challenges.models import Challenge\nfrom grandchallenge.core.storage import ProtectedS3Storage\nfrom grandchallenge.evaluation.models import Submission\nfrom grandchallenge.serving.api import serve_file\nfrom grandchallenge.serving.permissions import (\n can_access,\n user_can_download_image,\n user_can_download_submission,\n)\n\n\ndef serve_fullpath(*, fullpath):\n storage = DefaultStorage()\n\n if not (os.path.abspath(fullpath) == fullpath) or not storage.exists(\n fullpath\n ):\n raise Http404(\"File not found.\")\n\n try:\n f = storage.open(fullpath, \"rb\")\n file = File(f)\n return serve_file(file, save_as=True)\n except IOError:\n raise Http404(\"File not found.\")\n\n\ndef protected_storage_redirect(*, name):\n # Get the storage with the internal redirect and auth. This will prepend\n # settings.PROTECTED_S3_STORAGE_KWARGS['endpoint_url'] to the url\n storage = ProtectedS3Storage(internal=True)\n\n if not storage.exists(name=name):\n raise Http404(\"File not found.\")\n\n url = storage.url(name=name)\n\n # Now strip the endpoint_url\n external_url = re.match(\n f\"^{settings.PROTECTED_S3_STORAGE_KWARGS['endpoint_url']}(.*)$\", url\n ).group(1)\n\n response = HttpResponse()\n response[\"X-Accel-Redirect\"] = external_url\n\n return response\n\n\ndef serve_folder(request, *, challenge_name=None, folder=None, path):\n \"\"\"\n Serve static files in a folder.\n\n If the file is in a challenge folder, then the subfolders of this challenge\n will be checked for permissions, see `can_access`.\n\n If the challenge_short_name is not set, then the folder must be set.\n ALL FILES IN THIS FOLDER WILL BE AVAILABLE TO DOWNLOAD.\n \"\"\"\n path = posixpath.normpath(path).lstrip(\"/\")\n\n if challenge_name:\n if folder:\n raise AttributeError(\"Only challenge_name or folder should be set\")\n\n challenge = get_object_or_404(\n Challenge, short_name__iexact=challenge_name\n )\n\n document_root = safe_join(settings.MEDIA_ROOT, challenge.short_name)\n fullpath = safe_join(document_root, path)\n allowed = can_access(\n request.user,\n fullpath[len(document_root) :].lstrip(\"/\"),\n challenge=challenge,\n )\n elif folder:\n document_root = safe_join(settings.MEDIA_ROOT, folder)\n fullpath = safe_join(document_root, path)\n allowed = True\n else:\n raise AttributeError(\"challenge_name or folder must be set\")\n\n if not allowed:\n raise Http404(\"File not found.\")\n\n return serve_fullpath(fullpath=fullpath)\n\n\ndef serve_images(request, *, pk, path):\n document_root = safe_join(f\"/{settings.IMAGE_FILES_SUBDIRECTORY}\", pk)\n path = posixpath.normpath(path).lstrip(\"/\")\n name = safe_join(document_root, path)\n\n try:\n image = Image.objects.get(pk=pk)\n except Image.DoesNotExist:\n raise Http404(\"File not found.\")\n\n try:\n user, _ = TokenAuthentication().authenticate(request)\n except (AuthenticationFailed, TypeError):\n user = request.user\n\n if user_can_download_image(user=user, image=image):\n return protected_storage_redirect(name=name)\n\n raise Http404(\"File not found.\")\n\n\ndef 
serve_submissions(request, *, submission_pk, **_):\n try:\n submission = Submission.objects.get(pk=submission_pk)\n except Submission.DoesNotExist:\n raise Http404(\"File not found.\")\n\n if user_can_download_submission(user=request.user, submission=submission):\n return protected_storage_redirect(name=submission.file.name)\n\n raise Http404(\"File not found.\")\n", "path": "app/grandchallenge/serving/views.py" } ]
[ { "content": "import os\nimport posixpath\nimport re\n\nfrom django.conf import settings\nfrom django.core.files import File\nfrom django.core.files.storage import DefaultStorage\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.utils._os import safe_join\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.exceptions import AuthenticationFailed\n\nfrom grandchallenge.cases.models import Image\nfrom grandchallenge.challenges.models import Challenge\nfrom grandchallenge.core.storage import ProtectedS3Storage\nfrom grandchallenge.evaluation.models import Submission\nfrom grandchallenge.serving.api import serve_file\nfrom grandchallenge.serving.permissions import (\n can_access,\n user_can_download_image,\n user_can_download_submission,\n)\n\n\ndef serve_fullpath(*, fullpath):\n storage = DefaultStorage()\n\n if not (os.path.abspath(fullpath) == fullpath) or not storage.exists(\n fullpath\n ):\n raise Http404(\"File not found.\")\n\n try:\n f = storage.open(fullpath, \"rb\")\n file = File(f)\n return serve_file(file, save_as=True)\n except IOError:\n raise Http404(\"File not found.\")\n\n\ndef protected_storage_redirect(*, name):\n # Get the storage with the internal redirect and auth. This will prepend\n # settings.PROTECTED_S3_STORAGE_KWARGS['endpoint_url'] to the url\n storage = ProtectedS3Storage(internal=True)\n\n if not storage.exists(name=name):\n raise Http404(\"File not found.\")\n\n url = storage.url(name=name)\n\n # Now strip the endpoint_url\n external_url = re.match(\n f\"^{settings.PROTECTED_S3_STORAGE_KWARGS['endpoint_url']}(.*)$\", url\n ).group(1)\n\n response = HttpResponse()\n response[\"X-Accel-Redirect\"] = external_url\n\n return response\n\n\ndef serve_folder(request, *, challenge_name=None, folder=None, path):\n \"\"\"\n Serve static files in a folder.\n\n If the file is in a challenge folder, then the subfolders of this challenge\n will be checked for permissions, see `can_access`.\n\n If the challenge_short_name is not set, then the folder must be set.\n ALL FILES IN THIS FOLDER WILL BE AVAILABLE TO DOWNLOAD.\n \"\"\"\n path = posixpath.normpath(path).lstrip(\"/\")\n\n if challenge_name:\n if folder:\n raise AttributeError(\"Only challenge_name or folder should be set\")\n\n challenge = get_object_or_404(\n Challenge, short_name__iexact=challenge_name\n )\n\n document_root = safe_join(settings.MEDIA_ROOT, challenge.short_name)\n fullpath = safe_join(document_root, path)\n allowed = can_access(\n request.user,\n fullpath[len(document_root) :].lstrip(\"/\"),\n challenge=challenge,\n )\n elif folder:\n document_root = safe_join(settings.MEDIA_ROOT, folder)\n fullpath = safe_join(document_root, path)\n allowed = True\n else:\n raise AttributeError(\"challenge_name or folder must be set\")\n\n if not allowed:\n raise Http404(\"File not found.\")\n\n return serve_fullpath(fullpath=fullpath)\n\n\ndef serve_images(request, *, pk, path):\n document_root = safe_join(f\"/{settings.IMAGE_FILES_SUBDIRECTORY}\", str(pk))\n path = posixpath.normpath(path).lstrip(\"/\")\n name = safe_join(document_root, path)\n\n try:\n image = Image.objects.get(pk=pk)\n except Image.DoesNotExist:\n raise Http404(\"File not found.\")\n\n try:\n user, _ = TokenAuthentication().authenticate(request)\n except (AuthenticationFailed, TypeError):\n user = request.user\n\n if user_can_download_image(user=user, image=image):\n return protected_storage_redirect(name=name)\n\n raise Http404(\"File not found.\")\n\n\ndef 
serve_submissions(request, *, submission_pk, **_):\n try:\n submission = Submission.objects.get(pk=submission_pk)\n except Submission.DoesNotExist:\n raise Http404(\"File not found.\")\n\n if user_can_download_submission(user=request.user, submission=submission):\n return protected_storage_redirect(name=submission.file.name)\n\n raise Http404(\"File not found.\")\n", "path": "app/grandchallenge/serving/views.py" } ]
diff --git a/Pipfile b/Pipfile index 86912d68d6..3efc1e76a3 100644 --- a/Pipfile +++ b/Pipfile @@ -21,7 +21,7 @@ werkzeug = "*" "beautifulsoup4" = "*" celery = "*" redis = "*" -django = "<2.2" +django = "<2.3" django-countries = "*" django-crispy-forms = "*" django-userena-ce = "*" @@ -39,8 +39,8 @@ nbconvert = "*" simpleitk = "*" django-celery-beat = "*" django-favicon-plus = "*" -"psycopg2" = "<2.8" # v2.8 is broken in Django 2.1, fixed in Django 2.2 -"django-select2" = "<7.0" # v7.0 supports Django 2.2 but is currently borked on PyPi +"psycopg2" = "*" +"django-select2" = "*" django-celery-results = "*" django-summernote = "*" bleach = "*" diff --git a/Pipfile.lock b/Pipfile.lock index 529c728590..35ae439fed 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "a11a96981ef9de32b0d6b789522b8ee97a7a8e14f1d468ea29ffffe9d4ab09f0" + "sha256": "db39c2eacae0421dea713817c4ce3576ada1551df465111f154afdeabf01f706" }, "pipfile-spec": 6, "requires": {}, @@ -53,18 +53,18 @@ }, "boto3": { "hashes": [ - "sha256:59782c178af2d5acf66315fe96b3cd1dc075109c0296c384e18a6c4143c0745d", - "sha256:758787b5ad7c5e2aa2979b0671129491fbab00a7b84a26532cd6b9d073ed862b" + "sha256:98516650f72c02d0c7b47e6f0df69c19a0d1046d3973783ed7b0f093e8e81adb", + "sha256:aa06b240e49dbc8443bd96086c92baa0275281e0f7cc0439a328fef9ac254253" ], "index": "pypi", - "version": "==1.9.156" + "version": "==1.9.157" }, "botocore": { "hashes": [ - "sha256:00b72bc2104a2f56513bc40ce380d0605262decc9fe3b2ce840da48f257598d7", - "sha256:a12a817bf1faf36837bc2d371aacfb5c7c324e0e9f0b3af94b9930cfcd8d62ea" + "sha256:2ded06af31f9e423fcf549e35f2fa0fd618e12995e37ebc52186dbaa870316c6", + "sha256:473955cd4eda6121047205fc43c56ab0e0616b93651bac5e9c747fc180603fe2" ], - "version": "==1.12.156" + "version": "==1.12.157" }, "brotli": { "hashes": [ @@ -146,11 +146,11 @@ }, "django": { "hashes": [ - "sha256:0fd54e4f27bc3e0b7054a11e6b3a18fa53f2373f6b2df8a22e8eadfe018970a5", - "sha256:f3b28084101d516f56104856761bc247f85a2a5bbd9da39d9f6197ff461b3ee4" + "sha256:6fcc3cbd55b16f9a01f37de8bcbe286e0ea22e87096557f1511051780338eaea", + "sha256:bb407d0bb46395ca1241f829f5bd03f7e482f97f7d1936e26e98dacb201ed4ec" ], "index": "pypi", - "version": "==2.1.8" + "version": "==2.2.1" }, "django-appconf": { "hashes": [ @@ -236,11 +236,11 @@ }, "django-select2": { "hashes": [ - "sha256:7e5a3da54332cb94174a91e0a03177ce44c152049b0876424ba81fc805dbb89f", - "sha256:f93b5b626e5c0e8ced24e1e31027eb4850649216e4132cdc3f3880c0bed241c9" + "sha256:80ecbd5a8caa8195664e03d23f33f1f55210f7cd6eeb52eae4ee0adc417cde0d", + "sha256:b6036494d8e3790693a1d134de8cffc731e12f01f64031e406b33aacc63c7d65" ], "index": "pypi", - "version": "==6.3.1" + "version": "==7.0.3" }, "django-simple-history": { "hashes": [ @@ -501,31 +501,31 @@ }, "numpy": { "hashes": [ - "sha256:0e2eed77804b2a6a88741f8fcac02c5499bba3953ec9c71e8b217fad4912c56c", - "sha256:1c666f04553ef70fda54adf097dbae7080645435fc273e2397f26bbf1d127bbb", - "sha256:1f46532afa7b2903bfb1b79becca2954c0a04389d19e03dc73f06b039048ac40", - "sha256:315fa1b1dfc16ae0f03f8fd1c55f23fd15368710f641d570236f3d78af55e340", - "sha256:3d5fcea4f5ed40c3280791d54da3ad2ecf896f4c87c877b113576b8280c59441", - "sha256:48241759b99d60aba63b0e590332c600fc4b46ad597c9b0a53f350b871ef0634", - "sha256:4b4f2924b36d857cf302aec369caac61e43500c17eeef0d7baacad1084c0ee84", - "sha256:54fe3b7ed9e7eb928bbc4318f954d133851865f062fa4bbb02ef8940bc67b5d2", - "sha256:5a8f021c70e6206c317974c93eaaf9bc2b56295b6b1cacccf88846e44a1f33fc", - 
"sha256:754a6be26d938e6ca91942804eb209307b73f806a1721176278a6038869a1686", - "sha256:771147e654e8b95eea1293174a94f34e2e77d5729ad44aefb62fbf8a79747a15", - "sha256:78a6f89da87eeb48014ec652a65c4ffde370c036d780a995edaeb121d3625621", - "sha256:7fde5c2a3a682a9e101e61d97696687ebdba47637611378b4127fe7e47fdf2bf", - "sha256:80d99399c97f646e873dd8ce87c38cfdbb668956bbc39bc1e6cac4b515bba2a0", - "sha256:88a72c1e45a0ae24d1f249a529d9f71fe82e6fa6a3fd61414b829396ec585900", - "sha256:a4f4460877a16ac73302a9c077ca545498d9fe64e6a81398d8e1a67e4695e3df", - "sha256:a61255a765b3ac73ee4b110b28fccfbf758c985677f526c2b4b39c48cc4b509d", - "sha256:ab4896a8c910b9a04c0142871d8800c76c8a2e5ff44763513e1dd9d9631ce897", - "sha256:abbd6b1c2ef6199f4b7ca9f818eb6b31f17b73a6110aadc4e4298c3f00fab24e", - "sha256:b16d88da290334e33ea992c56492326ea3b06233a00a1855414360b77ca72f26", - "sha256:b78a1defedb0e8f6ae1eb55fa6ac74ab42acc4569c3a2eacc2a407ee5d42ebcb", - "sha256:cfef82c43b8b29ca436560d51b2251d5117818a8d1fb74a8384a83c096745dad", - "sha256:d160e57731fcdec2beda807ebcabf39823c47e9409485b5a3a1db3a8c6ce763e" - ], - "version": "==1.16.3" + "sha256:0778076e764e146d3078b17c24c4d89e0ecd4ac5401beff8e1c87879043a0633", + "sha256:141c7102f20abe6cf0d54c4ced8d565b86df4d3077ba2343b61a6db996cefec7", + "sha256:14270a1ee8917d11e7753fb54fc7ffd1934f4d529235beec0b275e2ccf00333b", + "sha256:27e11c7a8ec9d5838bc59f809bfa86efc8a4fd02e58960fa9c49d998e14332d5", + "sha256:2a04dda79606f3d2f760384c38ccd3d5b9bb79d4c8126b67aff5eb09a253763e", + "sha256:3c26010c1b51e1224a3ca6b8df807de6e95128b0908c7e34f190e7775455b0ca", + "sha256:52c40f1a4262c896420c6ea1c6fda62cf67070e3947e3307f5562bd783a90336", + "sha256:6e4f8d9e8aa79321657079b9ac03f3cf3fd067bf31c1cca4f56d49543f4356a5", + "sha256:7242be12a58fec245ee9734e625964b97cf7e3f2f7d016603f9e56660ce479c7", + "sha256:7dc253b542bfd4b4eb88d9dbae4ca079e7bf2e2afd819ee18891a43db66c60c7", + "sha256:94f5bd885f67bbb25c82d80184abbf7ce4f6c3c3a41fbaa4182f034bba803e69", + "sha256:a89e188daa119ffa0d03ce5123dee3f8ffd5115c896c2a9d4f0dbb3d8b95bfa3", + "sha256:ad3399da9b0ca36e2f24de72f67ab2854a62e623274607e37e0ce5f5d5fa9166", + "sha256:b0348be89275fd1d4c44ffa39530c41a21062f52299b1e3ee7d1c61f060044b8", + "sha256:b5554368e4ede1856121b0dfa35ce71768102e4aa55e526cb8de7f374ff78722", + "sha256:cbddc56b2502d3f87fda4f98d948eb5b11f36ff3902e17cb6cc44727f2200525", + "sha256:d79f18f41751725c56eceab2a886f021d70fd70a6188fd386e29a045945ffc10", + "sha256:dc2ca26a19ab32dc475dbad9dfe723d3a64c835f4c23f625c2b6566ca32b9f29", + "sha256:dd9bcd4f294eb0633bb33d1a74febdd2b9018b8b8ed325f861fffcd2c7660bb8", + "sha256:e8baab1bc7c9152715844f1faca6744f2416929de10d7639ed49555a85549f52", + "sha256:ec31fe12668af687b99acf1567399632a7c47b0e17cfb9ae47c098644ef36797", + "sha256:f12b4f7e2d8f9da3141564e6737d79016fe5336cc92de6814eba579744f65b0a", + "sha256:f58ac38d5ca045a377b3b377c84df8175ab992c970a53332fa8ac2373df44ff7" + ], + "version": "==1.16.4" }, "oauth2": { "hashes": [ @@ -581,39 +581,20 @@ }, "psycopg2": { "hashes": [ - "sha256:02445ebbb3a11a3fe8202c413d5e6faf38bb75b4e336203ee144ca2c46529f94", - "sha256:0e9873e60f98f0c52339abf8f0339d1e22bfe5aae0bcf7aabd40c055175035ec", - "sha256:1148a5eb29073280bf9057c7fc45468592c1bb75a28f6df1591adb93c8cb63d0", - "sha256:259a8324e109d4922b0fcd046e223e289830e2568d6f4132a3702439e5fd532b", - "sha256:28dffa9ed4595429e61bacac41d3f9671bb613d1442ff43bcbec63d4f73ed5e8", - "sha256:314a74302d4737a3865d40ea50e430ce1543c921ba10f39d562e807cfe2edf2a", - "sha256:36b60201b6d215d7658a71493fdf6bd5e60ad9a0cffed39906627ff9f4f3afd3", - 
"sha256:3f9d532bce54c4234161176ff3b8688ff337575ca441ea27597e112dfcd0ee0c", - "sha256:5d222983847b40af989ad96c07fc3f07e47925e463baa5de716be8f805b41d9b", - "sha256:6757a6d2fc58f7d8f5d471ad180a0bd7b4dd3c7d681f051504fbea7ae29c8d6f", - "sha256:6a0e0f1e74edb0ab57d89680e59e7bfefad2bfbdf7c80eb38304d897d43674bb", - "sha256:6ca703ccdf734e886a1cf53eb702261110f6a8b0ed74bcad15f1399f74d3f189", - "sha256:8513b953d8f443c446aa79a4cc8a898bd415fc5e29349054f03a7d696d495542", - "sha256:9262a5ce2038570cb81b4d6413720484cb1bc52c064b2f36228d735b1f98b794", - "sha256:97441f851d862a0c844d981cbee7ee62566c322ebb3d68f86d66aa99d483985b", - "sha256:a07feade155eb8e69b54dd6774cf6acf2d936660c61d8123b8b6b1f9247b67d6", - "sha256:a9b9c02c91b1e3ec1f1886b2d0a90a0ea07cc529cb7e6e472b556bc20ce658f3", - "sha256:ae88216f94728d691b945983140bf40d51a1ff6c7fe57def93949bf9339ed54a", - "sha256:b360ffd17659491f1a6ad7c928350e229c7b7bd83a2b922b6ee541245c7a776f", - "sha256:b4221957ceccf14b2abdabef42d806e791350be10e21b260d7c9ce49012cc19e", - "sha256:b90758e49d5e6b152a460d10b92f8a6ccf318fcc0ee814dcf53f3a6fc5328789", - "sha256:c669ea986190ed05fb289d0c100cc88064351f2b85177cbfd3564c4f4847d18c", - "sha256:d1b61999d15c79cf7f4f7cc9021477aef35277fc52452cf50fd13b713c84424d", - "sha256:de7bb043d1adaaf46e38d47e7a5f703bb3dab01376111e522b07d25e1a79c1e1", - "sha256:e393568e288d884b94d263f2669215197840d097c7e5b0acd1a51c1ea7d1aba8", - "sha256:ed7e0849337bd37d89f2c2b0216a0de863399ee5d363d31b1e5330a99044737b", - "sha256:f153f71c3164665d269a5d03c7fa76ba675c7a8de9dc09a4e2c2cdc9936a7b41", - "sha256:f1fb5a8427af099beb7f65093cbdb52e021b8e6dbdfaf020402a623f4181baf5", - "sha256:f36b333e9f86a2fba960c72b90c34be6ca71819e300f7b1fc3d2b0f0b2c546cd", - "sha256:f4526d078aedd5187d0508aa5f9a01eae6a48a470ed678406da94b4cd6524b7e" + "sha256:00cfecb3f3db6eb76dcc763e71777da56d12b6d61db6a2c6ccbbb0bff5421f8f", + "sha256:076501fc24ae13b2609ba2303d88d4db79072562f0b8cc87ec1667dedff99dc1", + "sha256:4e2b34e4c0ddfeddf770d7df93e269700b080a4d2ec514fec668d71895f56782", + "sha256:5cacf21b6f813c239f100ef78a4132056f93a5940219ec25d2ef833cbeb05588", + "sha256:61f58e9ecb9e4dc7e30be56b562f8fc10ae3addcfcef51b588eed10a5a66100d", + "sha256:8954ff6e47247bdd134db602fcadfc21662835bd92ce0760f3842eacfeb6e0f3", + "sha256:b6e8c854cdc623028e558a409b06ea2f16d13438335941c7765d0a42b5bedd33", + "sha256:baca21c0f7344576346e260454d0007313ccca8c170684707a63946b27a56c8f", + "sha256:bb1735378770fb95dbe392d29e71405d45c8bdcfa064f916504833a92ab03c55", + "sha256:de3d3c46c1ee18f996db42d1eb44cf1565cc9e38fb1dbd9b773ff6b3fa8035d7", + "sha256:dee885602bb200bdcb1d30f6da6c7bb207360bc786d0a364fe1540dd14af0bab" ], "index": "pypi", - "version": "==2.7.7" + "version": "==2.8.2" }, "pygments": { "hashes": [ @@ -803,6 +784,13 @@ ], "version": "==1.9.1" }, + "sqlparse": { + "hashes": [ + "sha256:40afe6b8d4b1117e7dff5504d7a8ce07d9a1b15aeeade8a2d10f130a834f8177", + "sha256:7c3dca29c022744e95b547e867cee89f4fce4373f3549ccd8797d8eb52cdb873" + ], + "version": "==0.3.0" + }, "testpath": { "hashes": [ "sha256:46c89ebb683f473ffe2aab0ed9f12581d4d078308a3cb3765d79c6b2317b0109", @@ -988,11 +976,11 @@ }, "django": { "hashes": [ - "sha256:0fd54e4f27bc3e0b7054a11e6b3a18fa53f2373f6b2df8a22e8eadfe018970a5", - "sha256:f3b28084101d516f56104856761bc247f85a2a5bbd9da39d9f6197ff461b3ee4" + "sha256:6fcc3cbd55b16f9a01f37de8bcbe286e0ea22e87096557f1511051780338eaea", + "sha256:bb407d0bb46395ca1241f829f5bd03f7e482f97f7d1936e26e98dacb201ed4ec" ], "index": "pypi", - "version": "==2.1.8" + "version": "==2.2.1" }, "django-debug-toolbar": { "hashes": [ diff --git 
a/app/grandchallenge/serving/views.py b/app/grandchallenge/serving/views.py index 2af4f71a82..de9d3d1cd9 100644 --- a/app/grandchallenge/serving/views.py +++ b/app/grandchallenge/serving/views.py @@ -101,7 +101,7 @@ def serve_folder(request, *, challenge_name=None, folder=None, path): def serve_images(request, *, pk, path): - document_root = safe_join(f"/{settings.IMAGE_FILES_SUBDIRECTORY}", pk) + document_root = safe_join(f"/{settings.IMAGE_FILES_SUBDIRECTORY}", str(pk)) path = posixpath.normpath(path).lstrip("/") name = safe_join(document_root, path)
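The only functional change to the serving views in the diff above is wrapping `pk` in `str()` before `safe_join`. A small sketch of why that matters — the base path is made up, and the suggestion that older Django coerced these arguments to text for you is an assumption rather than something verified against the release notes:

```python
import uuid
from django.utils._os import safe_join

pk = uuid.uuid4()  # e.g. what a <uuid:pk> path converter hands to the view

# safe_join builds on os.path.join, which only accepts str/bytes/os.PathLike,
# so passing the UUID object directly raises TypeError.
try:
    safe_join("/protected/images", pk)
except TypeError as exc:
    print("raw UUID:", exc)

# Casting first, as the diff does, keeps the join (and the containment check
# that safe_join performs) working as before.
print(safe_join("/protected/images", str(pk)))
```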
cupy__cupy-4452
Triangular Solve not working for complex case

I have discovered that `solve_triangular` does not work if the input is complex but the matrix is real:

```
solve_triangular(
    cholesky,
    rhs,
    lower=True,
)
```

produces a real-valued result if `cholesky` is real but `rhs` is complex.

```
intelpython3 -c 'import cupy; cupy.show_config()'
OS : Linux-5.9.6-1-MANJARO-x86_64-with-arch-Manjaro-Linux
CuPy Version : 9.0.0a1
NumPy Version : 1.16.2+intel.0
SciPy Version : 1.5.0
Cython Build Version : 0.29.21
Cython Runtime Version : 0.29.6
CUDA Root : /opt/cuda
CUDA Build Version : 11010
CUDA Driver Version : 11010
CUDA Runtime Version : 11010
cuBLAS Version : 11300
cuFFT Version : 10300
cuRAND Version : 10202
cuSOLVER Version : (11, 0, 1)
cuSPARSE Version : 11300
NVRTC Version : (11, 1)
Thrust Version : 100910
CUB Build Version : 100910
cuDNN Build Version : None
cuDNN Version : None
NCCL Build Version : 2708
NCCL Runtime Version : 2708
cuTENSOR Version : None
Device 0 Name : GeForce RTX 3090
Device 0 Compute Capability : 86
```
[ { "content": "import numpy\n\nimport cupy\nfrom cupy.cuda import cublas\nfrom cupy.cuda import device\nfrom cupy.linalg import _util\n\n\ndef solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,\n overwrite_b=False, check_finite=False):\n \"\"\"Solve the equation a x = b for x, assuming a is a triangular matrix.\n\n Args:\n a (cupy.ndarray): The matrix with dimension ``(M, M)``.\n b (cupy.ndarray): The matrix with dimension ``(M,)`` or\n ``(M, N)``.\n lower (bool): Use only data contained in the lower triangle of ``a``.\n Default is to use upper triangle.\n trans (0, 1, 2, 'N', 'T' or 'C'): Type of system to solve:\n\n - *'0'* or *'N'* -- :math:`a x = b`\n - *'1'* or *'T'* -- :math:`a^T x = b`\n - *'2'* or *'C'* -- :math:`a^H x = b`\n\n unit_diagonal (bool): If ``True``, diagonal elements of ``a`` are\n assumed to be 1 and will not be referenced.\n overwrite_b (bool): Allow overwriting data in b (may enhance\n performance)\n check_finite (bool): Whether to check that the input matrices contain\n only finite numbers. Disabling may give a performance gain, but may\n result in problems (crashes, non-termination) if the inputs do\n contain infinities or NaNs.\n\n Returns:\n cupy.ndarray:\n The matrix with dimension ``(M,)`` or ``(M, N)``.\n\n .. seealso:: :func:`scipy.linalg.solve_triangular`\n \"\"\"\n\n _util._assert_cupy_array(a, b)\n\n if len(a.shape) != 2 or a.shape[0] != a.shape[1]:\n raise ValueError('expected square matrix')\n if len(a) != len(b):\n raise ValueError('incompatible dimensions')\n\n # Cast to float32 or float64\n if a.dtype.char in 'fd':\n dtype = a.dtype\n else:\n dtype = numpy.promote_types(a.dtype.char, 'f')\n\n a = cupy.array(a, dtype=dtype, order='F', copy=False)\n b = cupy.array(b, dtype=dtype, order='F', copy=(not overwrite_b))\n\n if check_finite:\n if a.dtype.kind == 'f' and not cupy.isfinite(a).all():\n raise ValueError(\n 'array must not contain infs or NaNs')\n if b.dtype.kind == 'f' and not cupy.isfinite(b).all():\n raise ValueError(\n 'array must not contain infs or NaNs')\n\n m, n = (b.size, 1) if b.ndim == 1 else b.shape\n cublas_handle = device.get_cublas_handle()\n\n if dtype == 'f':\n trsm = cublas.strsm\n else: # dtype == 'd'\n trsm = cublas.dtrsm\n one = numpy.array(1, dtype=dtype)\n\n if lower:\n uplo = cublas.CUBLAS_FILL_MODE_LOWER\n else:\n uplo = cublas.CUBLAS_FILL_MODE_UPPER\n\n if trans == 'N':\n trans = cublas.CUBLAS_OP_N\n elif trans == 'T':\n trans = cublas.CUBLAS_OP_T\n elif trans == 'C':\n trans = cublas.CUBLAS_OP_C\n\n if unit_diagonal:\n diag = cublas.CUBLAS_DIAG_UNIT\n else:\n diag = cublas.CUBLAS_DIAG_NON_UNIT\n\n trsm(\n cublas_handle, cublas.CUBLAS_SIDE_LEFT, uplo,\n trans, diag,\n m, n, one.ctypes.data, a.data.ptr, m, b.data.ptr, m)\n return b\n", "path": "cupyx/scipy/linalg/solve_triangular.py" } ]
[ { "content": "import numpy\n\nimport cupy\nfrom cupy.cuda import cublas\nfrom cupy.cuda import device\nfrom cupy.linalg import _util\n\n\ndef solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,\n overwrite_b=False, check_finite=False):\n \"\"\"Solve the equation a x = b for x, assuming a is a triangular matrix.\n\n Args:\n a (cupy.ndarray): The matrix with dimension ``(M, M)``.\n b (cupy.ndarray): The matrix with dimension ``(M,)`` or\n ``(M, N)``.\n lower (bool): Use only data contained in the lower triangle of ``a``.\n Default is to use upper triangle.\n trans (0, 1, 2, 'N', 'T' or 'C'): Type of system to solve:\n\n - *'0'* or *'N'* -- :math:`a x = b`\n - *'1'* or *'T'* -- :math:`a^T x = b`\n - *'2'* or *'C'* -- :math:`a^H x = b`\n\n unit_diagonal (bool): If ``True``, diagonal elements of ``a`` are\n assumed to be 1 and will not be referenced.\n overwrite_b (bool): Allow overwriting data in b (may enhance\n performance)\n check_finite (bool): Whether to check that the input matrices contain\n only finite numbers. Disabling may give a performance gain, but may\n result in problems (crashes, non-termination) if the inputs do\n contain infinities or NaNs.\n\n Returns:\n cupy.ndarray:\n The matrix with dimension ``(M,)`` or ``(M, N)``.\n\n .. seealso:: :func:`scipy.linalg.solve_triangular`\n \"\"\"\n\n _util._assert_cupy_array(a, b)\n\n if len(a.shape) != 2 or a.shape[0] != a.shape[1]:\n raise ValueError('expected square matrix')\n if len(a) != len(b):\n raise ValueError('incompatible dimensions')\n\n # Cast to float32 or float64\n if a.dtype.char in 'fd':\n dtype = a.dtype\n else:\n dtype = numpy.promote_types(a.dtype.char, 'f')\n\n a = cupy.array(a, dtype=dtype, order='F', copy=False)\n b = cupy.array(b, dtype=dtype, order='F', copy=(not overwrite_b))\n\n if check_finite:\n if a.dtype.kind == 'f' and not cupy.isfinite(a).all():\n raise ValueError(\n 'array must not contain infs or NaNs')\n if b.dtype.kind == 'f' and not cupy.isfinite(b).all():\n raise ValueError(\n 'array must not contain infs or NaNs')\n\n m, n = (b.size, 1) if b.ndim == 1 else b.shape\n cublas_handle = device.get_cublas_handle()\n\n if dtype == 'f':\n trsm = cublas.strsm\n elif dtype == 'd':\n trsm = cublas.dtrsm\n elif dtype == 'F':\n trsm = cublas.ctrsm\n else: # dtype == 'D'\n trsm = cublas.ztrsm\n one = numpy.array(1, dtype=dtype)\n\n if lower:\n uplo = cublas.CUBLAS_FILL_MODE_LOWER\n else:\n uplo = cublas.CUBLAS_FILL_MODE_UPPER\n\n if trans == 'N':\n trans = cublas.CUBLAS_OP_N\n elif trans == 'T':\n trans = cublas.CUBLAS_OP_T\n elif trans == 'C':\n trans = cublas.CUBLAS_OP_C\n\n if unit_diagonal:\n diag = cublas.CUBLAS_DIAG_UNIT\n else:\n diag = cublas.CUBLAS_DIAG_NON_UNIT\n\n trsm(\n cublas_handle, cublas.CUBLAS_SIDE_LEFT, uplo,\n trans, diag,\n m, n, one.ctypes.data, a.data.ptr, m, b.data.ptr, m)\n return b\n", "path": "cupyx/scipy/linalg/solve_triangular.py" } ]
diff --git a/cupyx/scipy/linalg/solve_triangular.py b/cupyx/scipy/linalg/solve_triangular.py index 5240cc2dd77..c47f74a9f32 100644 --- a/cupyx/scipy/linalg/solve_triangular.py +++ b/cupyx/scipy/linalg/solve_triangular.py @@ -67,8 +67,12 @@ def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False, if dtype == 'f': trsm = cublas.strsm - else: # dtype == 'd' + elif dtype == 'd': trsm = cublas.dtrsm + elif dtype == 'F': + trsm = cublas.ctrsm + else: # dtype == 'D' + trsm = cublas.ztrsm one = numpy.array(1, dtype=dtype) if lower: diff --git a/tests/cupyx_tests/scipy_tests/linalg_tests/test_solve_triangular.py b/tests/cupyx_tests/scipy_tests/linalg_tests/test_solve_triangular.py index 921bf5ee699..dc16f24a46d 100644 --- a/tests/cupyx_tests/scipy_tests/linalg_tests/test_solve_triangular.py +++ b/tests/cupyx_tests/scipy_tests/linalg_tests/test_solve_triangular.py @@ -25,7 +25,7 @@ @testing.with_requires('scipy') class TestSolveTriangular(unittest.TestCase): - @testing.for_float_dtypes(no_float16=True) + @testing.for_dtypes('fdFD') def check_x(self, a_shape, b_shape, dtype): a_cpu = numpy.random.randint(1, 10, size=a_shape).astype(dtype) b_cpu = numpy.random.randint(1, 10, size=b_shape).astype(dtype)
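Note that even the patched `solve_triangular` above still derives its working dtype from `a` alone, so a real factor paired with a complex right-hand side can still be downcast at the call boundary. A caller-side sketch (not part of the PR; it assumes the ctrsm/ztrsm branches above are in place) that avoids the pitfall by promoting both operands first:

```python
import cupy
import numpy
from cupyx.scipy.linalg import solve_triangular

# A real lower-triangular factor and a complex right-hand side.
a = cupy.tril(cupy.random.rand(4, 4) + 4 * cupy.eye(4))   # float64
b = cupy.random.rand(4) + 1j * cupy.random.rand(4)        # complex128

# Promote both operands so the real factor cannot drag the complex RHS
# down to a real-valued result.
dtype = numpy.promote_types(a.dtype, b.dtype)              # complex128
x = solve_triangular(a.astype(dtype), b.astype(dtype), lower=True)

# Sanity check on the residual; a real-valued x would fail this.
assert cupy.allclose(a @ x, b)
```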
dotkom__onlineweb4-496
Make offline archive look more like event archive

Same as #481. This is mainly about the filtering section.
[ { "content": "#-*- coding: utf-8 -*-\nfrom copy import copy\n\nfrom django.conf import settings\nfrom django.template.defaultfilters import slugify\nfrom django.utils import timezone\n\nfrom filebrowser.base import FileObject\nfrom filebrowser.settings import VERSIONS\nfrom tastypie import fields\nfrom tastypie.resources import ModelResource\n\nfrom apps.api.v0.authentication import UserResource\nfrom apps.article.models import Article, ArticleTag, Tag\n\n\n\n\nclass ArticleResource(ModelResource):\n author = fields.ToOneField(UserResource, 'created_by')\n \n def alter_list_data_to_serialize(self, request, data):\n # Renames list data 'object' to 'articles'.\n if isinstance(data, dict):\n data['articles'] = copy(data['objects'])\n del(data['objects'])\n return data\n \n # Making multiple images for the article\n def dehydrate(self, bundle):\n \n # Setting slug-field\n bundle.data['slug'] = slugify(bundle.data['heading'])\n \n # If image is set\n if bundle.data['image']:\n # Parse to FileObject used by Filebrowser\n temp_image = FileObject(bundle.data['image'])\n \n # Itterate the different versions (by key)\n for ver in VERSIONS.keys():\n # Check if the key start with article_ (if it does, we want to crop to that size)\n if ver.startswith('article_'):\n # Adding the new image to the object\n bundle.data['image_'+ver] = temp_image.version_generate(ver).url\n \n # Unset the image-field\n del(bundle.data['image'])\n \n # Returning washed object\n return bundle\n \n def get_object_list(self, request):\n # Getting the GET-params\n if 'tag' in request.GET:\n request_tag = request.GET['tag']\n else:\n request_tag = None\n \n if 'year' in request.GET:\n request_year = request.GET['year']\n else:\n request_year = None\n \n if 'month' in request.GET:\n request_month = request.GET['month']\n else:\n request_month = None\n \n # Check filtering here\n if (request_year is not None):\n if (request_month is not None):\n # Filtering on both year and month\n queryset = Article.objects.filter(published_date__year=request_year, published_date__month=request_month, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # Filtering on only year\n queryset = Article.objects.filter(published_date__year=request_year, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # Not filtering on year, check if filtering on slug (tag) or return default query\n if (request_tag is not None):\n # Filtering on slug\n slug_query = Tag.objects.filter(slug = request_tag)\n slug_connect = ArticleTag.objects.filter(tag = slug_query).values('article_id')\n queryset = Article.objects.filter(id__in = slug_connect, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # No filtering at all, return default query\n queryset = Article.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n return queryset\n \n class Meta: \n API_LIMIT_PER_PAGE = 9\n queryset = Article.objects.filter(published_date__lte=timezone.now())\n resource_name = 'article/all'\n ordering = ['-published_date']\n include_absolute_url = True\n filtering = {\n 'featured' : ('exact',),\n 'published_date' : ('gte',),\n }\n\nclass ArticleLatestResource(ModelResource):\n author = fields.ToOneField(UserResource, 'created_by')\n \n class Meta:\n queryset = Article.objects.filter(published_date__lte=timezone.now())\n \n resource_name = 'article/latest'\n filtering = {\n 'featured': ('exact',)\n }\n ordering = ['-published_date']\n max_limit = 25\n def alter_list_data_to_serialize(self, 
request, data):\n # Renames list data 'object' to 'articles'.\n if isinstance(data, dict): \n data['articles'] = copy(data['objects'])\n del(data['objects'])\n return data\n def dehydrate(self, bundle):\n bundle.data['slug'] = slugify(bundle.data['heading'])\n return bundle\n", "path": "apps/api/v0/article.py" } ]
[ { "content": "#-*- coding: utf-8 -*-\nfrom copy import copy\n\nfrom django.conf import settings\nfrom django.template.defaultfilters import slugify\nfrom django.utils import timezone\n\nfrom filebrowser.base import FileObject\nfrom filebrowser.settings import VERSIONS\nfrom tastypie import fields\nfrom tastypie.resources import ModelResource\n\nfrom apps.api.v0.authentication import UserResource\nfrom apps.article.models import Article, ArticleTag, Tag\n\n\n\n\nclass ArticleResource(ModelResource):\n author = fields.ToOneField(UserResource, 'created_by', full=True)\n \n def alter_list_data_to_serialize(self, request, data):\n # Renames list data 'object' to 'articles'.\n if isinstance(data, dict):\n data['articles'] = copy(data['objects'])\n del(data['objects'])\n return data\n \n # Making multiple images for the article\n def dehydrate(self, bundle):\n \n # Setting slug-field\n bundle.data['slug'] = slugify(bundle.data['heading'])\n \n # If image is set\n if bundle.data['image']:\n # Parse to FileObject used by Filebrowser\n temp_image = FileObject(bundle.data['image'])\n \n # Itterate the different versions (by key)\n for ver in VERSIONS.keys():\n # Check if the key start with article_ (if it does, we want to crop to that size)\n if ver.startswith('article_'):\n # Adding the new image to the object\n bundle.data['image_'+ver] = temp_image.version_generate(ver).url\n \n # Unset the image-field\n del(bundle.data['image'])\n \n # Returning washed object\n return bundle\n \n def get_object_list(self, request):\n # Getting the GET-params\n if 'tag' in request.GET:\n request_tag = request.GET['tag']\n else:\n request_tag = None\n \n if 'year' in request.GET:\n request_year = request.GET['year']\n else:\n request_year = None\n \n if 'month' in request.GET:\n request_month = request.GET['month']\n else:\n request_month = None\n \n # Check filtering here\n if (request_year is not None):\n if (request_month is not None):\n # Filtering on both year and month\n queryset = Article.objects.filter(published_date__year=request_year, published_date__month=request_month, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # Filtering on only year\n queryset = Article.objects.filter(published_date__year=request_year, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # Not filtering on year, check if filtering on slug (tag) or return default query\n if (request_tag is not None):\n # Filtering on slug\n slug_query = Tag.objects.filter(slug = request_tag)\n slug_connect = ArticleTag.objects.filter(tag = slug_query).values('article_id')\n queryset = Article.objects.filter(id__in = slug_connect, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # No filtering at all, return default query\n queryset = Article.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n return queryset\n \n class Meta: \n API_LIMIT_PER_PAGE = 9\n queryset = Article.objects.filter(published_date__lte=timezone.now())\n resource_name = 'article/all'\n ordering = ['-published_date']\n include_absolute_url = True\n filtering = {\n 'featured' : ('exact',),\n 'published_date' : ('gte',),\n }\n\nclass ArticleLatestResource(ModelResource):\n author = fields.ToOneField(UserResource, 'created_by')\n \n class Meta:\n queryset = Article.objects.filter(published_date__lte=timezone.now())\n \n resource_name = 'article/latest'\n filtering = {\n 'featured': ('exact',)\n }\n ordering = ['-published_date']\n max_limit = 25\n def 
alter_list_data_to_serialize(self, request, data):\n # Renames list data 'object' to 'articles'.\n if isinstance(data, dict): \n data['articles'] = copy(data['objects'])\n del(data['objects'])\n return data\n def dehydrate(self, bundle):\n bundle.data['slug'] = slugify(bundle.data['heading'])\n return bundle\n", "path": "apps/api/v0/article.py" } ]
diff --git a/apps/api/v0/article.py b/apps/api/v0/article.py index 9c52e14cc..3cf0f40da 100755 --- a/apps/api/v0/article.py +++ b/apps/api/v0/article.py @@ -17,7 +17,7 @@ class ArticleResource(ModelResource): - author = fields.ToOneField(UserResource, 'created_by') + author = fields.ToOneField(UserResource, 'created_by', full=True) def alter_list_data_to_serialize(self, request, data): # Renames list data 'object' to 'articles'. diff --git a/files/static/js/Article-archive.js b/files/static/js/Article-archive.js index 9f60e47b7..41e823bbf 100644 --- a/files/static/js/Article-archive.js +++ b/files/static/js/Article-archive.js @@ -201,27 +201,36 @@ function ArticleArchive (Utils) { success: function(data) { // Variables var num = 1; - var output = '<div class="row">'; // If we are not on the first page (and not using the filters), make the elements hidden to fade them in later + var output = ''; // The loop for (var i = 0; i < data.articles.length; i++) { // The markup - output += '<div class="col-md-6 article'+((page == 1 && !overwrite)?'':' article-hidden')+'">'; - output += ' <a href="/article/'+data.articles[i].id+'/'+data.articles[i].slug+'">'; - output += ' <img src="'+data.articles[i].image_article_front_small+'" width="100%" alt="'+data.articles[i].heading+'" />'; - output += ' </a>'; - output += ' <a href="'+data.articles[i].id+'/'+data.articles[i].slug+'"><h3>'+data.articles[i].heading+'</h3></a>'; - output += ' <div class="row">'; - output += ' <div class="col-md-12 article-detail-meta">'; - output += ' <span class="meta-caption">Publisert</span> <span>'+moment(data.articles[i].published_date).format('D. MMMM YYYY')+'</span>'; - output += ' </div>'; - output += ' </div>'; - output += ' <p>'+data.articles[i].ingress_short+'</p>'; - output += '</div>'; - - // Every third element in a chunk - if (num % 2 == 0) - output += '</div><div class="row">'; + output += '<div class="row">'; + output += '<div class="col-md-12 article'+((page == 1 && !overwrite)?'':' article-hidden')+'">'; + output += ' <div class="row">'; + output += ' <div class="col-md-4">'; + output += ' <div class="row">'; + output += ' <a href="/article/'+data.articles[i].id+'/'+data.articles[i].slug+'">'; + output += ' <img src="'+data.articles[i].image_article_front_featured+'" width="100%" alt="'+data.articles[i].heading+'" />'; + output += ' </a>'; + output += ' </div><!-- end row -->'; + output += ' </div><!-- end col-md-4 -->'; + output += ' <div class="col-md-8">'; + output += ' <div class="pull-right article-detail-meta">'; + output += ' <span>'+moment(data.articles[i].published_date).format('DD.MM.YYYY')+'</span>'; + output += ' </div>'; + output += ' <a href="'+data.articles[i].id+'/'+data.articles[i].slug+'"><h3>'+data.articles[i].heading+'</h3></a>'; + output += ' <p>'+data.articles[i].ingress_short+'</p>'; + output += ' <div class="meta"><div class="row"><div class="col-md-6">'; + output += ' <p><strong>Publisert av: </strong>' + data.articles[i].author.first_name + ' ' + data.articles[i].author.last_name + '</p>'; + output += ' </div><div class="col-md-6 pull-right">'; + output += ' <p style="text-align: right"><strong>Sist endret: </strong>' + moment(data.articles[i].changed_date).format('DD.MM.YY HH:mm:ss') + '</p>'; + output += ' </div></div></div>'; + output += ' </div><!-- end col-md-8 -->'; + output += ' </div><!-- end row -->'; + output += '</div><!-- end col-md-12 -->'; + output += '</div><!-- end row -->'; // Increasing num! 
num++; diff --git a/files/static/js/offline-filter.js b/files/static/js/offline-filter.js index dd226c1a5..06405c78c 100644 --- a/files/static/js/offline-filter.js +++ b/files/static/js/offline-filter.js @@ -31,7 +31,7 @@ $(function() { // Check if currently animating if (!buzy) { // Swap classes - $("#filter-menu .active").removeClass("active"); + $("#nav-header .active").removeClass("active"); $(this).parent().addClass("active"); // The sort @@ -50,12 +50,12 @@ $(function() { $('#filter-reset').fadeOut(400); // Checking if currently animated and filter is set - if (!buzy && $('#filter-menu .active').length != 0) { + if (!buzy && $('#nav-header .active').length != 0) { // Resetting issues to display num_issues_to_display = num_issues_to_display_max; // Removing active menu-point - $('#filter-menu .active').removeClass('active'); + $('#nav-header .active').removeClass('active'); // Adding displayable to all issues $('.offline_issue').each(function() { diff --git a/files/static/less/article.less b/files/static/less/article.less index 7e5ced940..d21879568 100644 --- a/files/static/less/article.less +++ b/files/static/less/article.less @@ -1,3 +1,5 @@ +@import 'colors.less'; + section#article-details { @@ -49,29 +51,54 @@ section#article-details { } section#article_archive { - .article-detail-meta { + + h3 { + margin-top: 0px; + font-size: 20px; + } + + img { + margin-bottom: 20px; + } + + #filterbox { + + background: @baby-blue-medium; + padding: 15px; + margin-bottom: 20px; + h3 { - padding-top: 0px; - margin-top: 0px; - line-height: normal; + color: white; } + } + + .article-detail-meta { - font-weight:300; + color: #ee7810; + font-size: 18px; + + font-weight:400; padding-bottom: 10px; - .meta-caption { - font-weight:600; - } } p { - margin-bottom: 30px; + margin-bottom: 5px; + font-size: 16px; + font-weight: 300; + } + + .meta { + border-top: 1px dotted #ccc; + margin-top: 10px; + p { + font-size: 11px; + } } .tag-cloud { a:hover { color: white; } - margin-top:20px; .tag { display: inline-block; margin-bottom: 5px; diff --git a/files/static/less/offline.less b/files/static/less/offline.less index 983c87e0b..e26f94001 100644 --- a/files/static/less/offline.less +++ b/files/static/less/offline.less @@ -1,3 +1,5 @@ +@import 'colors.less'; + /* Help mixin */ .transition_easer(@seconds: 0.2s) { -webkit-transition: all @seconds ease-in-out; @@ -102,10 +104,40 @@ div#offline { display: inline-block; } } -#filter-menu { - a { - padding: 5px 15px; + +.filterbox { + + background-color: @baby-blue-medium; + padding: 15px; + margin-bottom: 20px; + + a, li { + color: white; + } + + a:hover { + color: @baby-blue; border-radius: 0px; } + + li.active a { + border-radius: 0px; + } + + h3 { + color: white; + font-size: 20px; + font-weight: 400; + margin-top: 0px; + } + + #filter-menu { + + a { + padding: 5px 15px; + border-radius: 0px; + } + } + } diff --git a/templates/article/archive.html b/templates/article/archive.html index bc7315f7f..415acaec6 100755 --- a/templates/article/archive.html +++ b/templates/article/archive.html @@ -25,62 +25,48 @@ {% block content %} <section id="article_archive"> + <div class="container"> - <div class="row"> - <div class="col-sm-7 col-md-8"> - <div class="row"> - <div class="col-md-12"> - <div class="page-header"> - <h2>ARTIKKELARKIV</h2> - </div> - </div> - </div> - <div class="row"> - <div class="col-md-12" style="min-height: 400px;" id="article_archive_container"></div> + <div class="row"> + <div class="col-md-12"> + <div class="page-header"> + <h2>ARTIKKELARKIV</h2> 
</div> </div> + </div><!-- end row --> + <div class="col-xs-12 col-sm-12 col-md-3 pull-right" id="filterbox"> - <div class="col-sm-5 col-md-4"> - <div class="row"> - <div class="col-md-12"> - <div class="page-header"> - <h3>TAGS</h3> - </div> - <div class="tag-cloud" id="article_archive_tagcloud"> - {% for tag in tags %} - <span><a href="{{ tag.get_absolute_url }}" class="tag">{{ tag }}</a></span> - {% endfor %} - </div> - </div> - </div> - <div class="row"> - <div class="col-md-12"> - <div class="page-header"> - <h3>FILTER</h3> - </div> - <div class="date-filter" id="article_archive_filter"> - <div class="row"> - <div class="col-md-12"> - <a class="tag tag-all" id="article_archive_filter_reset" href="{% url 'article_archive' %}">Alle</a> - </div> - </div> - {% for year, months in dates.items %} - <div class="row"> - <div class="year col-md-4"> - <a class="tag tag-year" data-year="{{ year }}" data-month="" href="{% url 'article_archive_year' year %}">{{ year }}</a> - </div> - {% for month in months %} - <div class="col-md-4"><a class="tag tag-month" data-year="{{ year }}" data-month="{{ month }}" href="{% url 'article_archive_month' year month %}">{{ month }}</a></div> - {% endfor %} - </div> - {% endfor %} - </div> - </div> - </div> - </div> - </div> - </div> + <h3>Tags</h3> + + <div class="tag-cloud" id="article_archive_tagcloud"> + {% for tag in tags %} + <span><a href="{{ tag.get_absolute_url }}" class="tag">{{ tag }}</a></span> + {% endfor %} + </div><!-- end tag-cloud --> + + <h3>Filter</h3> + + <div class="date-filter" id="article_archive_filter"> + + <a class="tag tag-all" id="article_archive_filter_reset" href="{% url 'article_archive' %}">Alle</a> + + {% for year, months in dates.items %} + <a class="tag tag-year" data-year="{{ year }}" data-month="" href="{% url 'article_archive_year' year %}">{{ year }}</a> + {% for month in months %} + <a class="tag tag-month" data-year="{{ year }}" data-month="{{ month }}" href="{% url 'article_archive_month' year month %}">{{ month }}</a> + {% endfor %} + {% endfor %} + + </div><!-- end date-filter --> + + </div><!-- end col-md-3 --> + + <div class="col-xs-12 col-sm-12 col-md-9 pull-left" id="article_archive_container"> + + </div><!-- end col-md-9 --> + + </div><!-- end container --> </section> {% endblock content %} diff --git a/templates/offline/offline.html b/templates/offline/offline.html index 42a60db8e..8783aaefa 100644 --- a/templates/offline/offline.html +++ b/templates/offline/offline.html @@ -36,8 +36,19 @@ <h2>OFFLINE</h2> </div> </div> </div> - <div class="row"> - <div class="col-md-9"> + + <div class="col-xs-12 col-sm-4 col-md-3 filterbox pull-right"> + <h3>Filter</h3> + <ul id="nav-header" class="nav nav-pills nav-stacked"> + <li class="nav-header">År<button id="filter-reset" class="close pull-righ">&times;</button></li> + {% for year in years %} + <li><a class="filter-year" href="#{{ year }}">{{ year }}</a></li> + {% endfor %} + </ul> + </div> + + <div class="col-xs-12 col-sm-8 col-md-9 pull-left"> + <div class="row"> <p>{% chunk 'offline_ingress' %}</p> <div id="offline-wrapper" class="col-md-12"> {% for issue in issues %} @@ -53,15 +64,8 @@ <h2>OFFLINE</h2> </ul> </div> </div> - <div class="col-md-3"> - <ul id="filter-menu" class="nav nav-pills nav-stacked"> - <li class="nav-header">År<button id="filter-reset" class="close pull-right">&times;</button></li> - {% for year in years %} - <li><a class="filter-year" href="#{{ year }}">{{year}}</a></li> - {% endfor %} - </ul> - </div> - </div> + </div> + </div> </section> {% 
endblock content %}
googleapis__python-spanner-django-58
dbapi: implement bulk_insert_sql. Required at least by Django's DatabaseOperations.
[ { "content": "from django.db.backends.base.operations import BaseDatabaseOperations\n\n\nclass DatabaseOperations(BaseDatabaseOperations):\n def quote_name(self, name):\n return name\n", "path": "spanner/django/operations.py" } ]
[ { "content": "from django.db.backends.base.operations import BaseDatabaseOperations\n\n\nclass DatabaseOperations(BaseDatabaseOperations):\n def quote_name(self, name):\n return name\n\n def bulk_insert_sql(self, fields, placeholder_rows):\n placeholder_rows_sql = (\", \".join(row) for row in placeholder_rows)\n values_sql = \", \".join(\"(%s)\" % sql for sql in placeholder_rows_sql)\n return \"VALUES \" + values_sql\n", "path": "spanner/django/operations.py" } ]
diff --git a/spanner/django/operations.py b/spanner/django/operations.py index ffb1991278..9604ad4f51 100644 --- a/spanner/django/operations.py +++ b/spanner/django/operations.py @@ -4,3 +4,8 @@ class DatabaseOperations(BaseDatabaseOperations): def quote_name(self, name): return name + + def bulk_insert_sql(self, fields, placeholder_rows): + placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) + values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) + return "VALUES " + values_sql
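For context on what the added method produces, here is a minimal sketch (not part of the original record) of how the returned `VALUES` clause slots into a bulk INSERT statement. The table name, column names, and placeholder rows are hypothetical; in actual use Django's SQLInsertCompiler supplies the `INSERT INTO ... (columns)` prefix.

```python
# Hypothetical illustration of bulk_insert_sql's output for two rows of two
# columns each; this mirrors the logic added in the patch above.
placeholder_rows = [["%s", "%s"], ["%s", "%s"]]

placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
values_clause = "VALUES " + values_sql

# Django's insert compiler prepends the INSERT INTO ... (columns) part.
sql = "INSERT INTO author (first_name, last_name) " + values_clause
print(sql)
# INSERT INTO author (first_name, last_name) VALUES (%s, %s), (%s, %s)
```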
geopandas__geopandas-2923
BUG: .explore() shows incorrect colors with scheme = 'UserDefined' #### Code Sample ```python import geopandas as gpd import geodatasets chicago = gpd.read_file(geodatasets.get_path("geoda.chicago_commpop")) bins=[25000, 50000, 75000, 100000] chicago.plot(column='POP2010', scheme='UserDefined', classification_kwds={'bins': bins}, legend=True, legend_kwds={'loc': 'upper left', 'bbox_to_anchor': (1, 1)}) ``` ```python chicago.explore(column='POP2010', scheme='UserDefined', classification_kwds={'bins': bins}, tooltip=['POP2010'], legend=True) ``` #### Problem description When using `scheme='UserDefined'` the colors differ when using `.plot()` and `.explore()`. In `.explore()` values between 75000 and 100000 are light green, whereas when using `.plot()` these are yellow. The legends for both visuals are the same though. The problem seems to be that there are values in the dataset not included in the bins. If I change this to `bins=[0, 25000, 50000, 75000, 100000]`, then the colors in `.explore()` are correct, but in `.plot()` an extra category is added to the legend, i.e. '-inf, 0.00', even though there is no data in this bin. #### Expected Output I would expect `.plot()` and `.explore()` to show the same colors and legend. #### Output of ``geopandas.show_versions()`` <details> SYSTEM INFO ----------- python : 3.10.4 (tags/v3.10.4:9d38120, Mar 23 2022, 23:13:41) [MSC v.1929 64 bit (AMD64)] executable : [c:\Python310\python.exe](file:///C:/Python310/python.exe) machine : Windows-10-10.0.19044-SP0 GEOS, GDAL, PROJ INFO --------------------- GEOS : None GEOS lib : None GDAL : 3.5.2 GDAL data dir: [c:\Python310\lib\site-packages\fiona\gdal_data](file:///C:/Python310/lib/site-packages/fiona/gdal_data) PROJ : 9.1.0 PROJ data dir: [c:\Python310\lib\site-packages\pyproj\proj_dir\share\proj](file:///C:/Python310/lib/site-packages/pyproj/proj_dir/share/proj) PYTHON DEPENDENCIES ------------------- geopandas : 0.13.2 numpy : 1.23.1 pandas : 1.4.3 pyproj : 3.4.0 shapely : 1.8.5.post1 fiona : 1.8.22 geoalchemy2: None geopy : 2.3.0 matplotlib : 3.6.3 mapclassify: 2.4.3 pygeos : None pyogrio : None psycopg2 : None pyarrow : 11.0.0 rtree : 1.0.1 </details>
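The key difference between the before/after files below is in the scheme branch of `_explore`: the colormap was resampled to the `k` argument (default 5) instead of the classifier's actual class count `binning.k`, which for `UserDefined` follows from the supplied bins. A minimal sketch of why those two numbers can diverge, with hypothetical values and assuming `mapclassify` is installed:

```python
# Hypothetical values: the maximum stays below the last user-defined bin edge,
# so the classifier produces 4 classes rather than the default k=5.
import numpy as np
from mapclassify import classify

values = np.array([12_000, 30_000, 60_000, 80_000, 95_000])
binning = classify(values, "UserDefined", bins=[25_000, 50_000, 75_000, 100_000])

print(binning.k)   # 4  -- number of classes actually produced
print(binning.yb)  # [0 1 2 3 3] -- class index per observation
# explore() resampled the colormap to k=5 colours but indexed it with these
# 4-class codes, so the top class got a different colour than plot() and the
# legend showed.
```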
[ { "content": "from statistics import mean\n\nimport geopandas\nfrom shapely.geometry import LineString\nimport numpy as np\nimport pandas as pd\n\nfrom packaging.version import Version\n\n_MAP_KWARGS = [\n \"location\",\n \"prefer_canvas\",\n \"no_touch\",\n \"disable_3d\",\n \"png_enabled\",\n \"zoom_control\",\n \"crs\",\n \"zoom_start\",\n \"left\",\n \"top\",\n \"position\",\n \"min_zoom\",\n \"max_zoom\",\n \"min_lat\",\n \"max_lat\",\n \"min_lon\",\n \"max_lon\",\n \"max_bounds\",\n]\n\n\ndef _explore(\n df,\n column=None,\n cmap=None,\n color=None,\n m=None,\n tiles=\"OpenStreetMap\",\n attr=None,\n tooltip=True,\n popup=False,\n highlight=True,\n categorical=False,\n legend=True,\n scheme=None,\n k=5,\n vmin=None,\n vmax=None,\n width=\"100%\",\n height=\"100%\",\n categories=None,\n classification_kwds=None,\n control_scale=True,\n marker_type=None,\n marker_kwds={},\n style_kwds={},\n highlight_kwds={},\n missing_kwds={},\n tooltip_kwds={},\n popup_kwds={},\n legend_kwds={},\n map_kwds={},\n **kwargs,\n):\n \"\"\"Interactive map based on GeoPandas and folium/leaflet.js\n\n Generate an interactive leaflet map based on :class:`~geopandas.GeoDataFrame`\n\n Parameters\n ----------\n column : str, np.array, pd.Series (default None)\n The name of the dataframe column, :class:`numpy.array`,\n or :class:`pandas.Series` to be plotted. If :class:`numpy.array` or\n :class:`pandas.Series` are used then it must have same length as dataframe.\n cmap : str, matplotlib.Colormap, branca.colormap or function (default None)\n The name of a colormap recognized by ``matplotlib``, a list-like of colors,\n :class:`matplotlib.colors.Colormap`, a :class:`branca.colormap.ColorMap` or\n function that returns a named color or hex based on the column\n value, e.g.::\n\n def my_colormap(value): # scalar value defined in 'column'\n if value > 1:\n return \"green\"\n return \"red\"\n\n color : str, array-like (default None)\n Named color or a list-like of colors (named or hex).\n m : folium.Map (default None)\n Existing map instance on which to draw the plot.\n tiles : str, xyzservices.TileProvider (default 'OpenStreetMap Mapnik')\n Map tileset to use. Can choose from the list supported by folium, query a\n :class:`xyzservices.TileProvider` by a name from ``xyzservices.providers``,\n pass :class:`xyzservices.TileProvider` object or pass custom XYZ URL.\n The current list of built-in providers (when ``xyzservices`` is not available):\n\n ``[\"OpenStreetMap\", \"Stamen Terrain\", “Stamen Toner\", “Stamen Watercolor\"\n \"CartoDB positron\", “CartoDB dark_matter\"]``\n\n You can pass a custom tileset to Folium by passing a Leaflet-style URL\n to the tiles parameter: ``http://{s}.yourtiles.com/{z}/{x}/{y}.png``.\n Be sure to check their terms and conditions and to provide attribution with\n the ``attr`` keyword.\n attr : str (default None)\n Map tile attribution; only required if passing custom tile URL.\n tooltip : bool, str, int, list (default True)\n Display GeoDataFrame attributes when hovering over the object.\n ``True`` includes all columns. ``False`` removes tooltip. Pass string or list of\n strings to specify a column(s). Integer specifies first n columns to be\n included. Defaults to ``True``.\n popup : bool, str, int, list (default False)\n Input GeoDataFrame attributes for object displayed when clicking.\n ``True`` includes all columns. ``False`` removes popup. Pass string or list of\n strings to specify a column(s). Integer specifies first n columns to be\n included. 
Defaults to ``False``.\n highlight : bool (default True)\n Enable highlight functionality when hovering over a geometry.\n categorical : bool (default False)\n If ``False``, ``cmap`` will reflect numerical values of the\n column being plotted. For non-numerical columns, this\n will be set to True.\n legend : bool (default True)\n Plot a legend in choropleth plots.\n Ignored if no ``column`` is given.\n scheme : str (default None)\n Name of a choropleth classification scheme (requires ``mapclassify`` >= 2.4.0).\n A :func:`mapclassify.classify` will be used\n under the hood. Supported are all schemes provided by ``mapclassify`` (e.g.\n ``'BoxPlot'``, ``'EqualInterval'``, ``'FisherJenks'``, ``'FisherJenksSampled'``,\n ``'HeadTailBreaks'``, ``'JenksCaspall'``, ``'JenksCaspallForced'``,\n ``'JenksCaspallSampled'``, ``'MaxP'``, ``'MaximumBreaks'``,\n ``'NaturalBreaks'``, ``'Quantiles'``, ``'Percentiles'``, ``'StdMean'``,\n ``'UserDefined'``). Arguments can be passed in ``classification_kwds``.\n k : int (default 5)\n Number of classes\n vmin : None or float (default None)\n Minimum value of ``cmap``. If ``None``, the minimum data value\n in the column to be plotted is used.\n vmax : None or float (default None)\n Maximum value of ``cmap``. If ``None``, the maximum data value\n in the column to be plotted is used.\n width : pixel int or percentage string (default: '100%')\n Width of the folium :class:`~folium.folium.Map`. If the argument\n m is given explicitly, width is ignored.\n height : pixel int or percentage string (default: '100%')\n Height of the folium :class:`~folium.folium.Map`. If the argument\n m is given explicitly, height is ignored.\n categories : list-like\n Ordered list-like object of categories to be used for categorical plot.\n classification_kwds : dict (default None)\n Keyword arguments to pass to mapclassify\n control_scale : bool, (default True)\n Whether to add a control scale on the map.\n marker_type : str, folium.Circle, folium.CircleMarker, folium.Marker (default None)\n Allowed string options are ('marker', 'circle', 'circle_marker'). Defaults to\n folium.CircleMarker.\n marker_kwds: dict (default {})\n Additional keywords to be passed to the selected ``marker_type``, e.g.:\n\n radius : float (default 2 for ``circle_marker`` and 50 for ``circle``))\n Radius of the circle, in meters (for ``circle``) or pixels\n (for ``circle_marker``).\n fill : bool (default True)\n Whether to fill the ``circle`` or ``circle_marker`` with color.\n icon : folium.map.Icon\n the :class:`folium.map.Icon` object to use to render the marker.\n draggable : bool (default False)\n Set to True to be able to drag the marker around the map.\n\n style_kwds : dict (default {})\n Additional style to be passed to folium ``style_function``:\n\n stroke : bool (default True)\n Whether to draw stroke along the path. Set it to ``False`` to\n disable borders on polygons or circles.\n color : str\n Stroke color\n weight : int\n Stroke width in pixels\n opacity : float (default 1.0)\n Stroke opacity\n fill : boolean (default True)\n Whether to fill the path with color. Set it to ``False`` to\n disable filling on polygons or circles.\n fillColor : str\n Fill color. 
Defaults to the value of the color option\n fillOpacity : float (default 0.5)\n Fill opacity.\n style_function : callable\n Function mapping a GeoJson Feature to a style ``dict``.\n\n * Style properties :func:`folium.vector_layers.path_options`\n * GeoJson features :class:`GeoDataFrame.__geo_interface__`\n\n e.g.::\n\n lambda x: {\"color\":\"red\" if x[\"properties\"][\"gdp_md_est\"]<10**6\n else \"blue\"}\n\n Plus all supported by :func:`folium.vector_layers.path_options`. See the\n documentation of :class:`folium.features.GeoJson` for details.\n\n highlight_kwds : dict (default {})\n Style to be passed to folium highlight_function. Uses the same keywords\n as ``style_kwds``. When empty, defaults to ``{\"fillOpacity\": 0.75}``.\n tooltip_kwds : dict (default {})\n Additional keywords to be passed to :class:`folium.features.GeoJsonTooltip`,\n e.g. ``aliases``, ``labels``, or ``sticky``.\n popup_kwds : dict (default {})\n Additional keywords to be passed to :class:`folium.features.GeoJsonPopup`,\n e.g. ``aliases`` or ``labels``.\n legend_kwds : dict (default {})\n Additional keywords to be passed to the legend.\n\n Currently supported customisation:\n\n caption : string\n Custom caption of the legend. Defaults to the column name.\n\n Additional accepted keywords when ``scheme`` is specified:\n\n colorbar : bool (default True)\n An option to control the style of the legend. If True, continuous\n colorbar will be used. If False, categorical legend will be used for bins.\n scale : bool (default True)\n Scale bins along the colorbar axis according to the bin edges (True)\n or use the equal length for each bin (False)\n fmt : string (default \"{:.2f}\")\n A formatting specification for the bin edges of the classes in the\n legend. For example, to have no decimals: ``{\"fmt\": \"{:.0f}\"}``. Applies\n if ``colorbar=False``.\n labels : list-like\n A list of legend labels to override the auto-generated labels.\n Needs to have the same number of elements as the number of\n classes (`k`). Applies if ``colorbar=False``.\n interval : boolean (default False)\n An option to control brackets from mapclassify legend.\n If True, open/closed interval brackets are shown in the legend.\n Applies if ``colorbar=False``.\n max_labels : int, default 10\n Maximum number of colorbar tick labels (requires branca>=0.5.0)\n map_kwds : dict (default {})\n Additional keywords to be passed to folium :class:`~folium.folium.Map`,\n e.g. ``dragging``, or ``scrollWheelZoom``.\n\n\n **kwargs : dict\n Additional options to be passed on to the folium object.\n\n Returns\n -------\n m : folium.folium.Map\n folium :class:`~folium.folium.Map` instance\n\n Examples\n --------\n >>> import geodatasets\n >>> df = geopandas.read_file(\n ... geodatasets.get_path(\"geoda.chicago_health\")\n ... )\n >>> df.head(2) # doctest: +SKIP\n ComAreaID ... geometry\n 0 35 ... POLYGON ((-87.60914 41.84469, -87.60915 41.844...\n 1 36 ... 
POLYGON ((-87.59215 41.81693, -87.59231 41.816...\n\n [2 rows x 87 columns]\n\n >>> df.explore(\"Pop2012\", cmap=\"Blues\") # doctest: +SKIP\n \"\"\"\n\n def _colormap_helper(_cmap, n_resample=None, idx=None):\n \"\"\"Helper for MPL deprecation - GH#2596\"\"\"\n if not n_resample:\n return cm.get_cmap(_cmap)\n else:\n if MPL_361:\n return cm.get_cmap(_cmap).resampled(n_resample)(idx)\n else:\n return cm.get_cmap(_cmap, n_resample)(idx)\n\n try:\n import branca as bc\n import folium\n import re\n import matplotlib\n import matplotlib.colors as colors\n import matplotlib.pyplot as plt\n from mapclassify import classify\n\n # isolate MPL version - GH#2596\n MPL_361 = Version(matplotlib.__version__) >= Version(\"3.6.1\")\n if MPL_361:\n from matplotlib import colormaps as cm\n else:\n import matplotlib.cm as cm\n\n except (ImportError, ModuleNotFoundError):\n raise ImportError(\n \"The 'folium', 'matplotlib' and 'mapclassify' packages are required for \"\n \"'explore()'. You can install them using \"\n \"'conda install -c conda-forge folium matplotlib mapclassify' \"\n \"or 'pip install folium matplotlib mapclassify'.\"\n )\n\n # xyservices is an optional dependency\n try:\n import xyzservices\n\n HAS_XYZSERVICES = True\n except (ImportError, ModuleNotFoundError):\n HAS_XYZSERVICES = False\n\n gdf = df.copy()\n\n # convert LinearRing to LineString\n rings_mask = df.geom_type == \"LinearRing\"\n if rings_mask.any():\n gdf.geometry[rings_mask] = gdf.geometry[rings_mask].apply(\n lambda g: LineString(g)\n )\n\n if gdf.crs is None:\n kwargs[\"crs\"] = \"Simple\"\n tiles = None\n elif not gdf.crs.equals(4326):\n gdf = gdf.to_crs(4326)\n\n # create folium.Map object\n if m is None:\n # Get bounds to specify location and map extent\n bounds = gdf.total_bounds\n location = kwargs.pop(\"location\", None)\n if location is None:\n x = mean([bounds[0], bounds[2]])\n y = mean([bounds[1], bounds[3]])\n location = (y, x)\n if \"zoom_start\" in kwargs.keys():\n fit = False\n else:\n fit = True\n else:\n fit = False\n\n # get a subset of kwargs to be passed to folium.Map\n for i in _MAP_KWARGS:\n if i in map_kwds:\n raise ValueError(\n f\"'{i}' cannot be specified in 'map_kwds'. 
\"\n f\"Use the '{i}={map_kwds[i]}' argument instead.\"\n )\n map_kwds = {\n **map_kwds,\n **{i: kwargs[i] for i in kwargs.keys() if i in _MAP_KWARGS},\n }\n\n if HAS_XYZSERVICES:\n # match provider name string to xyzservices.TileProvider\n if isinstance(tiles, str):\n try:\n tiles = xyzservices.providers.query_name(tiles)\n except ValueError:\n pass\n\n if isinstance(tiles, xyzservices.TileProvider):\n attr = attr if attr else tiles.html_attribution\n if \"min_zoom\" not in map_kwds:\n map_kwds[\"min_zoom\"] = tiles.get(\"min_zoom\", 0)\n if \"max_zoom\" not in map_kwds:\n map_kwds[\"max_zoom\"] = tiles.get(\"max_zoom\", 18)\n tiles = tiles.build_url(scale_factor=\"{r}\")\n\n m = folium.Map(\n location=location,\n control_scale=control_scale,\n tiles=tiles,\n attr=attr,\n width=width,\n height=height,\n **map_kwds,\n )\n\n # fit bounds to get a proper zoom level\n if fit:\n m.fit_bounds([[bounds[1], bounds[0]], [bounds[3], bounds[2]]])\n\n for map_kwd in _MAP_KWARGS:\n kwargs.pop(map_kwd, None)\n\n nan_idx = None\n\n if column is not None:\n if pd.api.types.is_list_like(column):\n if len(column) != gdf.shape[0]:\n raise ValueError(\n \"The GeoDataFrame and given column have different number of rows.\"\n )\n else:\n column_name = \"__plottable_column\"\n gdf[column_name] = column\n column = column_name\n elif pd.api.types.is_categorical_dtype(gdf[column]):\n if categories is not None:\n raise ValueError(\n \"Cannot specify 'categories' when column has categorical dtype\"\n )\n categorical = True\n elif (\n pd.api.types.is_object_dtype(gdf[column])\n or pd.api.types.is_bool_dtype(gdf[column])\n or pd.api.types.is_string_dtype(gdf[column])\n or categories\n ):\n categorical = True\n\n nan_idx = pd.isna(gdf[column])\n\n if categorical:\n cat = pd.Categorical(gdf[column][~nan_idx], categories=categories)\n N = len(cat.categories)\n cmap = cmap if cmap else \"tab20\"\n\n # colormap exists in matplotlib\n if cmap in plt.colormaps():\n color = np.apply_along_axis(\n colors.to_hex,\n 1,\n _colormap_helper(cmap, n_resample=N, idx=cat.codes),\n )\n legend_colors = np.apply_along_axis(\n colors.to_hex, 1, _colormap_helper(cmap, n_resample=N, idx=range(N))\n )\n\n # colormap is matplotlib.Colormap\n elif isinstance(cmap, colors.Colormap):\n color = np.apply_along_axis(colors.to_hex, 1, cmap(cat.codes))\n legend_colors = np.apply_along_axis(colors.to_hex, 1, cmap(range(N)))\n\n # custom list of colors\n elif pd.api.types.is_list_like(cmap):\n if N > len(cmap):\n cmap = cmap * (N // len(cmap) + 1)\n color = np.take(cmap, cat.codes)\n legend_colors = np.take(cmap, range(N))\n\n else:\n raise ValueError(\n \"'cmap' is invalid. 
For categorical plots, pass either valid \"\n \"named matplotlib colormap or a list-like of colors.\"\n )\n\n elif callable(cmap):\n # List of colors based on Branca colormaps or self-defined functions\n color = list(map(lambda x: cmap(x), df[column]))\n\n else:\n vmin = gdf[column].min() if vmin is None else vmin\n vmax = gdf[column].max() if vmax is None else vmax\n\n # get bins\n if scheme is not None:\n if classification_kwds is None:\n classification_kwds = {}\n if \"k\" not in classification_kwds:\n classification_kwds[\"k\"] = k\n\n binning = classify(\n np.asarray(gdf[column][~nan_idx]), scheme, **classification_kwds\n )\n color = np.apply_along_axis(\n colors.to_hex,\n 1,\n _colormap_helper(cmap, n_resample=k, idx=binning.yb),\n )\n\n else:\n bins = np.linspace(vmin, vmax, 257)[1:]\n binning = classify(\n np.asarray(gdf[column][~nan_idx]), \"UserDefined\", bins=bins\n )\n\n color = np.apply_along_axis(\n colors.to_hex,\n 1,\n _colormap_helper(cmap, n_resample=256, idx=binning.yb),\n )\n\n # set default style\n if \"fillOpacity\" not in style_kwds:\n style_kwds[\"fillOpacity\"] = 0.5\n if \"weight\" not in style_kwds:\n style_kwds[\"weight\"] = 2\n if \"style_function\" in style_kwds:\n style_kwds_function = style_kwds[\"style_function\"]\n if not callable(style_kwds_function):\n raise ValueError(\"'style_function' has to be a callable\")\n style_kwds.pop(\"style_function\")\n else:\n\n def _no_style(x):\n return {}\n\n style_kwds_function = _no_style\n\n # specify color\n if color is not None:\n if (\n isinstance(color, str)\n and isinstance(gdf, geopandas.GeoDataFrame)\n and color in gdf.columns\n ): # use existing column\n\n def _style_color(x):\n base_style = {\n \"fillColor\": x[\"properties\"][color],\n **style_kwds,\n }\n return {\n **base_style,\n **style_kwds_function(x),\n }\n\n style_function = _style_color\n else: # assign new column\n if isinstance(gdf, geopandas.GeoSeries):\n gdf = geopandas.GeoDataFrame(geometry=gdf)\n\n if nan_idx is not None and nan_idx.any():\n nan_color = missing_kwds.pop(\"color\", None)\n\n gdf[\"__folium_color\"] = nan_color\n gdf.loc[~nan_idx, \"__folium_color\"] = color\n else:\n gdf[\"__folium_color\"] = color\n\n stroke_color = style_kwds.pop(\"color\", None)\n if not stroke_color:\n\n def _style_column(x):\n base_style = {\n \"fillColor\": x[\"properties\"][\"__folium_color\"],\n \"color\": x[\"properties\"][\"__folium_color\"],\n **style_kwds,\n }\n return {\n **base_style,\n **style_kwds_function(x),\n }\n\n style_function = _style_column\n else:\n\n def _style_stroke(x):\n base_style = {\n \"fillColor\": x[\"properties\"][\"__folium_color\"],\n \"color\": stroke_color,\n **style_kwds,\n }\n return {\n **base_style,\n **style_kwds_function(x),\n }\n\n style_function = _style_stroke\n else: # use folium default\n\n def _style_default(x):\n return {**style_kwds, **style_kwds_function(x)}\n\n style_function = _style_default\n\n if highlight:\n if \"fillOpacity\" not in highlight_kwds:\n highlight_kwds[\"fillOpacity\"] = 0.75\n\n def _style_highlight(x):\n return {**highlight_kwds}\n\n highlight_function = _style_highlight\n else:\n highlight_function = None\n\n # define default for points\n if marker_type is None:\n marker_type = \"circle_marker\"\n\n marker = marker_type\n if isinstance(marker_type, str):\n if marker_type == \"marker\":\n marker = folium.Marker(**marker_kwds)\n elif marker_type == \"circle\":\n marker = folium.Circle(**marker_kwds)\n elif marker_type == \"circle_marker\":\n marker_kwds[\"radius\"] = 
marker_kwds.get(\"radius\", 2)\n marker_kwds[\"fill\"] = marker_kwds.get(\"fill\", True)\n marker = folium.CircleMarker(**marker_kwds)\n else:\n raise ValueError(\n \"Only 'marker', 'circle', and 'circle_marker' are \"\n \"supported as marker values\"\n )\n\n # remove additional geometries\n if isinstance(gdf, geopandas.GeoDataFrame):\n non_active_geoms = [\n name\n for name, val in (gdf.dtypes == \"geometry\").items()\n if val and name != gdf.geometry.name\n ]\n gdf = gdf.drop(columns=non_active_geoms)\n\n # prepare tooltip and popup\n if isinstance(gdf, geopandas.GeoDataFrame):\n # add named index to the tooltip\n if gdf.index.name is not None:\n gdf = gdf.reset_index()\n # specify fields to show in the tooltip\n tooltip = _tooltip_popup(\"tooltip\", tooltip, gdf, **tooltip_kwds)\n popup = _tooltip_popup(\"popup\", popup, gdf, **popup_kwds)\n else:\n tooltip = None\n popup = None\n # escape the curly braces {{}} for jinja2 templates\n feature_collection = gdf.__geo_interface__\n for feature in feature_collection[\"features\"]:\n for k in feature[\"properties\"]:\n # escape the curly braces in values\n if type(feature[\"properties\"][k]) == str:\n feature[\"properties\"][k] = re.sub(\n r\"\\{{2,}\",\n lambda x: \"{% raw %}\" + x.group(0) + \"{% endraw %}\",\n feature[\"properties\"][k],\n )\n\n # add dataframe to map\n folium.GeoJson(\n feature_collection,\n tooltip=tooltip,\n popup=popup,\n marker=marker,\n style_function=style_function,\n highlight_function=highlight_function,\n **kwargs,\n ).add_to(m)\n\n if legend:\n # NOTE: overlaps will be resolved in branca #88\n caption = column if not column == \"__plottable_column\" else \"\"\n caption = legend_kwds.pop(\"caption\", caption)\n if categorical:\n categories = cat.categories.to_list()\n legend_colors = legend_colors.tolist()\n\n if nan_idx.any() and nan_color:\n categories.append(missing_kwds.pop(\"label\", \"NaN\"))\n legend_colors.append(nan_color)\n\n _categorical_legend(m, caption, categories, legend_colors)\n elif column is not None:\n cbar = legend_kwds.pop(\"colorbar\", True)\n colormap_kwds = {}\n if \"max_labels\" in legend_kwds:\n colormap_kwds[\"max_labels\"] = legend_kwds.pop(\"max_labels\")\n if scheme:\n cb_colors = np.apply_along_axis(\n colors.to_hex,\n 1,\n _colormap_helper(cmap, n_resample=binning.k, idx=range(binning.k)),\n )\n if cbar:\n if legend_kwds.pop(\"scale\", True):\n index = [vmin] + binning.bins.tolist()\n else:\n index = None\n colorbar = bc.colormap.StepColormap(\n cb_colors,\n vmin=vmin,\n vmax=vmax,\n caption=caption,\n index=index,\n **colormap_kwds,\n )\n else:\n fmt = legend_kwds.pop(\"fmt\", \"{:.2f}\")\n if \"labels\" in legend_kwds:\n categories = legend_kwds[\"labels\"]\n else:\n categories = binning.get_legend_classes(fmt)\n show_interval = legend_kwds.pop(\"interval\", False)\n if not show_interval:\n categories = [c[1:-1] for c in categories]\n\n if nan_idx.any() and nan_color:\n categories.append(missing_kwds.pop(\"label\", \"NaN\"))\n cb_colors = np.append(cb_colors, nan_color)\n _categorical_legend(m, caption, categories, cb_colors)\n\n else:\n if isinstance(cmap, bc.colormap.ColorMap):\n colorbar = cmap\n else:\n mp_cmap = _colormap_helper(cmap)\n cb_colors = np.apply_along_axis(\n colors.to_hex, 1, mp_cmap(range(mp_cmap.N))\n )\n\n # linear legend\n if mp_cmap.N > 20:\n colorbar = bc.colormap.LinearColormap(\n cb_colors,\n vmin=vmin,\n vmax=vmax,\n caption=caption,\n **colormap_kwds,\n )\n\n # steps\n else:\n colorbar = bc.colormap.StepColormap(\n cb_colors,\n vmin=vmin,\n 
vmax=vmax,\n caption=caption,\n **colormap_kwds,\n )\n\n if cbar:\n if nan_idx.any() and nan_color:\n _categorical_legend(\n m, \"\", [missing_kwds.pop(\"label\", \"NaN\")], [nan_color]\n )\n m.add_child(colorbar)\n\n return m\n\n\ndef _tooltip_popup(type, fields, gdf, **kwds):\n \"\"\"get tooltip or popup\"\"\"\n import folium\n\n # specify fields to show in the tooltip\n if fields is False or fields is None or fields == 0:\n return None\n else:\n if fields is True:\n fields = gdf.columns.drop(gdf.geometry.name).to_list()\n elif isinstance(fields, int):\n fields = gdf.columns.drop(gdf.geometry.name).to_list()[:fields]\n elif isinstance(fields, str):\n fields = [fields]\n\n for field in [\"__plottable_column\", \"__folium_color\"]:\n if field in fields:\n fields.remove(field)\n\n # Cast fields to str\n fields = list(map(str, fields))\n if type == \"tooltip\":\n return folium.GeoJsonTooltip(fields, **kwds)\n elif type == \"popup\":\n return folium.GeoJsonPopup(fields, **kwds)\n\n\ndef _categorical_legend(m, title, categories, colors):\n \"\"\"\n Add categorical legend to a map\n\n The implementation is using the code originally written by Michel Metran\n (@michelmetran) and released on GitHub\n (https://github.com/michelmetran/package_folium) under MIT license.\n\n Copyright (c) 2020 Michel Metran\n\n Parameters\n ----------\n m : folium.Map\n Existing map instance on which to draw the plot\n title : str\n title of the legend (e.g. column name)\n categories : list-like\n list of categories\n colors : list-like\n list of colors (in the same order as categories)\n \"\"\"\n\n # Header to Add\n head = \"\"\"\n {% macro header(this, kwargs) %}\n <script src=\"https://code.jquery.com/ui/1.12.1/jquery-ui.js\"></script>\n <script>$( function() {\n $( \".maplegend\" ).draggable({\n start: function (event, ui) {\n $(this).css({\n right: \"auto\",\n top: \"auto\",\n bottom: \"auto\"\n });\n }\n });\n });\n </script>\n <style type='text/css'>\n .maplegend {\n position: absolute;\n z-index:9999;\n background-color: rgba(255, 255, 255, .8);\n border-radius: 5px;\n box-shadow: 0 0 15px rgba(0,0,0,0.2);\n padding: 10px;\n font: 12px/14px Arial, Helvetica, sans-serif;\n right: 10px;\n bottom: 20px;\n }\n .maplegend .legend-title {\n text-align: left;\n margin-bottom: 5px;\n font-weight: bold;\n }\n .maplegend .legend-scale ul {\n margin: 0;\n margin-bottom: 0px;\n padding: 0;\n float: left;\n list-style: none;\n }\n .maplegend .legend-scale ul li {\n list-style: none;\n margin-left: 0;\n line-height: 16px;\n margin-bottom: 2px;\n }\n .maplegend ul.legend-labels li span {\n display: block;\n float: left;\n height: 14px;\n width: 14px;\n margin-right: 5px;\n margin-left: 0;\n border: 0px solid #ccc;\n }\n .maplegend .legend-source {\n color: #777;\n clear: both;\n }\n .maplegend a {\n color: #777;\n }\n </style>\n {% endmacro %}\n \"\"\"\n import branca as bc\n\n # Add CSS (on Header)\n macro = bc.element.MacroElement()\n macro._template = bc.element.Template(head)\n m.get_root().add_child(macro)\n\n body = f\"\"\"\n <div id='maplegend {title}' class='maplegend'>\n <div class='legend-title'>{title}</div>\n <div class='legend-scale'>\n <ul class='legend-labels'>\"\"\"\n\n # Loop Categories\n for label, color in zip(categories, colors):\n body += f\"\"\"\n <li><span style='background:{color}'></span>{label}</li>\"\"\"\n\n body += \"\"\"\n </ul>\n </div>\n </div>\n \"\"\"\n\n # Add Body\n body = bc.element.Element(body, \"legend\")\n m.get_root().html.add_child(body)\n\n\ndef _explore_geoseries(\n s,\n 
color=None,\n m=None,\n tiles=\"OpenStreetMap\",\n attr=None,\n highlight=True,\n width=\"100%\",\n height=\"100%\",\n control_scale=True,\n marker_type=None,\n marker_kwds={},\n style_kwds={},\n highlight_kwds={},\n map_kwds={},\n **kwargs,\n):\n \"\"\"Interactive map based on GeoPandas and folium/leaflet.js\n\n Generate an interactive leaflet map based on :class:`~geopandas.GeoSeries`\n\n Parameters\n ----------\n color : str, array-like (default None)\n Named color or a list-like of colors (named or hex).\n m : folium.Map (default None)\n Existing map instance on which to draw the plot.\n tiles : str, xyzservices.TileProvider (default 'OpenStreetMap Mapnik')\n Map tileset to use. Can choose from the list supported by folium, query a\n :class:`xyzservices.TileProvider` by a name from ``xyzservices.providers``,\n pass :class:`xyzservices.TileProvider` object or pass custom XYZ URL.\n The current list of built-in providers (when ``xyzservices`` is not available):\n\n ``[\"OpenStreetMap\", \"Stamen Terrain\", “Stamen Toner\", “Stamen Watercolor\"\n \"CartoDB positron\", “CartoDB dark_matter\"]``\n\n You can pass a custom tileset to Folium by passing a Leaflet-style URL\n to the tiles parameter: ``http://{s}.yourtiles.com/{z}/{x}/{y}.png``.\n Be sure to check their terms and conditions and to provide attribution with\n the ``attr`` keyword.\n attr : str (default None)\n Map tile attribution; only required if passing custom tile URL.\n highlight : bool (default True)\n Enable highlight functionality when hovering over a geometry.\n width : pixel int or percentage string (default: '100%')\n Width of the folium :class:`~folium.folium.Map`. If the argument\n m is given explicitly, width is ignored.\n height : pixel int or percentage string (default: '100%')\n Height of the folium :class:`~folium.folium.Map`. If the argument\n m is given explicitly, height is ignored.\n control_scale : bool, (default True)\n Whether to add a control scale on the map.\n marker_type : str, folium.Circle, folium.CircleMarker, folium.Marker (default None)\n Allowed string options are ('marker', 'circle', 'circle_marker'). Defaults to\n folium.Marker.\n marker_kwds: dict (default {})\n Additional keywords to be passed to the selected ``marker_type``, e.g.:\n\n radius : float\n Radius of the circle, in meters (for ``'circle'``) or pixels\n (for ``circle_marker``).\n icon : folium.map.Icon\n the :class:`folium.map.Icon` object to use to render the marker.\n draggable : bool (default False)\n Set to True to be able to drag the marker around the map.\n\n style_kwds : dict (default {})\n Additional style to be passed to folium ``style_function``:\n\n stroke : bool (default True)\n Whether to draw stroke along the path. Set it to ``False`` to\n disable borders on polygons or circles.\n color : str\n Stroke color\n weight : int\n Stroke width in pixels\n opacity : float (default 1.0)\n Stroke opacity\n fill : boolean (default True)\n Whether to fill the path with color. Set it to ``False`` to\n disable filling on polygons or circles.\n fillColor : str\n Fill color. 
Defaults to the value of the color option\n fillOpacity : float (default 0.5)\n Fill opacity.\n style_function : callable\n Function mapping a GeoJson Feature to a style ``dict``.\n\n * Style properties :func:`folium.vector_layers.path_options`\n * GeoJson features :class:`GeoSeries.__geo_interface__`\n\n e.g.::\n\n lambda x: {\"color\":\"red\" if x[\"properties\"][\"gdp_md_est\"]<10**6\n else \"blue\"}\n\n\n Plus all supported by :func:`folium.vector_layers.path_options`. See the\n documentation of :class:`folium.features.GeoJson` for details.\n\n highlight_kwds : dict (default {})\n Style to be passed to folium highlight_function. Uses the same keywords\n as ``style_kwds``. When empty, defaults to ``{\"fillOpacity\": 0.75}``.\n map_kwds : dict (default {})\n Additional keywords to be passed to folium :class:`~folium.folium.Map`,\n e.g. ``dragging``, or ``scrollWheelZoom``.\n\n **kwargs : dict\n Additional options to be passed on to the folium.\n\n Returns\n -------\n m : folium.folium.Map\n folium :class:`~folium.folium.Map` instance\n\n \"\"\"\n return _explore(\n s,\n color=color,\n m=m,\n tiles=tiles,\n attr=attr,\n highlight=highlight,\n width=width,\n height=height,\n control_scale=control_scale,\n marker_type=marker_type,\n marker_kwds=marker_kwds,\n style_kwds=style_kwds,\n highlight_kwds=highlight_kwds,\n map_kwds=map_kwds,\n **kwargs,\n )\n", "path": "geopandas/explore.py" } ]
[ { "content": "from statistics import mean\n\nimport geopandas\nfrom shapely.geometry import LineString\nimport numpy as np\nimport pandas as pd\n\nfrom packaging.version import Version\n\n_MAP_KWARGS = [\n \"location\",\n \"prefer_canvas\",\n \"no_touch\",\n \"disable_3d\",\n \"png_enabled\",\n \"zoom_control\",\n \"crs\",\n \"zoom_start\",\n \"left\",\n \"top\",\n \"position\",\n \"min_zoom\",\n \"max_zoom\",\n \"min_lat\",\n \"max_lat\",\n \"min_lon\",\n \"max_lon\",\n \"max_bounds\",\n]\n\n\ndef _explore(\n df,\n column=None,\n cmap=None,\n color=None,\n m=None,\n tiles=\"OpenStreetMap\",\n attr=None,\n tooltip=True,\n popup=False,\n highlight=True,\n categorical=False,\n legend=True,\n scheme=None,\n k=5,\n vmin=None,\n vmax=None,\n width=\"100%\",\n height=\"100%\",\n categories=None,\n classification_kwds=None,\n control_scale=True,\n marker_type=None,\n marker_kwds={},\n style_kwds={},\n highlight_kwds={},\n missing_kwds={},\n tooltip_kwds={},\n popup_kwds={},\n legend_kwds={},\n map_kwds={},\n **kwargs,\n):\n \"\"\"Interactive map based on GeoPandas and folium/leaflet.js\n\n Generate an interactive leaflet map based on :class:`~geopandas.GeoDataFrame`\n\n Parameters\n ----------\n column : str, np.array, pd.Series (default None)\n The name of the dataframe column, :class:`numpy.array`,\n or :class:`pandas.Series` to be plotted. If :class:`numpy.array` or\n :class:`pandas.Series` are used then it must have same length as dataframe.\n cmap : str, matplotlib.Colormap, branca.colormap or function (default None)\n The name of a colormap recognized by ``matplotlib``, a list-like of colors,\n :class:`matplotlib.colors.Colormap`, a :class:`branca.colormap.ColorMap` or\n function that returns a named color or hex based on the column\n value, e.g.::\n\n def my_colormap(value): # scalar value defined in 'column'\n if value > 1:\n return \"green\"\n return \"red\"\n\n color : str, array-like (default None)\n Named color or a list-like of colors (named or hex).\n m : folium.Map (default None)\n Existing map instance on which to draw the plot.\n tiles : str, xyzservices.TileProvider (default 'OpenStreetMap Mapnik')\n Map tileset to use. Can choose from the list supported by folium, query a\n :class:`xyzservices.TileProvider` by a name from ``xyzservices.providers``,\n pass :class:`xyzservices.TileProvider` object or pass custom XYZ URL.\n The current list of built-in providers (when ``xyzservices`` is not available):\n\n ``[\"OpenStreetMap\", \"Stamen Terrain\", “Stamen Toner\", “Stamen Watercolor\"\n \"CartoDB positron\", “CartoDB dark_matter\"]``\n\n You can pass a custom tileset to Folium by passing a Leaflet-style URL\n to the tiles parameter: ``http://{s}.yourtiles.com/{z}/{x}/{y}.png``.\n Be sure to check their terms and conditions and to provide attribution with\n the ``attr`` keyword.\n attr : str (default None)\n Map tile attribution; only required if passing custom tile URL.\n tooltip : bool, str, int, list (default True)\n Display GeoDataFrame attributes when hovering over the object.\n ``True`` includes all columns. ``False`` removes tooltip. Pass string or list of\n strings to specify a column(s). Integer specifies first n columns to be\n included. Defaults to ``True``.\n popup : bool, str, int, list (default False)\n Input GeoDataFrame attributes for object displayed when clicking.\n ``True`` includes all columns. ``False`` removes popup. Pass string or list of\n strings to specify a column(s). Integer specifies first n columns to be\n included. 
Defaults to ``False``.\n highlight : bool (default True)\n Enable highlight functionality when hovering over a geometry.\n categorical : bool (default False)\n If ``False``, ``cmap`` will reflect numerical values of the\n column being plotted. For non-numerical columns, this\n will be set to True.\n legend : bool (default True)\n Plot a legend in choropleth plots.\n Ignored if no ``column`` is given.\n scheme : str (default None)\n Name of a choropleth classification scheme (requires ``mapclassify`` >= 2.4.0).\n A :func:`mapclassify.classify` will be used\n under the hood. Supported are all schemes provided by ``mapclassify`` (e.g.\n ``'BoxPlot'``, ``'EqualInterval'``, ``'FisherJenks'``, ``'FisherJenksSampled'``,\n ``'HeadTailBreaks'``, ``'JenksCaspall'``, ``'JenksCaspallForced'``,\n ``'JenksCaspallSampled'``, ``'MaxP'``, ``'MaximumBreaks'``,\n ``'NaturalBreaks'``, ``'Quantiles'``, ``'Percentiles'``, ``'StdMean'``,\n ``'UserDefined'``). Arguments can be passed in ``classification_kwds``.\n k : int (default 5)\n Number of classes\n vmin : None or float (default None)\n Minimum value of ``cmap``. If ``None``, the minimum data value\n in the column to be plotted is used.\n vmax : None or float (default None)\n Maximum value of ``cmap``. If ``None``, the maximum data value\n in the column to be plotted is used.\n width : pixel int or percentage string (default: '100%')\n Width of the folium :class:`~folium.folium.Map`. If the argument\n m is given explicitly, width is ignored.\n height : pixel int or percentage string (default: '100%')\n Height of the folium :class:`~folium.folium.Map`. If the argument\n m is given explicitly, height is ignored.\n categories : list-like\n Ordered list-like object of categories to be used for categorical plot.\n classification_kwds : dict (default None)\n Keyword arguments to pass to mapclassify\n control_scale : bool, (default True)\n Whether to add a control scale on the map.\n marker_type : str, folium.Circle, folium.CircleMarker, folium.Marker (default None)\n Allowed string options are ('marker', 'circle', 'circle_marker'). Defaults to\n folium.CircleMarker.\n marker_kwds: dict (default {})\n Additional keywords to be passed to the selected ``marker_type``, e.g.:\n\n radius : float (default 2 for ``circle_marker`` and 50 for ``circle``))\n Radius of the circle, in meters (for ``circle``) or pixels\n (for ``circle_marker``).\n fill : bool (default True)\n Whether to fill the ``circle`` or ``circle_marker`` with color.\n icon : folium.map.Icon\n the :class:`folium.map.Icon` object to use to render the marker.\n draggable : bool (default False)\n Set to True to be able to drag the marker around the map.\n\n style_kwds : dict (default {})\n Additional style to be passed to folium ``style_function``:\n\n stroke : bool (default True)\n Whether to draw stroke along the path. Set it to ``False`` to\n disable borders on polygons or circles.\n color : str\n Stroke color\n weight : int\n Stroke width in pixels\n opacity : float (default 1.0)\n Stroke opacity\n fill : boolean (default True)\n Whether to fill the path with color. Set it to ``False`` to\n disable filling on polygons or circles.\n fillColor : str\n Fill color. 
Defaults to the value of the color option\n fillOpacity : float (default 0.5)\n Fill opacity.\n style_function : callable\n Function mapping a GeoJson Feature to a style ``dict``.\n\n * Style properties :func:`folium.vector_layers.path_options`\n * GeoJson features :class:`GeoDataFrame.__geo_interface__`\n\n e.g.::\n\n lambda x: {\"color\":\"red\" if x[\"properties\"][\"gdp_md_est\"]<10**6\n else \"blue\"}\n\n Plus all supported by :func:`folium.vector_layers.path_options`. See the\n documentation of :class:`folium.features.GeoJson` for details.\n\n highlight_kwds : dict (default {})\n Style to be passed to folium highlight_function. Uses the same keywords\n as ``style_kwds``. When empty, defaults to ``{\"fillOpacity\": 0.75}``.\n tooltip_kwds : dict (default {})\n Additional keywords to be passed to :class:`folium.features.GeoJsonTooltip`,\n e.g. ``aliases``, ``labels``, or ``sticky``.\n popup_kwds : dict (default {})\n Additional keywords to be passed to :class:`folium.features.GeoJsonPopup`,\n e.g. ``aliases`` or ``labels``.\n legend_kwds : dict (default {})\n Additional keywords to be passed to the legend.\n\n Currently supported customisation:\n\n caption : string\n Custom caption of the legend. Defaults to the column name.\n\n Additional accepted keywords when ``scheme`` is specified:\n\n colorbar : bool (default True)\n An option to control the style of the legend. If True, continuous\n colorbar will be used. If False, categorical legend will be used for bins.\n scale : bool (default True)\n Scale bins along the colorbar axis according to the bin edges (True)\n or use the equal length for each bin (False)\n fmt : string (default \"{:.2f}\")\n A formatting specification for the bin edges of the classes in the\n legend. For example, to have no decimals: ``{\"fmt\": \"{:.0f}\"}``. Applies\n if ``colorbar=False``.\n labels : list-like\n A list of legend labels to override the auto-generated labels.\n Needs to have the same number of elements as the number of\n classes (`k`). Applies if ``colorbar=False``.\n interval : boolean (default False)\n An option to control brackets from mapclassify legend.\n If True, open/closed interval brackets are shown in the legend.\n Applies if ``colorbar=False``.\n max_labels : int, default 10\n Maximum number of colorbar tick labels (requires branca>=0.5.0)\n map_kwds : dict (default {})\n Additional keywords to be passed to folium :class:`~folium.folium.Map`,\n e.g. ``dragging``, or ``scrollWheelZoom``.\n\n\n **kwargs : dict\n Additional options to be passed on to the folium object.\n\n Returns\n -------\n m : folium.folium.Map\n folium :class:`~folium.folium.Map` instance\n\n Examples\n --------\n >>> import geodatasets\n >>> df = geopandas.read_file(\n ... geodatasets.get_path(\"geoda.chicago_health\")\n ... )\n >>> df.head(2) # doctest: +SKIP\n ComAreaID ... geometry\n 0 35 ... POLYGON ((-87.60914 41.84469, -87.60915 41.844...\n 1 36 ... 
POLYGON ((-87.59215 41.81693, -87.59231 41.816...\n\n [2 rows x 87 columns]\n\n >>> df.explore(\"Pop2012\", cmap=\"Blues\") # doctest: +SKIP\n \"\"\"\n\n def _colormap_helper(_cmap, n_resample=None, idx=None):\n \"\"\"Helper for MPL deprecation - GH#2596\"\"\"\n if not n_resample:\n return cm.get_cmap(_cmap)\n else:\n if MPL_361:\n return cm.get_cmap(_cmap).resampled(n_resample)(idx)\n else:\n return cm.get_cmap(_cmap, n_resample)(idx)\n\n try:\n import branca as bc\n import folium\n import re\n import matplotlib\n import matplotlib.colors as colors\n import matplotlib.pyplot as plt\n from mapclassify import classify\n\n # isolate MPL version - GH#2596\n MPL_361 = Version(matplotlib.__version__) >= Version(\"3.6.1\")\n if MPL_361:\n from matplotlib import colormaps as cm\n else:\n import matplotlib.cm as cm\n\n except (ImportError, ModuleNotFoundError):\n raise ImportError(\n \"The 'folium', 'matplotlib' and 'mapclassify' packages are required for \"\n \"'explore()'. You can install them using \"\n \"'conda install -c conda-forge folium matplotlib mapclassify' \"\n \"or 'pip install folium matplotlib mapclassify'.\"\n )\n\n # xyservices is an optional dependency\n try:\n import xyzservices\n\n HAS_XYZSERVICES = True\n except (ImportError, ModuleNotFoundError):\n HAS_XYZSERVICES = False\n\n gdf = df.copy()\n\n # convert LinearRing to LineString\n rings_mask = df.geom_type == \"LinearRing\"\n if rings_mask.any():\n gdf.geometry[rings_mask] = gdf.geometry[rings_mask].apply(\n lambda g: LineString(g)\n )\n\n if gdf.crs is None:\n kwargs[\"crs\"] = \"Simple\"\n tiles = None\n elif not gdf.crs.equals(4326):\n gdf = gdf.to_crs(4326)\n\n # create folium.Map object\n if m is None:\n # Get bounds to specify location and map extent\n bounds = gdf.total_bounds\n location = kwargs.pop(\"location\", None)\n if location is None:\n x = mean([bounds[0], bounds[2]])\n y = mean([bounds[1], bounds[3]])\n location = (y, x)\n if \"zoom_start\" in kwargs.keys():\n fit = False\n else:\n fit = True\n else:\n fit = False\n\n # get a subset of kwargs to be passed to folium.Map\n for i in _MAP_KWARGS:\n if i in map_kwds:\n raise ValueError(\n f\"'{i}' cannot be specified in 'map_kwds'. 
\"\n f\"Use the '{i}={map_kwds[i]}' argument instead.\"\n )\n map_kwds = {\n **map_kwds,\n **{i: kwargs[i] for i in kwargs.keys() if i in _MAP_KWARGS},\n }\n\n if HAS_XYZSERVICES:\n # match provider name string to xyzservices.TileProvider\n if isinstance(tiles, str):\n try:\n tiles = xyzservices.providers.query_name(tiles)\n except ValueError:\n pass\n\n if isinstance(tiles, xyzservices.TileProvider):\n attr = attr if attr else tiles.html_attribution\n if \"min_zoom\" not in map_kwds:\n map_kwds[\"min_zoom\"] = tiles.get(\"min_zoom\", 0)\n if \"max_zoom\" not in map_kwds:\n map_kwds[\"max_zoom\"] = tiles.get(\"max_zoom\", 18)\n tiles = tiles.build_url(scale_factor=\"{r}\")\n\n m = folium.Map(\n location=location,\n control_scale=control_scale,\n tiles=tiles,\n attr=attr,\n width=width,\n height=height,\n **map_kwds,\n )\n\n # fit bounds to get a proper zoom level\n if fit:\n m.fit_bounds([[bounds[1], bounds[0]], [bounds[3], bounds[2]]])\n\n for map_kwd in _MAP_KWARGS:\n kwargs.pop(map_kwd, None)\n\n nan_idx = None\n\n if column is not None:\n if pd.api.types.is_list_like(column):\n if len(column) != gdf.shape[0]:\n raise ValueError(\n \"The GeoDataFrame and given column have different number of rows.\"\n )\n else:\n column_name = \"__plottable_column\"\n gdf[column_name] = column\n column = column_name\n elif pd.api.types.is_categorical_dtype(gdf[column]):\n if categories is not None:\n raise ValueError(\n \"Cannot specify 'categories' when column has categorical dtype\"\n )\n categorical = True\n elif (\n pd.api.types.is_object_dtype(gdf[column])\n or pd.api.types.is_bool_dtype(gdf[column])\n or pd.api.types.is_string_dtype(gdf[column])\n or categories\n ):\n categorical = True\n\n nan_idx = pd.isna(gdf[column])\n\n if categorical:\n cat = pd.Categorical(gdf[column][~nan_idx], categories=categories)\n N = len(cat.categories)\n cmap = cmap if cmap else \"tab20\"\n\n # colormap exists in matplotlib\n if cmap in plt.colormaps():\n color = np.apply_along_axis(\n colors.to_hex,\n 1,\n _colormap_helper(cmap, n_resample=N, idx=cat.codes),\n )\n legend_colors = np.apply_along_axis(\n colors.to_hex, 1, _colormap_helper(cmap, n_resample=N, idx=range(N))\n )\n\n # colormap is matplotlib.Colormap\n elif isinstance(cmap, colors.Colormap):\n color = np.apply_along_axis(colors.to_hex, 1, cmap(cat.codes))\n legend_colors = np.apply_along_axis(colors.to_hex, 1, cmap(range(N)))\n\n # custom list of colors\n elif pd.api.types.is_list_like(cmap):\n if N > len(cmap):\n cmap = cmap * (N // len(cmap) + 1)\n color = np.take(cmap, cat.codes)\n legend_colors = np.take(cmap, range(N))\n\n else:\n raise ValueError(\n \"'cmap' is invalid. 
For categorical plots, pass either valid \"\n \"named matplotlib colormap or a list-like of colors.\"\n )\n\n elif callable(cmap):\n # List of colors based on Branca colormaps or self-defined functions\n color = list(map(lambda x: cmap(x), df[column]))\n\n else:\n vmin = gdf[column].min() if vmin is None else vmin\n vmax = gdf[column].max() if vmax is None else vmax\n\n # get bins\n if scheme is not None:\n if classification_kwds is None:\n classification_kwds = {}\n if \"k\" not in classification_kwds:\n classification_kwds[\"k\"] = k\n\n binning = classify(\n np.asarray(gdf[column][~nan_idx]), scheme, **classification_kwds\n )\n color = np.apply_along_axis(\n colors.to_hex,\n 1,\n _colormap_helper(cmap, n_resample=binning.k, idx=binning.yb),\n )\n\n else:\n bins = np.linspace(vmin, vmax, 257)[1:]\n binning = classify(\n np.asarray(gdf[column][~nan_idx]), \"UserDefined\", bins=bins\n )\n\n color = np.apply_along_axis(\n colors.to_hex,\n 1,\n _colormap_helper(cmap, n_resample=256, idx=binning.yb),\n )\n\n # set default style\n if \"fillOpacity\" not in style_kwds:\n style_kwds[\"fillOpacity\"] = 0.5\n if \"weight\" not in style_kwds:\n style_kwds[\"weight\"] = 2\n if \"style_function\" in style_kwds:\n style_kwds_function = style_kwds[\"style_function\"]\n if not callable(style_kwds_function):\n raise ValueError(\"'style_function' has to be a callable\")\n style_kwds.pop(\"style_function\")\n else:\n\n def _no_style(x):\n return {}\n\n style_kwds_function = _no_style\n\n # specify color\n if color is not None:\n if (\n isinstance(color, str)\n and isinstance(gdf, geopandas.GeoDataFrame)\n and color in gdf.columns\n ): # use existing column\n\n def _style_color(x):\n base_style = {\n \"fillColor\": x[\"properties\"][color],\n **style_kwds,\n }\n return {\n **base_style,\n **style_kwds_function(x),\n }\n\n style_function = _style_color\n else: # assign new column\n if isinstance(gdf, geopandas.GeoSeries):\n gdf = geopandas.GeoDataFrame(geometry=gdf)\n\n if nan_idx is not None and nan_idx.any():\n nan_color = missing_kwds.pop(\"color\", None)\n\n gdf[\"__folium_color\"] = nan_color\n gdf.loc[~nan_idx, \"__folium_color\"] = color\n else:\n gdf[\"__folium_color\"] = color\n\n stroke_color = style_kwds.pop(\"color\", None)\n if not stroke_color:\n\n def _style_column(x):\n base_style = {\n \"fillColor\": x[\"properties\"][\"__folium_color\"],\n \"color\": x[\"properties\"][\"__folium_color\"],\n **style_kwds,\n }\n return {\n **base_style,\n **style_kwds_function(x),\n }\n\n style_function = _style_column\n else:\n\n def _style_stroke(x):\n base_style = {\n \"fillColor\": x[\"properties\"][\"__folium_color\"],\n \"color\": stroke_color,\n **style_kwds,\n }\n return {\n **base_style,\n **style_kwds_function(x),\n }\n\n style_function = _style_stroke\n else: # use folium default\n\n def _style_default(x):\n return {**style_kwds, **style_kwds_function(x)}\n\n style_function = _style_default\n\n if highlight:\n if \"fillOpacity\" not in highlight_kwds:\n highlight_kwds[\"fillOpacity\"] = 0.75\n\n def _style_highlight(x):\n return {**highlight_kwds}\n\n highlight_function = _style_highlight\n else:\n highlight_function = None\n\n # define default for points\n if marker_type is None:\n marker_type = \"circle_marker\"\n\n marker = marker_type\n if isinstance(marker_type, str):\n if marker_type == \"marker\":\n marker = folium.Marker(**marker_kwds)\n elif marker_type == \"circle\":\n marker = folium.Circle(**marker_kwds)\n elif marker_type == \"circle_marker\":\n marker_kwds[\"radius\"] = 
marker_kwds.get(\"radius\", 2)\n marker_kwds[\"fill\"] = marker_kwds.get(\"fill\", True)\n marker = folium.CircleMarker(**marker_kwds)\n else:\n raise ValueError(\n \"Only 'marker', 'circle', and 'circle_marker' are \"\n \"supported as marker values\"\n )\n\n # remove additional geometries\n if isinstance(gdf, geopandas.GeoDataFrame):\n non_active_geoms = [\n name\n for name, val in (gdf.dtypes == \"geometry\").items()\n if val and name != gdf.geometry.name\n ]\n gdf = gdf.drop(columns=non_active_geoms)\n\n # prepare tooltip and popup\n if isinstance(gdf, geopandas.GeoDataFrame):\n # add named index to the tooltip\n if gdf.index.name is not None:\n gdf = gdf.reset_index()\n # specify fields to show in the tooltip\n tooltip = _tooltip_popup(\"tooltip\", tooltip, gdf, **tooltip_kwds)\n popup = _tooltip_popup(\"popup\", popup, gdf, **popup_kwds)\n else:\n tooltip = None\n popup = None\n # escape the curly braces {{}} for jinja2 templates\n feature_collection = gdf.__geo_interface__\n for feature in feature_collection[\"features\"]:\n for k in feature[\"properties\"]:\n # escape the curly braces in values\n if type(feature[\"properties\"][k]) == str:\n feature[\"properties\"][k] = re.sub(\n r\"\\{{2,}\",\n lambda x: \"{% raw %}\" + x.group(0) + \"{% endraw %}\",\n feature[\"properties\"][k],\n )\n\n # add dataframe to map\n folium.GeoJson(\n feature_collection,\n tooltip=tooltip,\n popup=popup,\n marker=marker,\n style_function=style_function,\n highlight_function=highlight_function,\n **kwargs,\n ).add_to(m)\n\n if legend:\n # NOTE: overlaps will be resolved in branca #88\n caption = column if not column == \"__plottable_column\" else \"\"\n caption = legend_kwds.pop(\"caption\", caption)\n if categorical:\n categories = cat.categories.to_list()\n legend_colors = legend_colors.tolist()\n\n if nan_idx.any() and nan_color:\n categories.append(missing_kwds.pop(\"label\", \"NaN\"))\n legend_colors.append(nan_color)\n\n _categorical_legend(m, caption, categories, legend_colors)\n elif column is not None:\n cbar = legend_kwds.pop(\"colorbar\", True)\n colormap_kwds = {}\n if \"max_labels\" in legend_kwds:\n colormap_kwds[\"max_labels\"] = legend_kwds.pop(\"max_labels\")\n if scheme:\n cb_colors = np.apply_along_axis(\n colors.to_hex,\n 1,\n _colormap_helper(cmap, n_resample=binning.k, idx=range(binning.k)),\n )\n if cbar:\n if legend_kwds.pop(\"scale\", True):\n index = [vmin] + binning.bins.tolist()\n else:\n index = None\n colorbar = bc.colormap.StepColormap(\n cb_colors,\n vmin=vmin,\n vmax=vmax,\n caption=caption,\n index=index,\n **colormap_kwds,\n )\n else:\n fmt = legend_kwds.pop(\"fmt\", \"{:.2f}\")\n if \"labels\" in legend_kwds:\n categories = legend_kwds[\"labels\"]\n else:\n categories = binning.get_legend_classes(fmt)\n show_interval = legend_kwds.pop(\"interval\", False)\n if not show_interval:\n categories = [c[1:-1] for c in categories]\n\n if nan_idx.any() and nan_color:\n categories.append(missing_kwds.pop(\"label\", \"NaN\"))\n cb_colors = np.append(cb_colors, nan_color)\n _categorical_legend(m, caption, categories, cb_colors)\n\n else:\n if isinstance(cmap, bc.colormap.ColorMap):\n colorbar = cmap\n else:\n mp_cmap = _colormap_helper(cmap)\n cb_colors = np.apply_along_axis(\n colors.to_hex, 1, mp_cmap(range(mp_cmap.N))\n )\n\n # linear legend\n if mp_cmap.N > 20:\n colorbar = bc.colormap.LinearColormap(\n cb_colors,\n vmin=vmin,\n vmax=vmax,\n caption=caption,\n **colormap_kwds,\n )\n\n # steps\n else:\n colorbar = bc.colormap.StepColormap(\n cb_colors,\n vmin=vmin,\n 
vmax=vmax,\n caption=caption,\n **colormap_kwds,\n )\n\n if cbar:\n if nan_idx.any() and nan_color:\n _categorical_legend(\n m, \"\", [missing_kwds.pop(\"label\", \"NaN\")], [nan_color]\n )\n m.add_child(colorbar)\n\n return m\n\n\ndef _tooltip_popup(type, fields, gdf, **kwds):\n \"\"\"get tooltip or popup\"\"\"\n import folium\n\n # specify fields to show in the tooltip\n if fields is False or fields is None or fields == 0:\n return None\n else:\n if fields is True:\n fields = gdf.columns.drop(gdf.geometry.name).to_list()\n elif isinstance(fields, int):\n fields = gdf.columns.drop(gdf.geometry.name).to_list()[:fields]\n elif isinstance(fields, str):\n fields = [fields]\n\n for field in [\"__plottable_column\", \"__folium_color\"]:\n if field in fields:\n fields.remove(field)\n\n # Cast fields to str\n fields = list(map(str, fields))\n if type == \"tooltip\":\n return folium.GeoJsonTooltip(fields, **kwds)\n elif type == \"popup\":\n return folium.GeoJsonPopup(fields, **kwds)\n\n\ndef _categorical_legend(m, title, categories, colors):\n \"\"\"\n Add categorical legend to a map\n\n The implementation is using the code originally written by Michel Metran\n (@michelmetran) and released on GitHub\n (https://github.com/michelmetran/package_folium) under MIT license.\n\n Copyright (c) 2020 Michel Metran\n\n Parameters\n ----------\n m : folium.Map\n Existing map instance on which to draw the plot\n title : str\n title of the legend (e.g. column name)\n categories : list-like\n list of categories\n colors : list-like\n list of colors (in the same order as categories)\n \"\"\"\n\n # Header to Add\n head = \"\"\"\n {% macro header(this, kwargs) %}\n <script src=\"https://code.jquery.com/ui/1.12.1/jquery-ui.js\"></script>\n <script>$( function() {\n $( \".maplegend\" ).draggable({\n start: function (event, ui) {\n $(this).css({\n right: \"auto\",\n top: \"auto\",\n bottom: \"auto\"\n });\n }\n });\n });\n </script>\n <style type='text/css'>\n .maplegend {\n position: absolute;\n z-index:9999;\n background-color: rgba(255, 255, 255, .8);\n border-radius: 5px;\n box-shadow: 0 0 15px rgba(0,0,0,0.2);\n padding: 10px;\n font: 12px/14px Arial, Helvetica, sans-serif;\n right: 10px;\n bottom: 20px;\n }\n .maplegend .legend-title {\n text-align: left;\n margin-bottom: 5px;\n font-weight: bold;\n }\n .maplegend .legend-scale ul {\n margin: 0;\n margin-bottom: 0px;\n padding: 0;\n float: left;\n list-style: none;\n }\n .maplegend .legend-scale ul li {\n list-style: none;\n margin-left: 0;\n line-height: 16px;\n margin-bottom: 2px;\n }\n .maplegend ul.legend-labels li span {\n display: block;\n float: left;\n height: 14px;\n width: 14px;\n margin-right: 5px;\n margin-left: 0;\n border: 0px solid #ccc;\n }\n .maplegend .legend-source {\n color: #777;\n clear: both;\n }\n .maplegend a {\n color: #777;\n }\n </style>\n {% endmacro %}\n \"\"\"\n import branca as bc\n\n # Add CSS (on Header)\n macro = bc.element.MacroElement()\n macro._template = bc.element.Template(head)\n m.get_root().add_child(macro)\n\n body = f\"\"\"\n <div id='maplegend {title}' class='maplegend'>\n <div class='legend-title'>{title}</div>\n <div class='legend-scale'>\n <ul class='legend-labels'>\"\"\"\n\n # Loop Categories\n for label, color in zip(categories, colors):\n body += f\"\"\"\n <li><span style='background:{color}'></span>{label}</li>\"\"\"\n\n body += \"\"\"\n </ul>\n </div>\n </div>\n \"\"\"\n\n # Add Body\n body = bc.element.Element(body, \"legend\")\n m.get_root().html.add_child(body)\n\n\ndef _explore_geoseries(\n s,\n 
color=None,\n m=None,\n tiles=\"OpenStreetMap\",\n attr=None,\n highlight=True,\n width=\"100%\",\n height=\"100%\",\n control_scale=True,\n marker_type=None,\n marker_kwds={},\n style_kwds={},\n highlight_kwds={},\n map_kwds={},\n **kwargs,\n):\n \"\"\"Interactive map based on GeoPandas and folium/leaflet.js\n\n Generate an interactive leaflet map based on :class:`~geopandas.GeoSeries`\n\n Parameters\n ----------\n color : str, array-like (default None)\n Named color or a list-like of colors (named or hex).\n m : folium.Map (default None)\n Existing map instance on which to draw the plot.\n tiles : str, xyzservices.TileProvider (default 'OpenStreetMap Mapnik')\n Map tileset to use. Can choose from the list supported by folium, query a\n :class:`xyzservices.TileProvider` by a name from ``xyzservices.providers``,\n pass :class:`xyzservices.TileProvider` object or pass custom XYZ URL.\n The current list of built-in providers (when ``xyzservices`` is not available):\n\n ``[\"OpenStreetMap\", \"Stamen Terrain\", “Stamen Toner\", “Stamen Watercolor\"\n \"CartoDB positron\", “CartoDB dark_matter\"]``\n\n You can pass a custom tileset to Folium by passing a Leaflet-style URL\n to the tiles parameter: ``http://{s}.yourtiles.com/{z}/{x}/{y}.png``.\n Be sure to check their terms and conditions and to provide attribution with\n the ``attr`` keyword.\n attr : str (default None)\n Map tile attribution; only required if passing custom tile URL.\n highlight : bool (default True)\n Enable highlight functionality when hovering over a geometry.\n width : pixel int or percentage string (default: '100%')\n Width of the folium :class:`~folium.folium.Map`. If the argument\n m is given explicitly, width is ignored.\n height : pixel int or percentage string (default: '100%')\n Height of the folium :class:`~folium.folium.Map`. If the argument\n m is given explicitly, height is ignored.\n control_scale : bool, (default True)\n Whether to add a control scale on the map.\n marker_type : str, folium.Circle, folium.CircleMarker, folium.Marker (default None)\n Allowed string options are ('marker', 'circle', 'circle_marker'). Defaults to\n folium.Marker.\n marker_kwds: dict (default {})\n Additional keywords to be passed to the selected ``marker_type``, e.g.:\n\n radius : float\n Radius of the circle, in meters (for ``'circle'``) or pixels\n (for ``circle_marker``).\n icon : folium.map.Icon\n the :class:`folium.map.Icon` object to use to render the marker.\n draggable : bool (default False)\n Set to True to be able to drag the marker around the map.\n\n style_kwds : dict (default {})\n Additional style to be passed to folium ``style_function``:\n\n stroke : bool (default True)\n Whether to draw stroke along the path. Set it to ``False`` to\n disable borders on polygons or circles.\n color : str\n Stroke color\n weight : int\n Stroke width in pixels\n opacity : float (default 1.0)\n Stroke opacity\n fill : boolean (default True)\n Whether to fill the path with color. Set it to ``False`` to\n disable filling on polygons or circles.\n fillColor : str\n Fill color. 
Defaults to the value of the color option\n fillOpacity : float (default 0.5)\n Fill opacity.\n style_function : callable\n Function mapping a GeoJson Feature to a style ``dict``.\n\n * Style properties :func:`folium.vector_layers.path_options`\n * GeoJson features :class:`GeoSeries.__geo_interface__`\n\n e.g.::\n\n lambda x: {\"color\":\"red\" if x[\"properties\"][\"gdp_md_est\"]<10**6\n else \"blue\"}\n\n\n Plus all supported by :func:`folium.vector_layers.path_options`. See the\n documentation of :class:`folium.features.GeoJson` for details.\n\n highlight_kwds : dict (default {})\n Style to be passed to folium highlight_function. Uses the same keywords\n as ``style_kwds``. When empty, defaults to ``{\"fillOpacity\": 0.75}``.\n map_kwds : dict (default {})\n Additional keywords to be passed to folium :class:`~folium.folium.Map`,\n e.g. ``dragging``, or ``scrollWheelZoom``.\n\n **kwargs : dict\n Additional options to be passed on to the folium.\n\n Returns\n -------\n m : folium.folium.Map\n folium :class:`~folium.folium.Map` instance\n\n \"\"\"\n return _explore(\n s,\n color=color,\n m=m,\n tiles=tiles,\n attr=attr,\n highlight=highlight,\n width=width,\n height=height,\n control_scale=control_scale,\n marker_type=marker_type,\n marker_kwds=marker_kwds,\n style_kwds=style_kwds,\n highlight_kwds=highlight_kwds,\n map_kwds=map_kwds,\n **kwargs,\n )\n", "path": "geopandas/explore.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 7862f92d2a..fd5777aa0b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,7 +15,10 @@ New features and improvements: - Added ``exclusive`` parameter to ``sjoin_nearest`` method for Shapely >= 2.0 (#2877) Bug fixes: + +- Fix a color assignment in ``explore`` when using ``UserDefined`` bins (#2923) - ``assert_geodataframe_equal`` now handles GeoDataFrames with no active geometry (#2498) + ## Version 0.13.2 (Jun 6, 2023) Bug fix: diff --git a/ci/envs/310-dev.yaml b/ci/envs/310-dev.yaml index b6ae56fc25..dafa5020ba 100644 --- a/ci/envs/310-dev.yaml +++ b/ci/envs/310-dev.yaml @@ -33,3 +33,4 @@ dependencies: - git+https://github.com/pygeos/pygeos.git@master - git+https://github.com/python-visualization/folium.git@main - git+https://github.com/geopandas/xyzservices.git@main + - git+https://github.com/geopandas/geodatasets.git@main diff --git a/ci/envs/310-latest-conda-forge.yaml b/ci/envs/310-latest-conda-forge.yaml index 350bebe94f..af5a90eace 100644 --- a/ci/envs/310-latest-conda-forge.yaml +++ b/ci/envs/310-latest-conda-forge.yaml @@ -24,6 +24,7 @@ dependencies: - scipy - geopy - pointpats + - geodatasets # installed in tests.yaml, because not available on windows # - postgis - SQLalchemy<2 diff --git a/ci/envs/311-latest-conda-forge.yaml b/ci/envs/311-latest-conda-forge.yaml index 531151bce2..7b577934a0 100644 --- a/ci/envs/311-latest-conda-forge.yaml +++ b/ci/envs/311-latest-conda-forge.yaml @@ -25,6 +25,7 @@ dependencies: - scipy - geopy - pointpats + - geodatasets # installed in tests.yaml, because not available on windows # - postgis - SQLalchemy>=2 diff --git a/ci/envs/38-latest-conda-forge.yaml b/ci/envs/38-latest-conda-forge.yaml index a75dc3e655..d498df66f5 100644 --- a/ci/envs/38-latest-conda-forge.yaml +++ b/ci/envs/38-latest-conda-forge.yaml @@ -29,3 +29,4 @@ dependencies: - pyarrow - pyogrio - pointpats + - geodatasets diff --git a/ci/envs/38-latest-defaults.yaml b/ci/envs/38-latest-defaults.yaml index d995d22a71..90d3b5f1a1 100644 --- a/ci/envs/38-latest-defaults.yaml +++ b/ci/envs/38-latest-defaults.yaml @@ -29,3 +29,4 @@ dependencies: - pyarrow - folium - xyzservices + - geodatasets diff --git a/ci/envs/38-minimal.yaml b/ci/envs/38-minimal.yaml index b7a1436df4..3c215bdc05 100644 --- a/ci/envs/38-minimal.yaml +++ b/ci/envs/38-minimal.yaml @@ -23,3 +23,4 @@ dependencies: - SQLalchemy - libspatialite - pyarrow=8.0.0 + - geodatasets diff --git a/ci/envs/38-pd12-defaults.yaml b/ci/envs/38-pd12-defaults.yaml index 9608a37f1f..1585017d09 100644 --- a/ci/envs/38-pd12-defaults.yaml +++ b/ci/envs/38-pd12-defaults.yaml @@ -28,3 +28,4 @@ dependencies: - geopy - mapclassify==2.4.0 - pyarrow + - geodatasets diff --git a/ci/envs/39-latest-conda-forge.yaml b/ci/envs/39-latest-conda-forge.yaml index 35a1fde028..2a35ef62b6 100644 --- a/ci/envs/39-latest-conda-forge.yaml +++ b/ci/envs/39-latest-conda-forge.yaml @@ -27,6 +27,7 @@ dependencies: - scipy - geopy - pointpats + - geodatasets # installed in tests.yaml, because not available on windows # - postgis - SQLalchemy<2 diff --git a/ci/envs/39-pd13-conda-forge.yaml b/ci/envs/39-pd13-conda-forge.yaml index 634882fad6..8ffcf48895 100644 --- a/ci/envs/39-pd13-conda-forge.yaml +++ b/ci/envs/39-pd13-conda-forge.yaml @@ -24,6 +24,7 @@ dependencies: - xyzservices - scipy - geopy + - geodatasets # installed in tests.yaml, because not available on windows # - postgis - SQLalchemy<2 @@ -33,6 +34,3 @@ dependencies: - pyarrow # doctest testing - pytest-doctestplus - - pip - - pip: - - geodatasets diff --git 
a/geopandas/explore.py b/geopandas/explore.py index 04855679ee..505f574341 100644 --- a/geopandas/explore.py +++ b/geopandas/explore.py @@ -468,7 +468,7 @@ def _colormap_helper(_cmap, n_resample=None, idx=None): color = np.apply_along_axis( colors.to_hex, 1, - _colormap_helper(cmap, n_resample=k, idx=binning.yb), + _colormap_helper(cmap, n_resample=binning.k, idx=binning.yb), ) else: diff --git a/geopandas/tests/test_explore.py b/geopandas/tests/test_explore.py index c9cc8a5ffd..14d5061e84 100644 --- a/geopandas/tests/test_explore.py +++ b/geopandas/tests/test_explore.py @@ -8,6 +8,7 @@ branca = pytest.importorskip("branca") matplotlib = pytest.importorskip("matplotlib") mapclassify = pytest.importorskip("mapclassify") +geodatasets = pytest.importorskip("geodatasets") import matplotlib.cm as cm # noqa import matplotlib.colors as colors # noqa @@ -22,6 +23,7 @@ def setup_method(self): self.nybb = gpd.read_file(gpd.datasets.get_path("nybb")) self.world = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres")) self.cities = gpd.read_file(gpd.datasets.get_path("naturalearth_cities")) + self.chicago = gpd.read_file(geodatasets.get_path("geoda.chicago_commpop")) self.world["range"] = range(len(self.world)) self.missing = self.world.copy() np.random.seed(42) @@ -173,6 +175,7 @@ def test_choropleth_mapclassify(self): assert '"fillColor":"#5ec962"' in out_str assert '"fillColor":"#fde725"' in out_str assert '"fillColor":"#440154"' in out_str + # custom k m = self.world.explore(column="pop_est", scheme="naturalbreaks", k=3) out_str = self._fetch_map_string(m) @@ -180,6 +183,18 @@ def test_choropleth_mapclassify(self): assert '"fillColor":"#fde725"' in out_str assert '"fillColor":"#440154"' in out_str + # UserDefined overriding default k + m = self.chicago.explore( + column="POP2010", + scheme="UserDefined", + classification_kwds={"bins": [25000, 50000, 75000, 100000]}, + ) + out_str = self._fetch_map_string(m) + assert '"fillColor":"#fde725"' in out_str + assert '"fillColor":"#35b779"' in out_str + assert '"fillColor":"#31688e"' in out_str + assert '"fillColor":"#440154"' in out_str + def test_categorical(self): """Categorical maps""" # auto detection
GPflow__GPflow-1590
Update the intro_to_gpflow2 notebook to account for saved_model now working, to finally close #1127.
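(Editorial sketch, not part of the original record.) The notebook change referenced here concerns exporting a GPflow model with TensorFlow's `tf.saved_model`, which issue #1127 had reported as broken. The exact notebook content is not included in this record, so the snippet below is only an illustration of that workflow under the GPflow 2.x API; the attribute name `predict_f_compiled` and the export path are arbitrary choices.

```python
import numpy as np
import tensorflow as tf
import gpflow

# Toy regression data and a plain GPR model (GPflow 2.x API assumed).
X = np.random.rand(50, 1)
Y = np.sin(10 * X) + 0.1 * np.random.randn(50, 1)
model = gpflow.models.GPR((X, Y), kernel=gpflow.kernels.SquaredExponential())

# Trace predict_f with a fixed input signature so it is captured in the SavedModel.
model.predict_f_compiled = tf.function(
    model.predict_f,
    input_signature=[tf.TensorSpec(shape=[None, 1], dtype=tf.float64)],
)

tf.saved_model.save(model, "/tmp/gpr_saved_model")
loaded = tf.saved_model.load("/tmp/gpr_saved_model")
mean, var = loaded.predict_f_compiled(X[:3])
```

The point of the sketch is that a `tf.function` with a fixed `input_signature` attached to the model (a `tf.Module`) is what gets traced into the SavedModel, so the loaded object can be used for prediction without re-building the GPflow model.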
[ { "content": "# -*- coding: utf-8 -*-\n#\n# GPflow documentation build configuration file, created by\n# sphinx-quickstart on Mon Jul 25 12:37:37 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport types\n\n# When we install GPflow on readthedocs we omit installing Tensorflow\n# and Tensorflow Probability. We make up for it by mocking them here.\nautodoc_mock_imports = [\"tensorflow\", \"tensorflow_probability\"]\n\n\n# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# otherwise, readthedocs.org uses their theme by default, so no need to specify it\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n # builtin extansions\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"numpydoc\",\n \"nbsphinx\",\n \"sphinx_autodoc_typehints\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n]\n\nset_type_checking_flag = True\ntypehints_fully_qualified = False\nalways_document_param_types = True\n# autoclass_content = 'both'\n\n# numpydoc_show_class_members = True\nnumpydoc_class_members_toctree = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n#\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"GPflow\"\ncopyright = \"2016-2020 The GPflow Contributors\"\nauthor = \"James Hensman and Alexander G. de G. Matthews and many others\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"2.1\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"2.1.2\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#\n# today = ''\n#\n# Else, today_fmt is used as the format for a strftime call.\n#\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\".ipynb_checkpoints\"]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_css_files = [\n \"green_theme.css\",\n]\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#\n# html_title = u'GPflow v0.2.0'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#\nhtml_logo = \"_static/GPflow_Logos_White.png\"\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#\n# html_favicon = None\nhtml_theme_options = {\"logo_only\": True}\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n#\n# html_extra_path = []\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#\n# html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n#\n# html_domain_indices = True\n\n# If false, no index is generated.\n#\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'\n#\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"GPflowdoc\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"gpflow.tex\",\n \"GPflow Documentation\",\n \"James Hensman and Alexander G. de G. 
Matthews\",\n \"manual\",\n ),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n#\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#\n# latex_appendices = []\n\n# It false, will not define \\strong, \\code, \titleref, \\crossref ... but only\n# \\sphinxstrong, ..., \\sphinxtitleref, ... To help avoid clash with user added\n# packages.\n#\n# latex_keep_old_macro_names = True\n\n# If false, no module index is generated.\n#\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"GPflow\", \"GPflow Documentation\", [author], 1)]\n\n# If true, show URL addresses after external links.\n#\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"GPflow\",\n \"GPflow Documentation\",\n author,\n \"GPflow\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n# Documents to append as an appendix to all manuals.\n#\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n#\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#\n# texinfo_no_detailmenu = False\n\n\ndef setup(app):\n \"\"\" Entry point to sphinx build customisation. \"\"\"\n app.connect(\"autodoc-skip-member\", autodoc_skip_member_callback)\n\n\ndef autodoc_skip_member_callback(app, what, name, obj, skip, options):\n \"\"\"\n Only skip special methods and functions, including `__init__`, if they have no docstring.\n \"\"\"\n if isinstance(obj, (types.FunctionType, types.MethodType)):\n if getattr(obj, \"__doc__\", None) is not None:\n return False # never skip methods containing a docstring\n\n return skip\n", "path": "doc/source/conf.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# GPflow documentation build configuration file, created by\n# sphinx-quickstart on Mon Jul 25 12:37:37 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport types\n\n# When we install GPflow on readthedocs we omit installing Tensorflow\n# and Tensorflow Probability. We make up for it by mocking them here.\nautodoc_mock_imports = [\"tensorflow\", \"tensorflow_probability\"]\n\n\n# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# otherwise, readthedocs.org uses their theme by default, so no need to specify it\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n # builtin extansions\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"numpydoc\",\n \"nbsphinx\",\n \"sphinx_autodoc_typehints\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n]\n\nset_type_checking_flag = True\ntypehints_fully_qualified = False\nalways_document_param_types = True\n# autoclass_content = 'both'\n\n# numpydoc_show_class_members = True\nnumpydoc_class_members_toctree = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n#\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"GPflow\"\ncopyright = \"2016-2020 The GPflow Contributors\"\nauthor = \"James Hensman and Alexander G. de G. Matthews and many others\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"2.1\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"2.1.3\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#\n# today = ''\n#\n# Else, today_fmt is used as the format for a strftime call.\n#\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\".ipynb_checkpoints\"]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_css_files = [\n \"green_theme.css\",\n]\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#\n# html_title = u'GPflow v0.2.0'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#\nhtml_logo = \"_static/GPflow_Logos_White.png\"\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#\n# html_favicon = None\nhtml_theme_options = {\"logo_only\": True}\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n#\n# html_extra_path = []\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#\n# html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n#\n# html_domain_indices = True\n\n# If false, no index is generated.\n#\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'\n#\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"GPflowdoc\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"gpflow.tex\",\n \"GPflow Documentation\",\n \"James Hensman and Alexander G. de G. 
Matthews\",\n \"manual\",\n ),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n#\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#\n# latex_appendices = []\n\n# It false, will not define \\strong, \\code, \titleref, \\crossref ... but only\n# \\sphinxstrong, ..., \\sphinxtitleref, ... To help avoid clash with user added\n# packages.\n#\n# latex_keep_old_macro_names = True\n\n# If false, no module index is generated.\n#\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"GPflow\", \"GPflow Documentation\", [author], 1)]\n\n# If true, show URL addresses after external links.\n#\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"GPflow\",\n \"GPflow Documentation\",\n author,\n \"GPflow\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n# Documents to append as an appendix to all manuals.\n#\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n#\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#\n# texinfo_no_detailmenu = False\n\n\ndef setup(app):\n \"\"\" Entry point to sphinx build customisation. \"\"\"\n app.connect(\"autodoc-skip-member\", autodoc_skip_member_callback)\n\n\ndef autodoc_skip_member_callback(app, what, name, obj, skip, options):\n \"\"\"\n Only skip special methods and functions, including `__init__`, if they have no docstring.\n \"\"\"\n if isinstance(obj, (types.FunctionType, types.MethodType)):\n if getattr(obj, \"__doc__\", None) is not None:\n return False # never skip methods containing a docstring\n\n return skip\n", "path": "doc/source/conf.py" } ]
diff --git a/VERSION b/VERSION index eca07e4c1..ac2cdeba0 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1.2 +2.1.3 diff --git a/doc/source/conf.py b/doc/source/conf.py index 2a6b81be8..a4b29b7f4 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -94,7 +94,7 @@ # The short X.Y version. version = "2.1" # The full version, including alpha/beta/rc tags. -release = "2.1.2" +release = "2.1.3" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.
mne-tools__mne-bids-708
_read_events() changes event values
Currently I use the following procedure to write a BIDS dataset (simplified):
```
raw = mne.io.read_raw_fif(raw_file, preload=False)
events = mne.find_events(raw)
bids_basename = mne_bids.BIDSPath(root=output_path, subject=f'{sub:02d}', session=f'{ses:02d}', task=f'{task}')
mne_bids.write_raw_bids(raw, bids_basename, event_id=event_id[task], events_data=events, overwrite=True)
```
Doing so, the event values that are written to `events.tsv` are not the ones defined in the MEG file. Specifically, it seems that the original values are ignored and consecutive codes are assigned instead, one per unique event type. I started with the following events: `1, 2, 4, 8, 16, 512, 2048` and ended up with `1, 2, 3, 4, 5, 6, 7`. The events in the MEG file itself are unchanged.
From looking at [read.py](https://github.com/mne-tools/mne-bids/blob/master/mne_bids/read.py), the problem seems to be that the original values get lost during the conversion of events to annotations (see [here](https://github.com/mne-tools/mne-bids/blob/d65b8ca3edf38a4be5a1db8d24592cf2a1a3d5ef/mne_bids/read.py#L131)). So there is a good chance that this might be an issue with `mne` rather than `mne_bids`.
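(Editorial note, not part of the original record.) The renumbering described above can be reproduced with plain MNE-Python, independent of MNE-BIDS: `mne.events_from_annotations` with its default `event_id='auto'` assigns consecutive integers to the sorted annotation descriptions, whereas passing the original `event_id` mapping preserves the trigger values. The sketch below assumes only `mne` and `numpy`; the channel name, event codes, and descriptions are made up.

```python
import numpy as np
import mne

sfreq = 1000.0
info = mne.create_info(["STI 014"], sfreq, ["stim"])
raw = mne.io.RawArray(np.zeros((1, int(10 * sfreq))), info)

# Original trigger codes, as they would appear in the recording.
event_id = {"face": 1, "house": 2, "tone": 512, "button": 2048}
events = np.array([[1000, 0, 1], [3000, 0, 2], [5000, 0, 512], [7000, 0, 2048]])

# Events -> Annotations round trip, as done when writing/reading BIDS events.
annot = mne.annotations_from_events(
    events=events, sfreq=sfreq,
    event_desc={code: name for name, code in event_id.items()})
raw.set_annotations(annot)

# Default: unique descriptions are re-coded 1..N in sorted order,
# which is exactly the renumbering reported above.
_, ids_auto = mne.events_from_annotations(raw)
print(ids_auto)   # {'button': 1, 'face': 2, 'house': 3, 'tone': 4}

# Supplying the original mapping keeps the trigger values intact.
_, ids_kept = mne.events_from_annotations(raw, event_id=event_id)
print(ids_kept)   # {'face': 1, 'house': 2, 'tone': 512, 'button': 2048}
```

This points at the same root cause the reporter identified: the integer codes are discarded when round-tripping through `Annotations` unless the original mapping is supplied explicitly to `events_from_annotations`.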
[ { "content": "\"\"\"Check whether a file format is supported by BIDS and then load it.\"\"\"\n# Authors: Mainak Jas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Teon Brooks <[email protected]>\n# Chris Holdgraf <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\nimport os.path as op\nimport glob\nimport json\nfrom datetime import datetime, timezone\n\nimport numpy as np\nimport mne\nfrom mne import io, read_events, events_from_annotations\nfrom mne.utils import has_nibabel, logger, warn\nfrom mne.coreg import fit_matched_points\nfrom mne.transforms import apply_trans\n\nfrom mne_bids.dig import _read_dig_bids\nfrom mne_bids.tsv_handler import _from_tsv, _drop\nfrom mne_bids.config import ALLOWED_DATATYPE_EXTENSIONS, reader, _map_options\nfrom mne_bids.utils import _extract_landmarks, _get_ch_type_mapping\nfrom mne_bids.path import (BIDSPath, _parse_ext, _find_matching_sidecar,\n _infer_datatype)\n\n\ndef _read_raw(raw_fpath, electrode=None, hsp=None, hpi=None,\n allow_maxshield=False, config=None, verbose=None, **kwargs):\n \"\"\"Read a raw file into MNE, making inferences based on extension.\"\"\"\n _, ext = _parse_ext(raw_fpath)\n\n # KIT systems\n if ext in ['.con', '.sqd']:\n raw = io.read_raw_kit(raw_fpath, elp=electrode, hsp=hsp,\n mrk=hpi, preload=False, **kwargs)\n\n # BTi systems\n elif ext == '.pdf':\n raw = io.read_raw_bti(raw_fpath, config_fname=config,\n head_shape_fname=hsp,\n preload=False, verbose=verbose,\n **kwargs)\n\n elif ext == '.fif':\n raw = reader[ext](raw_fpath, allow_maxshield, **kwargs)\n\n elif ext in ['.ds', '.vhdr', '.set', '.edf', '.bdf']:\n raw = reader[ext](raw_fpath, **kwargs)\n\n # MEF and NWB are allowed, but not yet implemented\n elif ext in ['.mef', '.nwb']:\n raise ValueError(f'Got \"{ext}\" as extension. This is an allowed '\n f'extension but there is no IO support for this '\n f'file format yet.')\n\n # No supported data found ...\n # ---------------------------\n else:\n raise ValueError(f'Raw file name extension must be one '\n f'of {ALLOWED_DATATYPE_EXTENSIONS}\\n'\n f'Got {ext}')\n return raw\n\n\ndef _read_events(events_data, event_id, raw, verbose=None):\n \"\"\"Retrieve events (for use in *_events.tsv) from FIFF/array & Annotations.\n\n Parameters\n ----------\n events_data : str | np.ndarray | None\n If a string, a path to an events file. If an array, an MNE events array\n (shape n_events, 3). If None, events will be generated from\n ``raw.annotations``.\n event_id : dict | None\n The event id dict used to create a 'trial_type' column in events.tsv,\n mapping a description key to an integer-valued event code.\n raw : mne.io.Raw\n The data as MNE-Python Raw object.\n verbose : bool | str | int | None\n If not None, override default verbose level (see :func:`mne.verbose`).\n\n Returns\n -------\n all_events : np.ndarray, shape = (n_events, 3)\n The first column contains the event time in samples and the third\n column contains the event id. 
The second column is ignored for now but\n typically contains the value of the trigger channel either immediately\n before the event or immediately after.\n all_dur : np.ndarray, shape (n_events,)\n The event durations in seconds.\n all_desc : dict\n A dictionary with the keys corresponding to the event descriptions and\n the values to the event IDs.\n\n \"\"\"\n # get events from events_data\n if isinstance(events_data, str):\n events = read_events(events_data, verbose=verbose).astype(int)\n elif isinstance(events_data, np.ndarray):\n if events_data.ndim != 2:\n raise ValueError('Events must have two dimensions, '\n f'found {events_data.ndim}')\n if events_data.shape[1] != 3:\n raise ValueError('Events must have second dimension of length 3, '\n f'found {events_data.shape[1]}')\n events = events_data\n else:\n events = np.empty(shape=(0, 3), dtype=int)\n\n if events.size > 0:\n # Only keep events for which we have an ID <> description mapping.\n ids_without_desc = set(events[:, 2]) - set(event_id.values())\n if ids_without_desc:\n raise ValueError(\n f'No description was specified for the following event(s): '\n f'{\", \".join([str(x) for x in sorted(ids_without_desc)])}. '\n f'Please add them to the event_id dictionary, or drop them '\n f'from the events_data array.'\n )\n del ids_without_desc\n mask = [e in list(event_id.values()) for e in events[:, 2]]\n events = events[mask]\n\n # Append events to raw.annotations. All event onsets are relative to\n # measurement beginning.\n id_to_desc_map = dict(zip(event_id.values(), event_id.keys()))\n # We don't pass `first_samp`, as set_annotations() below will take\n # care of this shift automatically.\n new_annotations = mne.annotations_from_events(\n events=events, sfreq=raw.info['sfreq'], event_desc=id_to_desc_map,\n orig_time=raw.annotations.orig_time, verbose=verbose)\n\n raw = raw.copy() # Don't alter the original.\n annotations = raw.annotations.copy()\n\n # We use `+=` here because `Annotations.__iadd__()` does the right\n # thing and also performs a sanity check on `Annotations.orig_time`.\n annotations += new_annotations\n raw.set_annotations(annotations)\n del id_to_desc_map, annotations, new_annotations\n\n # Now convert the Annotations to events.\n all_events, all_desc = events_from_annotations(\n raw,\n regexp=None, # Include `BAD_` and `EDGE_` Annotations, too.\n verbose=verbose\n )\n all_dur = raw.annotations.duration\n if all_events.size == 0 and 'rest' not in raw.filenames[0]:\n warn('No events found or provided. Please add annotations '\n 'to the raw data, or provide the events_data and '\n 'event_id parameters. If this is resting state data '\n 'it is recommended to name the task \"rest\".')\n\n return all_events, all_dur, all_desc\n\n\ndef _handle_participants_reading(participants_fname, raw,\n subject, verbose=None):\n participants_tsv = _from_tsv(participants_fname)\n subjects = participants_tsv['participant_id']\n row_ind = subjects.index(subject)\n\n # set data from participants tsv into subject_info\n for infokey, infovalue in participants_tsv.items():\n if infokey == 'sex' or infokey == 'hand':\n value = _map_options(what=infokey, key=infovalue[row_ind],\n fro='bids', to='mne')\n # We don't know how to translate to MNE, so skip.\n if value is None:\n if infokey == 'sex':\n info_str = 'subject sex'\n else:\n info_str = 'subject handedness'\n warn(f'Unable to map `{infokey}` value to MNE. 
'\n f'Not setting {info_str}.')\n else:\n value = infovalue[row_ind]\n # add data into raw.Info\n if raw.info['subject_info'] is None:\n raw.info['subject_info'] = dict()\n raw.info['subject_info'][infokey] = value\n\n return raw\n\n\ndef _handle_scans_reading(scans_fname, raw, bids_path, verbose=False):\n \"\"\"Read associated scans.tsv and set meas_date.\"\"\"\n scans_tsv = _from_tsv(scans_fname)\n fname = bids_path.fpath.name\n\n if '_split-' in fname:\n # for split files, scans only stores the filename without ``split``\n extension = bids_path.fpath.suffix\n bids_path.update(split=None, extension=extension)\n fname = bids_path.basename\n elif fname.endswith('.pdf'):\n # for BTI files, the scan is an entire directory\n fname = fname.split('.')[0]\n\n # get the row corresponding to the file\n # use string concatenation instead of os.path\n # to work nicely with windows\n data_fname = bids_path.datatype + '/' + fname\n fnames = scans_tsv['filename']\n acq_times = scans_tsv['acq_time']\n row_ind = fnames.index(data_fname)\n\n # extract the acquisition time from scans file\n acq_time = acq_times[row_ind]\n if acq_time != 'n/a':\n # microseconds in the acquisition time is optional\n if '.' not in acq_time:\n # acquisition time ends with '.%fZ' microseconds string\n acq_time += '.0Z'\n acq_time = datetime.strptime(acq_time, '%Y-%m-%dT%H:%M:%S.%fZ')\n acq_time = acq_time.replace(tzinfo=timezone.utc)\n\n if verbose:\n logger.debug(f'Loaded {scans_fname} scans file to set '\n f'acq_time as {acq_time}.')\n raw.set_meas_date(acq_time)\n return raw\n\n\ndef _handle_info_reading(sidecar_fname, raw, verbose=None):\n \"\"\"Read associated sidecar.json and populate raw.\n\n Handle PowerLineFrequency of recording.\n \"\"\"\n with open(sidecar_fname, 'r', encoding='utf-8-sig') as fin:\n sidecar_json = json.load(fin)\n\n # read in the sidecar JSON's line frequency\n line_freq = sidecar_json.get(\"PowerLineFrequency\")\n if line_freq == \"n/a\":\n line_freq = None\n\n if raw.info[\"line_freq\"] is not None and line_freq is None:\n line_freq = raw.info[\"line_freq\"] # take from file is present\n\n if raw.info[\"line_freq\"] is not None and line_freq is not None:\n # if both have a set Power Line Frequency, then\n # check that they are the same, else there is a\n # discrepency in the metadata of the dataset.\n if raw.info[\"line_freq\"] != line_freq:\n raise ValueError(\"Line frequency in sidecar json does \"\n \"not match the info datastructure of \"\n \"the mne.Raw. \"\n \"Raw is -> {} \".format(raw.info[\"line_freq\"]),\n \"Sidecar JSON is -> {} \".format(line_freq))\n\n raw.info[\"line_freq\"] = line_freq\n return raw\n\n\ndef _handle_events_reading(events_fname, raw):\n \"\"\"Read associated events.tsv and populate raw.\n\n Handle onset, duration, and description of each event.\n \"\"\"\n logger.info('Reading events from {}.'.format(events_fname))\n events_dict = _from_tsv(events_fname)\n\n # Get the descriptions of the events\n if 'trial_type' in events_dict:\n trial_type_col_name = 'trial_type'\n elif 'stim_type' in events_dict: # Backward-compat with old datasets.\n trial_type_col_name = 'stim_type'\n warn(f'The events file, {events_fname}, contains a \"stim_type\" '\n f'column. 
This column should be renamed to \"trial_type\" for '\n f'BIDS compatibility.')\n else:\n trial_type_col_name = None\n\n if trial_type_col_name is not None:\n # Drop events unrelated to a trial type\n events_dict = _drop(events_dict, 'n/a', trial_type_col_name)\n\n if 'value' in events_dict:\n # Check whether the `trial_type` <> `value` mapping is unique.\n trial_types = events_dict[trial_type_col_name]\n values = np.asarray(events_dict['value'], dtype=str)\n for trial_type in np.unique(trial_types):\n idx = np.where(trial_type == np.atleast_1d(trial_types))[0]\n matching_values = values[idx]\n\n if len(np.unique(matching_values)) > 1:\n # Event type descriptors are ambiguous; create hierarchical\n # event descriptors.\n logger.info(\n f'The event \"{trial_type}\" refers to multiple event '\n f'values. Creating hierarchical event names.')\n for ii in idx:\n new_name = f'{trial_type}/{values[ii]}'\n logger.info(f' Renaming event: {trial_type} -> '\n f'{new_name}')\n trial_types[ii] = new_name\n descriptions = np.asarray(trial_types, dtype=str)\n else:\n descriptions = np.asarray(events_dict[trial_type_col_name],\n dtype=str)\n elif 'value' in events_dict:\n # If we don't have a proper description of the events, perhaps we have\n # at least an event value?\n # Drop events unrelated to value\n events_dict = _drop(events_dict, 'n/a', 'value')\n descriptions = np.asarray(events_dict['value'], dtype=str)\n\n # Worst case, we go with 'n/a' for all events\n else:\n descriptions = np.array(['n/a'] * len(events_dict['onset']), dtype=str)\n\n # Deal with \"n/a\" strings before converting to float\n ons = [np.nan if on == 'n/a' else on for on in events_dict['onset']]\n dus = [0 if du == 'n/a' else du for du in events_dict['duration']]\n onsets = np.asarray(ons, dtype=float)\n durations = np.asarray(dus, dtype=float)\n\n # Keep only events where onset is known\n good_events_idx = ~np.isnan(onsets)\n onsets = onsets[good_events_idx]\n durations = durations[good_events_idx]\n descriptions = descriptions[good_events_idx]\n del good_events_idx\n\n # Add Events to raw as annotations\n annot_from_events = mne.Annotations(onset=onsets,\n duration=durations,\n description=descriptions,\n orig_time=None)\n raw.set_annotations(annot_from_events)\n return raw\n\n\ndef _get_bads_from_tsv_data(tsv_data):\n \"\"\"Extract names of bads from data read from channels.tsv.\"\"\"\n idx = []\n for ch_idx, status in enumerate(tsv_data['status']):\n if status.lower() == 'bad':\n idx.append(ch_idx)\n\n bads = [tsv_data['name'][i] for i in idx]\n return bads\n\n\ndef _handle_channels_reading(channels_fname, raw):\n \"\"\"Read associated channels.tsv and populate raw.\n\n Updates status (bad) and types of channels.\n \"\"\"\n logger.info('Reading channel info from {}.'.format(channels_fname))\n channels_dict = _from_tsv(channels_fname)\n ch_names_tsv = channels_dict['name']\n\n # Now we can do some work.\n # The \"type\" column is mandatory in BIDS. 
We can use it to set channel\n # types in the raw data using a mapping between channel types\n channel_type_dict = dict()\n\n # Get the best mapping we currently have from BIDS to MNE nomenclature\n bids_to_mne_ch_types = _get_ch_type_mapping(fro='bids', to='mne')\n ch_types_json = channels_dict['type']\n for ch_name, ch_type in zip(ch_names_tsv, ch_types_json):\n\n # Try to map from BIDS nomenclature to MNE, leave channel type\n # untouched if we are uncertain\n updated_ch_type = bids_to_mne_ch_types.get(ch_type, None)\n\n if updated_ch_type is None:\n # XXX Try again with uppercase spelling – this should be removed\n # XXX once https://github.com/bids-standard/bids-validator/issues/1018 # noqa:E501\n # XXX has been resolved.\n # XXX x-ref https://github.com/mne-tools/mne-bids/issues/481\n updated_ch_type = bids_to_mne_ch_types.get(ch_type.upper(), None)\n if updated_ch_type is not None:\n msg = ('The BIDS dataset contains channel types in lowercase '\n 'spelling. This violates the BIDS specification and '\n 'will raise an error in the future.')\n warn(msg)\n\n if updated_ch_type is not None:\n channel_type_dict[ch_name] = updated_ch_type\n\n # Rename channels in loaded Raw to match those read from the BIDS sidecar\n for bids_ch_name, raw_ch_name in zip(ch_names_tsv, raw.ch_names.copy()):\n if bids_ch_name != raw_ch_name:\n raw.rename_channels({raw_ch_name: bids_ch_name})\n\n # Set the channel types in the raw data according to channels.tsv\n raw.set_channel_types(channel_type_dict)\n\n # Set bad channels based on _channels.tsv sidecar\n if 'status' in channels_dict:\n bads = _get_bads_from_tsv_data(channels_dict)\n raw.info['bads'] = bads\n\n return raw\n\n\ndef read_raw_bids(bids_path, extra_params=None, verbose=True):\n \"\"\"Read BIDS compatible data.\n\n Will attempt to read associated events.tsv and channels.tsv files to\n populate the returned raw object with raw.annotations and raw.info['bads'].\n\n Parameters\n ----------\n bids_path : mne_bids.BIDSPath\n The file to read. The :class:`mne_bids.BIDSPath` instance passed here\n **must** have the ``.root`` attribute set. The ``.datatype`` attribute\n **may** be set. If ``.datatype`` is not set and only one data type\n (e.g., only EEG or MEG data) is present in the dataset, it will be\n selected automatically.\n extra_params : None | dict\n Extra parameters to be passed to MNE read_raw_* functions.\n If a dict, for example: ``extra_params=dict(allow_maxshield=True)``.\n Note that the ``exclude`` parameter, which is supported by some\n MNE-Python readers, is not supported; instead, you need to subset\n your channels **after** reading.\n verbose : bool\n The verbosity level.\n\n Returns\n -------\n raw : mne.io.Raw\n The data as MNE-Python Raw object.\n\n Raises\n ------\n RuntimeError\n If multiple recording data types are present in the dataset, but\n ``datatype=None``.\n\n RuntimeError\n If more than one data files exist for the specified recording.\n\n RuntimeError\n If no data file in a supported format can be located.\n\n ValueError\n If the specified ``datatype`` cannot be found in the dataset.\n\n \"\"\"\n if not isinstance(bids_path, BIDSPath):\n raise RuntimeError('\"bids_path\" must be a BIDSPath object. 
Please '\n 'instantiate using mne_bids.BIDSPath().')\n\n bids_path = bids_path.copy()\n sub = bids_path.subject\n ses = bids_path.session\n bids_root = bids_path.root\n datatype = bids_path.datatype\n suffix = bids_path.suffix\n\n # check root available\n if bids_root is None:\n raise ValueError('The root of the \"bids_path\" must be set. '\n 'Please use `bids_path.update(root=\"<root>\")` '\n 'to set the root of the BIDS folder to read.')\n\n # infer the datatype and suffix if they are not present in the BIDSPath\n if datatype is None:\n datatype = _infer_datatype(root=bids_root, sub=sub, ses=ses)\n bids_path.update(datatype=datatype)\n if suffix is None:\n bids_path.update(suffix=datatype)\n\n data_dir = bids_path.directory\n bids_fname = bids_path.fpath.name\n\n if op.splitext(bids_fname)[1] == '.pdf':\n bids_raw_folder = op.join(data_dir, f'{bids_path.basename}')\n bids_fpath = glob.glob(op.join(bids_raw_folder, 'c,rf*'))[0]\n config = op.join(bids_raw_folder, 'config')\n else:\n bids_fpath = op.join(data_dir, bids_fname)\n config = None\n\n if extra_params is None:\n extra_params = dict()\n elif 'exclude' in extra_params:\n del extra_params['exclude']\n logger.info('\"exclude\" parameter is not supported by read_raw_bids')\n\n raw = _read_raw(bids_fpath, electrode=None, hsp=None, hpi=None,\n config=config, verbose=None, **extra_params)\n\n # Try to find an associated events.tsv to get information about the\n # events in the recorded data\n events_fname = _find_matching_sidecar(bids_path, suffix='events',\n extension='.tsv',\n on_error='warn')\n if events_fname is not None:\n raw = _handle_events_reading(events_fname, raw)\n\n # Try to find an associated channels.tsv to get information about the\n # status and type of present channels\n channels_fname = _find_matching_sidecar(bids_path,\n suffix='channels',\n extension='.tsv',\n on_error='warn')\n if channels_fname is not None:\n raw = _handle_channels_reading(channels_fname, raw)\n\n # Try to find an associated electrodes.tsv and coordsystem.json\n # to get information about the status and type of present channels\n on_error = 'warn' if suffix == 'ieeg' else 'ignore'\n electrodes_fname = _find_matching_sidecar(bids_path,\n suffix='electrodes',\n extension='.tsv',\n on_error=on_error)\n coordsystem_fname = _find_matching_sidecar(bids_path,\n suffix='coordsystem',\n extension='.json',\n on_error=on_error)\n if electrodes_fname is not None:\n if coordsystem_fname is None:\n raise RuntimeError(f\"BIDS mandates that the coordsystem.json \"\n f\"should exist if electrodes.tsv does. 
\"\n f\"Please create coordsystem.json for\"\n f\"{bids_path.basename}\")\n if datatype in ['meg', 'eeg', 'ieeg']:\n raw = _read_dig_bids(electrodes_fname, coordsystem_fname,\n raw, datatype, verbose)\n\n # Try to find an associated sidecar .json to get information about the\n # recording snapshot\n sidecar_fname = _find_matching_sidecar(bids_path,\n suffix=datatype,\n extension='.json',\n on_error='warn')\n if sidecar_fname is not None:\n raw = _handle_info_reading(sidecar_fname, raw, verbose=verbose)\n\n # read in associated scans filename\n scans_fname = BIDSPath(\n subject=bids_path.subject, session=bids_path.session,\n suffix='scans', extension='.tsv',\n root=bids_path.root\n ).fpath\n if scans_fname.exists():\n raw = _handle_scans_reading(scans_fname, raw, bids_path,\n verbose=verbose)\n\n # read in associated subject info from participants.tsv\n participants_tsv_fpath = op.join(bids_root, 'participants.tsv')\n subject = f\"sub-{bids_path.subject}\"\n if op.exists(participants_tsv_fpath):\n raw = _handle_participants_reading(participants_tsv_fpath, raw,\n subject, verbose=verbose)\n else:\n warn(\"Participants file not found for {}... Not reading \"\n \"in any particpants.tsv data.\".format(bids_fname))\n\n return raw\n\n\ndef get_head_mri_trans(bids_path, extra_params=None):\n \"\"\"Produce transformation matrix from MEG and MRI landmark points.\n\n Will attempt to read the landmarks of Nasion, LPA, and RPA from the sidecar\n files of (i) the MEG and (ii) the T1 weighted MRI data. The two sets of\n points will then be used to calculate a transformation matrix from head\n coordinates to MRI coordinates.\n\n Parameters\n ----------\n bids_path : mne_bids.BIDSPath\n The path of the recording for which to retrieve the transformation. The\n :class:`mne_bids.BIDSPath` instance passed here **must** have the\n ``.root`` attribute set.\n extra_params : None | dict\n Extra parameters to be passed to MNE read_raw_* functions when reading\n the lankmarks from the MEG file.\n If a dict, for example: ``extra_params=dict(allow_maxshield=True)``.\n\n Returns\n -------\n trans : mne.transforms.Transform\n The data transformation matrix from head to MRI coordinates\n\n \"\"\"\n if not has_nibabel(): # pragma: no cover\n raise ImportError('This function requires nibabel.')\n import nibabel as nib\n\n if not isinstance(bids_path, BIDSPath):\n raise RuntimeError('\"bids_path\" must be a BIDSPath object. Please '\n 'instantiate using mne_bids.BIDSPath().')\n\n # check root available\n bids_path = bids_path.copy()\n bids_root = bids_path.root\n if bids_root is None:\n raise ValueError('The root of the \"bids_path\" must be set. 
'\n 'Please use `bids_path.update(root=\"<root>\")` '\n 'to set the root of the BIDS folder to read.')\n # only get this for MEG data\n bids_path.update(datatype='meg')\n\n # Get the sidecar file for MRI landmarks\n bids_fname = bids_path.update(suffix='meg', root=bids_root)\n t1w_json_path = _find_matching_sidecar(bids_fname, suffix='T1w',\n extension='.json')\n\n # Get MRI landmarks from the JSON sidecar\n with open(t1w_json_path, 'r', encoding='utf-8-sig') as f:\n t1w_json = json.load(f)\n mri_coords_dict = t1w_json.get('AnatomicalLandmarkCoordinates', dict())\n mri_landmarks = np.asarray((mri_coords_dict.get('LPA', np.nan),\n mri_coords_dict.get('NAS', np.nan),\n mri_coords_dict.get('RPA', np.nan)))\n if np.isnan(mri_landmarks).any():\n raise RuntimeError('Could not parse T1w sidecar file: \"{}\"\\n\\n'\n 'The sidecar file MUST contain a key '\n '\"AnatomicalLandmarkCoordinates\" pointing to a '\n 'dict with keys \"LPA\", \"NAS\", \"RPA\". '\n 'Yet, the following structure was found:\\n\\n\"{}\"'\n .format(t1w_json_path, t1w_json))\n\n # The MRI landmarks are in \"voxels\". We need to convert the to the\n # neuromag RAS coordinate system in order to compare the with MEG landmarks\n # see also: `mne_bids.write.write_anat`\n t1w_path = t1w_json_path.replace('.json', '.nii')\n if not op.exists(t1w_path):\n t1w_path += '.gz' # perhaps it is .nii.gz? ... else raise an error\n if not op.exists(t1w_path):\n raise RuntimeError('Could not find the T1 weighted MRI associated '\n 'with \"{}\". Tried: \"{}\" but it does not exist.'\n .format(t1w_json_path, t1w_path))\n t1_nifti = nib.load(t1w_path)\n # Convert to MGH format to access vox2ras method\n t1_mgh = nib.MGHImage(t1_nifti.dataobj, t1_nifti.affine)\n\n # now extract transformation matrix and put back to RAS coordinates of MRI\n vox2ras_tkr = t1_mgh.header.get_vox2ras_tkr()\n mri_landmarks = apply_trans(vox2ras_tkr, mri_landmarks)\n mri_landmarks = mri_landmarks * 1e-3\n\n # Get MEG landmarks from the raw file\n _, ext = _parse_ext(bids_fname)\n if extra_params is None:\n extra_params = dict()\n if ext == '.fif':\n extra_params = dict(allow_maxshield=True)\n\n raw = read_raw_bids(bids_path=bids_path, extra_params=extra_params)\n meg_coords_dict = _extract_landmarks(raw.info['dig'])\n meg_landmarks = np.asarray((meg_coords_dict['LPA'],\n meg_coords_dict['NAS'],\n meg_coords_dict['RPA']))\n\n # Given the two sets of points, fit the transform\n trans_fitted = fit_matched_points(src_pts=meg_landmarks,\n tgt_pts=mri_landmarks)\n trans = mne.transforms.Transform(fro='head', to='mri', trans=trans_fitted)\n return trans\n", "path": "mne_bids/read.py" } ]
[ { "content": "\"\"\"Check whether a file format is supported by BIDS and then load it.\"\"\"\n# Authors: Mainak Jas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Teon Brooks <[email protected]>\n# Chris Holdgraf <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\nimport os.path as op\nimport glob\nimport json\nfrom datetime import datetime, timezone\n\nimport numpy as np\nimport mne\nfrom mne import io, read_events, events_from_annotations\nfrom mne.utils import has_nibabel, logger, warn\nfrom mne.coreg import fit_matched_points\nfrom mne.transforms import apply_trans\n\nfrom mne_bids.dig import _read_dig_bids\nfrom mne_bids.tsv_handler import _from_tsv, _drop\nfrom mne_bids.config import ALLOWED_DATATYPE_EXTENSIONS, reader, _map_options\nfrom mne_bids.utils import _extract_landmarks, _get_ch_type_mapping\nfrom mne_bids.path import (BIDSPath, _parse_ext, _find_matching_sidecar,\n _infer_datatype)\n\n\ndef _read_raw(raw_fpath, electrode=None, hsp=None, hpi=None,\n allow_maxshield=False, config=None, verbose=None, **kwargs):\n \"\"\"Read a raw file into MNE, making inferences based on extension.\"\"\"\n _, ext = _parse_ext(raw_fpath)\n\n # KIT systems\n if ext in ['.con', '.sqd']:\n raw = io.read_raw_kit(raw_fpath, elp=electrode, hsp=hsp,\n mrk=hpi, preload=False, **kwargs)\n\n # BTi systems\n elif ext == '.pdf':\n raw = io.read_raw_bti(raw_fpath, config_fname=config,\n head_shape_fname=hsp,\n preload=False, verbose=verbose,\n **kwargs)\n\n elif ext == '.fif':\n raw = reader[ext](raw_fpath, allow_maxshield, **kwargs)\n\n elif ext in ['.ds', '.vhdr', '.set', '.edf', '.bdf']:\n raw = reader[ext](raw_fpath, **kwargs)\n\n # MEF and NWB are allowed, but not yet implemented\n elif ext in ['.mef', '.nwb']:\n raise ValueError(f'Got \"{ext}\" as extension. This is an allowed '\n f'extension but there is no IO support for this '\n f'file format yet.')\n\n # No supported data found ...\n # ---------------------------\n else:\n raise ValueError(f'Raw file name extension must be one '\n f'of {ALLOWED_DATATYPE_EXTENSIONS}\\n'\n f'Got {ext}')\n return raw\n\n\ndef _read_events(events_data, event_id, raw, verbose=None):\n \"\"\"Retrieve events (for use in *_events.tsv) from FIFF/array & Annotations.\n\n Parameters\n ----------\n events_data : str | np.ndarray | None\n If a string, a path to an events file. If an array, an MNE events array\n (shape n_events, 3). If None, events will be generated from\n ``raw.annotations``.\n event_id : dict | None\n The event id dict used to create a 'trial_type' column in events.tsv,\n mapping a description key to an integer-valued event code.\n raw : mne.io.Raw\n The data as MNE-Python Raw object.\n verbose : bool | str | int | None\n If not None, override default verbose level (see :func:`mne.verbose`).\n\n Returns\n -------\n all_events : np.ndarray, shape = (n_events, 3)\n The first column contains the event time in samples and the third\n column contains the event id. 
The second column is ignored for now but\n typically contains the value of the trigger channel either immediately\n before the event or immediately after.\n all_dur : np.ndarray, shape (n_events,)\n The event durations in seconds.\n all_desc : dict\n A dictionary with the keys corresponding to the event descriptions and\n the values to the event IDs.\n\n \"\"\"\n # get events from events_data\n if isinstance(events_data, str):\n events = read_events(events_data, verbose=verbose).astype(int)\n elif isinstance(events_data, np.ndarray):\n if events_data.ndim != 2:\n raise ValueError('Events must have two dimensions, '\n f'found {events_data.ndim}')\n if events_data.shape[1] != 3:\n raise ValueError('Events must have second dimension of length 3, '\n f'found {events_data.shape[1]}')\n events = events_data\n else:\n events = np.empty(shape=(0, 3), dtype=int)\n\n if events.size > 0:\n # Only keep events for which we have an ID <> description mapping.\n ids_without_desc = set(events[:, 2]) - set(event_id.values())\n if ids_without_desc:\n raise ValueError(\n f'No description was specified for the following event(s): '\n f'{\", \".join([str(x) for x in sorted(ids_without_desc)])}. '\n f'Please add them to the event_id dictionary, or drop them '\n f'from the events_data array.'\n )\n del ids_without_desc\n mask = [e in list(event_id.values()) for e in events[:, 2]]\n events = events[mask]\n\n # Append events to raw.annotations. All event onsets are relative to\n # measurement beginning.\n id_to_desc_map = dict(zip(event_id.values(), event_id.keys()))\n # We don't pass `first_samp`, as set_annotations() below will take\n # care of this shift automatically.\n new_annotations = mne.annotations_from_events(\n events=events, sfreq=raw.info['sfreq'], event_desc=id_to_desc_map,\n orig_time=raw.annotations.orig_time, verbose=verbose)\n\n raw = raw.copy() # Don't alter the original.\n annotations = raw.annotations.copy()\n\n # We use `+=` here because `Annotations.__iadd__()` does the right\n # thing and also performs a sanity check on `Annotations.orig_time`.\n annotations += new_annotations\n raw.set_annotations(annotations)\n del id_to_desc_map, annotations, new_annotations\n\n # Now convert the Annotations to events.\n all_events, all_desc = events_from_annotations(\n raw,\n event_id=event_id,\n regexp=None, # Include `BAD_` and `EDGE_` Annotations, too.\n verbose=verbose\n )\n all_dur = raw.annotations.duration\n if all_events.size == 0 and 'rest' not in raw.filenames[0]:\n warn('No events found or provided. Please add annotations '\n 'to the raw data, or provide the events_data and '\n 'event_id parameters. If this is resting state data '\n 'it is recommended to name the task \"rest\".')\n\n return all_events, all_dur, all_desc\n\n\ndef _handle_participants_reading(participants_fname, raw,\n subject, verbose=None):\n participants_tsv = _from_tsv(participants_fname)\n subjects = participants_tsv['participant_id']\n row_ind = subjects.index(subject)\n\n # set data from participants tsv into subject_info\n for infokey, infovalue in participants_tsv.items():\n if infokey == 'sex' or infokey == 'hand':\n value = _map_options(what=infokey, key=infovalue[row_ind],\n fro='bids', to='mne')\n # We don't know how to translate to MNE, so skip.\n if value is None:\n if infokey == 'sex':\n info_str = 'subject sex'\n else:\n info_str = 'subject handedness'\n warn(f'Unable to map `{infokey}` value to MNE. 
'\n f'Not setting {info_str}.')\n else:\n value = infovalue[row_ind]\n # add data into raw.Info\n if raw.info['subject_info'] is None:\n raw.info['subject_info'] = dict()\n raw.info['subject_info'][infokey] = value\n\n return raw\n\n\ndef _handle_scans_reading(scans_fname, raw, bids_path, verbose=False):\n \"\"\"Read associated scans.tsv and set meas_date.\"\"\"\n scans_tsv = _from_tsv(scans_fname)\n fname = bids_path.fpath.name\n\n if '_split-' in fname:\n # for split files, scans only stores the filename without ``split``\n extension = bids_path.fpath.suffix\n bids_path.update(split=None, extension=extension)\n fname = bids_path.basename\n elif fname.endswith('.pdf'):\n # for BTI files, the scan is an entire directory\n fname = fname.split('.')[0]\n\n # get the row corresponding to the file\n # use string concatenation instead of os.path\n # to work nicely with windows\n data_fname = bids_path.datatype + '/' + fname\n fnames = scans_tsv['filename']\n acq_times = scans_tsv['acq_time']\n row_ind = fnames.index(data_fname)\n\n # extract the acquisition time from scans file\n acq_time = acq_times[row_ind]\n if acq_time != 'n/a':\n # microseconds in the acquisition time is optional\n if '.' not in acq_time:\n # acquisition time ends with '.%fZ' microseconds string\n acq_time += '.0Z'\n acq_time = datetime.strptime(acq_time, '%Y-%m-%dT%H:%M:%S.%fZ')\n acq_time = acq_time.replace(tzinfo=timezone.utc)\n\n if verbose:\n logger.debug(f'Loaded {scans_fname} scans file to set '\n f'acq_time as {acq_time}.')\n raw.set_meas_date(acq_time)\n return raw\n\n\ndef _handle_info_reading(sidecar_fname, raw, verbose=None):\n \"\"\"Read associated sidecar.json and populate raw.\n\n Handle PowerLineFrequency of recording.\n \"\"\"\n with open(sidecar_fname, 'r', encoding='utf-8-sig') as fin:\n sidecar_json = json.load(fin)\n\n # read in the sidecar JSON's line frequency\n line_freq = sidecar_json.get(\"PowerLineFrequency\")\n if line_freq == \"n/a\":\n line_freq = None\n\n if raw.info[\"line_freq\"] is not None and line_freq is None:\n line_freq = raw.info[\"line_freq\"] # take from file is present\n\n if raw.info[\"line_freq\"] is not None and line_freq is not None:\n # if both have a set Power Line Frequency, then\n # check that they are the same, else there is a\n # discrepency in the metadata of the dataset.\n if raw.info[\"line_freq\"] != line_freq:\n raise ValueError(\"Line frequency in sidecar json does \"\n \"not match the info datastructure of \"\n \"the mne.Raw. \"\n \"Raw is -> {} \".format(raw.info[\"line_freq\"]),\n \"Sidecar JSON is -> {} \".format(line_freq))\n\n raw.info[\"line_freq\"] = line_freq\n return raw\n\n\ndef _handle_events_reading(events_fname, raw):\n \"\"\"Read associated events.tsv and populate raw.\n\n Handle onset, duration, and description of each event.\n \"\"\"\n logger.info('Reading events from {}.'.format(events_fname))\n events_dict = _from_tsv(events_fname)\n\n # Get the descriptions of the events\n if 'trial_type' in events_dict:\n trial_type_col_name = 'trial_type'\n elif 'stim_type' in events_dict: # Backward-compat with old datasets.\n trial_type_col_name = 'stim_type'\n warn(f'The events file, {events_fname}, contains a \"stim_type\" '\n f'column. 
This column should be renamed to \"trial_type\" for '\n f'BIDS compatibility.')\n else:\n trial_type_col_name = None\n\n if trial_type_col_name is not None:\n # Drop events unrelated to a trial type\n events_dict = _drop(events_dict, 'n/a', trial_type_col_name)\n\n if 'value' in events_dict:\n # Check whether the `trial_type` <> `value` mapping is unique.\n trial_types = events_dict[trial_type_col_name]\n values = np.asarray(events_dict['value'], dtype=str)\n for trial_type in np.unique(trial_types):\n idx = np.where(trial_type == np.atleast_1d(trial_types))[0]\n matching_values = values[idx]\n\n if len(np.unique(matching_values)) > 1:\n # Event type descriptors are ambiguous; create hierarchical\n # event descriptors.\n logger.info(\n f'The event \"{trial_type}\" refers to multiple event '\n f'values. Creating hierarchical event names.')\n for ii in idx:\n new_name = f'{trial_type}/{values[ii]}'\n logger.info(f' Renaming event: {trial_type} -> '\n f'{new_name}')\n trial_types[ii] = new_name\n descriptions = np.asarray(trial_types, dtype=str)\n else:\n descriptions = np.asarray(events_dict[trial_type_col_name],\n dtype=str)\n elif 'value' in events_dict:\n # If we don't have a proper description of the events, perhaps we have\n # at least an event value?\n # Drop events unrelated to value\n events_dict = _drop(events_dict, 'n/a', 'value')\n descriptions = np.asarray(events_dict['value'], dtype=str)\n\n # Worst case, we go with 'n/a' for all events\n else:\n descriptions = np.array(['n/a'] * len(events_dict['onset']), dtype=str)\n\n # Deal with \"n/a\" strings before converting to float\n ons = [np.nan if on == 'n/a' else on for on in events_dict['onset']]\n dus = [0 if du == 'n/a' else du for du in events_dict['duration']]\n onsets = np.asarray(ons, dtype=float)\n durations = np.asarray(dus, dtype=float)\n\n # Keep only events where onset is known\n good_events_idx = ~np.isnan(onsets)\n onsets = onsets[good_events_idx]\n durations = durations[good_events_idx]\n descriptions = descriptions[good_events_idx]\n del good_events_idx\n\n # Add Events to raw as annotations\n annot_from_events = mne.Annotations(onset=onsets,\n duration=durations,\n description=descriptions,\n orig_time=None)\n raw.set_annotations(annot_from_events)\n return raw\n\n\ndef _get_bads_from_tsv_data(tsv_data):\n \"\"\"Extract names of bads from data read from channels.tsv.\"\"\"\n idx = []\n for ch_idx, status in enumerate(tsv_data['status']):\n if status.lower() == 'bad':\n idx.append(ch_idx)\n\n bads = [tsv_data['name'][i] for i in idx]\n return bads\n\n\ndef _handle_channels_reading(channels_fname, raw):\n \"\"\"Read associated channels.tsv and populate raw.\n\n Updates status (bad) and types of channels.\n \"\"\"\n logger.info('Reading channel info from {}.'.format(channels_fname))\n channels_dict = _from_tsv(channels_fname)\n ch_names_tsv = channels_dict['name']\n\n # Now we can do some work.\n # The \"type\" column is mandatory in BIDS. 
We can use it to set channel\n # types in the raw data using a mapping between channel types\n channel_type_dict = dict()\n\n # Get the best mapping we currently have from BIDS to MNE nomenclature\n bids_to_mne_ch_types = _get_ch_type_mapping(fro='bids', to='mne')\n ch_types_json = channels_dict['type']\n for ch_name, ch_type in zip(ch_names_tsv, ch_types_json):\n\n # Try to map from BIDS nomenclature to MNE, leave channel type\n # untouched if we are uncertain\n updated_ch_type = bids_to_mne_ch_types.get(ch_type, None)\n\n if updated_ch_type is None:\n # XXX Try again with uppercase spelling – this should be removed\n # XXX once https://github.com/bids-standard/bids-validator/issues/1018 # noqa:E501\n # XXX has been resolved.\n # XXX x-ref https://github.com/mne-tools/mne-bids/issues/481\n updated_ch_type = bids_to_mne_ch_types.get(ch_type.upper(), None)\n if updated_ch_type is not None:\n msg = ('The BIDS dataset contains channel types in lowercase '\n 'spelling. This violates the BIDS specification and '\n 'will raise an error in the future.')\n warn(msg)\n\n if updated_ch_type is not None:\n channel_type_dict[ch_name] = updated_ch_type\n\n # Rename channels in loaded Raw to match those read from the BIDS sidecar\n for bids_ch_name, raw_ch_name in zip(ch_names_tsv, raw.ch_names.copy()):\n if bids_ch_name != raw_ch_name:\n raw.rename_channels({raw_ch_name: bids_ch_name})\n\n # Set the channel types in the raw data according to channels.tsv\n raw.set_channel_types(channel_type_dict)\n\n # Set bad channels based on _channels.tsv sidecar\n if 'status' in channels_dict:\n bads = _get_bads_from_tsv_data(channels_dict)\n raw.info['bads'] = bads\n\n return raw\n\n\ndef read_raw_bids(bids_path, extra_params=None, verbose=True):\n \"\"\"Read BIDS compatible data.\n\n Will attempt to read associated events.tsv and channels.tsv files to\n populate the returned raw object with raw.annotations and raw.info['bads'].\n\n Parameters\n ----------\n bids_path : mne_bids.BIDSPath\n The file to read. The :class:`mne_bids.BIDSPath` instance passed here\n **must** have the ``.root`` attribute set. The ``.datatype`` attribute\n **may** be set. If ``.datatype`` is not set and only one data type\n (e.g., only EEG or MEG data) is present in the dataset, it will be\n selected automatically.\n extra_params : None | dict\n Extra parameters to be passed to MNE read_raw_* functions.\n If a dict, for example: ``extra_params=dict(allow_maxshield=True)``.\n Note that the ``exclude`` parameter, which is supported by some\n MNE-Python readers, is not supported; instead, you need to subset\n your channels **after** reading.\n verbose : bool\n The verbosity level.\n\n Returns\n -------\n raw : mne.io.Raw\n The data as MNE-Python Raw object.\n\n Raises\n ------\n RuntimeError\n If multiple recording data types are present in the dataset, but\n ``datatype=None``.\n\n RuntimeError\n If more than one data files exist for the specified recording.\n\n RuntimeError\n If no data file in a supported format can be located.\n\n ValueError\n If the specified ``datatype`` cannot be found in the dataset.\n\n \"\"\"\n if not isinstance(bids_path, BIDSPath):\n raise RuntimeError('\"bids_path\" must be a BIDSPath object. 
Please '\n 'instantiate using mne_bids.BIDSPath().')\n\n bids_path = bids_path.copy()\n sub = bids_path.subject\n ses = bids_path.session\n bids_root = bids_path.root\n datatype = bids_path.datatype\n suffix = bids_path.suffix\n\n # check root available\n if bids_root is None:\n raise ValueError('The root of the \"bids_path\" must be set. '\n 'Please use `bids_path.update(root=\"<root>\")` '\n 'to set the root of the BIDS folder to read.')\n\n # infer the datatype and suffix if they are not present in the BIDSPath\n if datatype is None:\n datatype = _infer_datatype(root=bids_root, sub=sub, ses=ses)\n bids_path.update(datatype=datatype)\n if suffix is None:\n bids_path.update(suffix=datatype)\n\n data_dir = bids_path.directory\n bids_fname = bids_path.fpath.name\n\n if op.splitext(bids_fname)[1] == '.pdf':\n bids_raw_folder = op.join(data_dir, f'{bids_path.basename}')\n bids_fpath = glob.glob(op.join(bids_raw_folder, 'c,rf*'))[0]\n config = op.join(bids_raw_folder, 'config')\n else:\n bids_fpath = op.join(data_dir, bids_fname)\n config = None\n\n if extra_params is None:\n extra_params = dict()\n elif 'exclude' in extra_params:\n del extra_params['exclude']\n logger.info('\"exclude\" parameter is not supported by read_raw_bids')\n\n raw = _read_raw(bids_fpath, electrode=None, hsp=None, hpi=None,\n config=config, verbose=None, **extra_params)\n\n # Try to find an associated events.tsv to get information about the\n # events in the recorded data\n events_fname = _find_matching_sidecar(bids_path, suffix='events',\n extension='.tsv',\n on_error='warn')\n if events_fname is not None:\n raw = _handle_events_reading(events_fname, raw)\n\n # Try to find an associated channels.tsv to get information about the\n # status and type of present channels\n channels_fname = _find_matching_sidecar(bids_path,\n suffix='channels',\n extension='.tsv',\n on_error='warn')\n if channels_fname is not None:\n raw = _handle_channels_reading(channels_fname, raw)\n\n # Try to find an associated electrodes.tsv and coordsystem.json\n # to get information about the status and type of present channels\n on_error = 'warn' if suffix == 'ieeg' else 'ignore'\n electrodes_fname = _find_matching_sidecar(bids_path,\n suffix='electrodes',\n extension='.tsv',\n on_error=on_error)\n coordsystem_fname = _find_matching_sidecar(bids_path,\n suffix='coordsystem',\n extension='.json',\n on_error=on_error)\n if electrodes_fname is not None:\n if coordsystem_fname is None:\n raise RuntimeError(f\"BIDS mandates that the coordsystem.json \"\n f\"should exist if electrodes.tsv does. 
\"\n f\"Please create coordsystem.json for\"\n f\"{bids_path.basename}\")\n if datatype in ['meg', 'eeg', 'ieeg']:\n raw = _read_dig_bids(electrodes_fname, coordsystem_fname,\n raw, datatype, verbose)\n\n # Try to find an associated sidecar .json to get information about the\n # recording snapshot\n sidecar_fname = _find_matching_sidecar(bids_path,\n suffix=datatype,\n extension='.json',\n on_error='warn')\n if sidecar_fname is not None:\n raw = _handle_info_reading(sidecar_fname, raw, verbose=verbose)\n\n # read in associated scans filename\n scans_fname = BIDSPath(\n subject=bids_path.subject, session=bids_path.session,\n suffix='scans', extension='.tsv',\n root=bids_path.root\n ).fpath\n if scans_fname.exists():\n raw = _handle_scans_reading(scans_fname, raw, bids_path,\n verbose=verbose)\n\n # read in associated subject info from participants.tsv\n participants_tsv_fpath = op.join(bids_root, 'participants.tsv')\n subject = f\"sub-{bids_path.subject}\"\n if op.exists(participants_tsv_fpath):\n raw = _handle_participants_reading(participants_tsv_fpath, raw,\n subject, verbose=verbose)\n else:\n warn(\"Participants file not found for {}... Not reading \"\n \"in any particpants.tsv data.\".format(bids_fname))\n\n return raw\n\n\ndef get_head_mri_trans(bids_path, extra_params=None):\n \"\"\"Produce transformation matrix from MEG and MRI landmark points.\n\n Will attempt to read the landmarks of Nasion, LPA, and RPA from the sidecar\n files of (i) the MEG and (ii) the T1 weighted MRI data. The two sets of\n points will then be used to calculate a transformation matrix from head\n coordinates to MRI coordinates.\n\n Parameters\n ----------\n bids_path : mne_bids.BIDSPath\n The path of the recording for which to retrieve the transformation. The\n :class:`mne_bids.BIDSPath` instance passed here **must** have the\n ``.root`` attribute set.\n extra_params : None | dict\n Extra parameters to be passed to MNE read_raw_* functions when reading\n the lankmarks from the MEG file.\n If a dict, for example: ``extra_params=dict(allow_maxshield=True)``.\n\n Returns\n -------\n trans : mne.transforms.Transform\n The data transformation matrix from head to MRI coordinates\n\n \"\"\"\n if not has_nibabel(): # pragma: no cover\n raise ImportError('This function requires nibabel.')\n import nibabel as nib\n\n if not isinstance(bids_path, BIDSPath):\n raise RuntimeError('\"bids_path\" must be a BIDSPath object. Please '\n 'instantiate using mne_bids.BIDSPath().')\n\n # check root available\n bids_path = bids_path.copy()\n bids_root = bids_path.root\n if bids_root is None:\n raise ValueError('The root of the \"bids_path\" must be set. 
'\n 'Please use `bids_path.update(root=\"<root>\")` '\n 'to set the root of the BIDS folder to read.')\n # only get this for MEG data\n bids_path.update(datatype='meg')\n\n # Get the sidecar file for MRI landmarks\n bids_fname = bids_path.update(suffix='meg', root=bids_root)\n t1w_json_path = _find_matching_sidecar(bids_fname, suffix='T1w',\n extension='.json')\n\n # Get MRI landmarks from the JSON sidecar\n with open(t1w_json_path, 'r', encoding='utf-8-sig') as f:\n t1w_json = json.load(f)\n mri_coords_dict = t1w_json.get('AnatomicalLandmarkCoordinates', dict())\n mri_landmarks = np.asarray((mri_coords_dict.get('LPA', np.nan),\n mri_coords_dict.get('NAS', np.nan),\n mri_coords_dict.get('RPA', np.nan)))\n if np.isnan(mri_landmarks).any():\n raise RuntimeError('Could not parse T1w sidecar file: \"{}\"\\n\\n'\n 'The sidecar file MUST contain a key '\n '\"AnatomicalLandmarkCoordinates\" pointing to a '\n 'dict with keys \"LPA\", \"NAS\", \"RPA\". '\n 'Yet, the following structure was found:\\n\\n\"{}\"'\n .format(t1w_json_path, t1w_json))\n\n # The MRI landmarks are in \"voxels\". We need to convert the to the\n # neuromag RAS coordinate system in order to compare the with MEG landmarks\n # see also: `mne_bids.write.write_anat`\n t1w_path = t1w_json_path.replace('.json', '.nii')\n if not op.exists(t1w_path):\n t1w_path += '.gz' # perhaps it is .nii.gz? ... else raise an error\n if not op.exists(t1w_path):\n raise RuntimeError('Could not find the T1 weighted MRI associated '\n 'with \"{}\". Tried: \"{}\" but it does not exist.'\n .format(t1w_json_path, t1w_path))\n t1_nifti = nib.load(t1w_path)\n # Convert to MGH format to access vox2ras method\n t1_mgh = nib.MGHImage(t1_nifti.dataobj, t1_nifti.affine)\n\n # now extract transformation matrix and put back to RAS coordinates of MRI\n vox2ras_tkr = t1_mgh.header.get_vox2ras_tkr()\n mri_landmarks = apply_trans(vox2ras_tkr, mri_landmarks)\n mri_landmarks = mri_landmarks * 1e-3\n\n # Get MEG landmarks from the raw file\n _, ext = _parse_ext(bids_fname)\n if extra_params is None:\n extra_params = dict()\n if ext == '.fif':\n extra_params = dict(allow_maxshield=True)\n\n raw = read_raw_bids(bids_path=bids_path, extra_params=extra_params)\n meg_coords_dict = _extract_landmarks(raw.info['dig'])\n meg_landmarks = np.asarray((meg_coords_dict['LPA'],\n meg_coords_dict['NAS'],\n meg_coords_dict['RPA']))\n\n # Given the two sets of points, fit the transform\n trans_fitted = fit_matched_points(src_pts=meg_landmarks,\n tgt_pts=mri_landmarks)\n trans = mne.transforms.Transform(fro='head', to='mri', trans=trans_fitted)\n return trans\n", "path": "mne_bids/read.py" } ]
diff --git a/doc/whats_new.rst b/doc/whats_new.rst index f5557ec3a..a4bf8231b 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -56,6 +56,7 @@ Bug fixes - :func:`mne_bids.stats.count_events` now handles files without a ``trial_type`` or ``stim_type`` column gracefully, by `Richard Höchenberger`_ (:gh:`682`) - :func:`mne_bids.read_raw_bids` now correctly treats ``coordsystem.json`` as optional for EEG and MEG data, by `Diego Lozano-Soldevilla`_ (:gh:`691`) - :func:`mne_bids.read_raw_bids` now ignores ``exclude`` parameters passed via ``extra_params``, by `Richard Höchenberger`_ (:gh:`703`) +- :func:`mne_bids.write_raw_bids` now retains original event IDs in the ``value`` column of ``*_events.tsv``, by `Richard Höchenberger`_ (:gh:`708`) :doc:`Find out what was new in previous releases <whats_new_previous_releases>` diff --git a/mne_bids/read.py b/mne_bids/read.py index 96389193c..7f9feecd2 100644 --- a/mne_bids/read.py +++ b/mne_bids/read.py @@ -144,6 +144,7 @@ def _read_events(events_data, event_id, raw, verbose=None): # Now convert the Annotations to events. all_events, all_desc = events_from_annotations( raw, + event_id=event_id, regexp=None, # Include `BAD_` and `EDGE_` Annotations, too. verbose=verbose ) diff --git a/mne_bids/tests/test_write.py b/mne_bids/tests/test_write.py index 1be9ee111..7b5eea8b3 100644 --- a/mne_bids/tests/test_write.py +++ b/mne_bids/tests/test_write.py @@ -2154,6 +2154,36 @@ def test_undescribed_events(_bids_validate, drop_undescribed_events): _bids_validate(bids_root) [email protected](warning_str['channel_unit_changed']) +def test_event_storage(): + """Test we're retaining the original event IDs when storing events.""" + bids_root = _TempDir() + bids_path = _bids_path.copy().update(root=bids_root, datatype='meg') + data_path = testing.data_path() + raw_fname = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc_raw.fif') + events_fname = op.join(data_path, 'MEG', 'sample', + 'sample_audvis_trunc_raw-eve.fif') + events_tsv_fname = (bids_path.copy() + .update(suffix='events', extension='.tsv')) + + events = mne.read_events(events_fname) + events = events[events[:, -1] != 0] # Drop unused events + # Change an event ID + idx = np.where(events[:, -1] == 1)[0] + events[idx, -1] = 123 + + event_id = {'Auditory/Left': 123, 'Auditory/Right': 2, 'Visual/Left': 3, + 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} + + raw = _read_raw_fif(raw_fname) + write_raw_bids(raw=raw, bids_path=bids_path, events_data=events, + event_id=event_id, overwrite=False) + + events_tsv = _from_tsv(events_tsv_fname) + assert set(int(e) for e in events_tsv['value']) == set(event_id.values()) + + @pytest.mark.parametrize( 'dir_name, fname, reader, datatype, coord_frame', [ ('EDF', 'test_reduced.edf', _read_raw_edf, 'ieeg', 'mni_tal'),
PaddlePaddle__PaddleOCR-5072
On Windows, OCR recognition fails with the error: 'env' is not recognized as an internal or external command, operable program or batch file.

Please provide the following information to quickly locate the problem
- System Environment: Windows 10
- Version: Paddle: 2.3, PaddleOCR: 2.3.0.1
- Related components: ppocr
- Command Code:
![QQ图片20211227103121](https://user-images.githubusercontent.com/3079297/147428430-1e61ca3e-c4ce-4c1e-8095-7ec327592752.jpg)
- Complete Error Message:
![QQ图片20211226232206](https://user-images.githubusercontent.com/3079297/147428471-03f24025-8077-4491-883e-77b7ee20326a.jpg)

The error appears to be caused by the following code:

PaddleOCR/tools/infer/utility.py Line#315

```
def get_infer_gpuid():
    if not paddle.fluid.core.is_compiled_with_rocm():
        cmd = "env | grep CUDA_VISIBLE_DEVICES"
    else:
        cmd = "env | grep HIP_VISIBLE_DEVICES"
    env_cuda = os.popen(cmd).readlines()
```

Replacing the code with

```
def get_infer_gpuid():
    return 0
```

works around the issue temporarily.
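For reference, a minimal cross-platform sketch (not the project's confirmed fix) would read the visible-devices variable directly from `os.environ` instead of spawning the Unix-only `env | grep ...` pipeline that triggers this error on Windows. The function name and the `is_compiled_with_rocm()` check are taken from the snippet above; the handling of comma-separated device lists below is an assumption:

```
import os

import paddle


def get_infer_gpuid():
    # Sketch: query os.environ directly instead of running `env | grep ...`,
    # which only exists in Unix-like shells and fails on Windows.
    if not paddle.fluid.core.is_compiled_with_rocm():
        env_cuda = os.environ.get("CUDA_VISIBLE_DEVICES", "")
    else:
        env_cuda = os.environ.get("HIP_VISIBLE_DEVICES", "")
    if env_cuda.strip() == "":
        # Variable not set: fall back to the first device, matching the
        # original function's behaviour when grep finds nothing.
        return 0
    # Assumed parsing: take the first id of a possibly comma-separated list,
    # e.g. "2,3" -> 2.
    return int(env_cuda.strip().split(",")[0])
```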
[ { "content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport sys\nimport cv2\nimport numpy as np\nimport paddle\nfrom PIL import Image, ImageDraw, ImageFont\nimport math\nfrom paddle import inference\nimport time\nfrom ppocr.utils.logging import get_logger\n\n\ndef str2bool(v):\n return v.lower() in (\"true\", \"t\", \"1\")\n\n\ndef init_args():\n parser = argparse.ArgumentParser()\n # params for prediction engine\n parser.add_argument(\"--use_gpu\", type=str2bool, default=True)\n parser.add_argument(\"--ir_optim\", type=str2bool, default=True)\n parser.add_argument(\"--use_tensorrt\", type=str2bool, default=False)\n parser.add_argument(\"--min_subgraph_size\", type=int, default=15)\n parser.add_argument(\"--precision\", type=str, default=\"fp32\")\n parser.add_argument(\"--gpu_mem\", type=int, default=500)\n\n # params for text detector\n parser.add_argument(\"--image_dir\", type=str)\n parser.add_argument(\"--det_algorithm\", type=str, default='DB')\n parser.add_argument(\"--det_model_dir\", type=str)\n parser.add_argument(\"--det_limit_side_len\", type=float, default=960)\n parser.add_argument(\"--det_limit_type\", type=str, default='max')\n\n # DB parmas\n parser.add_argument(\"--det_db_thresh\", type=float, default=0.3)\n parser.add_argument(\"--det_db_box_thresh\", type=float, default=0.6)\n parser.add_argument(\"--det_db_unclip_ratio\", type=float, default=1.5)\n parser.add_argument(\"--max_batch_size\", type=int, default=10)\n parser.add_argument(\"--use_dilation\", type=str2bool, default=False)\n parser.add_argument(\"--det_db_score_mode\", type=str, default=\"fast\")\n # EAST parmas\n parser.add_argument(\"--det_east_score_thresh\", type=float, default=0.8)\n parser.add_argument(\"--det_east_cover_thresh\", type=float, default=0.1)\n parser.add_argument(\"--det_east_nms_thresh\", type=float, default=0.2)\n\n # SAST parmas\n parser.add_argument(\"--det_sast_score_thresh\", type=float, default=0.5)\n parser.add_argument(\"--det_sast_nms_thresh\", type=float, default=0.2)\n parser.add_argument(\"--det_sast_polygon\", type=str2bool, default=False)\n\n # PSE parmas\n parser.add_argument(\"--det_pse_thresh\", type=float, default=0)\n parser.add_argument(\"--det_pse_box_thresh\", type=float, default=0.85)\n parser.add_argument(\"--det_pse_min_area\", type=float, default=16)\n parser.add_argument(\"--det_pse_box_type\", type=str, default='box')\n parser.add_argument(\"--det_pse_scale\", type=int, default=1)\n\n # params for text recognizer\n parser.add_argument(\"--rec_algorithm\", type=str, default='CRNN')\n parser.add_argument(\"--rec_model_dir\", type=str)\n parser.add_argument(\"--rec_image_shape\", type=str, default=\"3, 32, 320\")\n parser.add_argument(\"--rec_batch_num\", type=int, default=6)\n parser.add_argument(\"--max_text_length\", type=int, default=25)\n parser.add_argument(\n \"--rec_char_dict_path\",\n type=str,\n default=\"./ppocr/utils/ppocr_keys_v1.txt\")\n 
parser.add_argument(\"--use_space_char\", type=str2bool, default=True)\n parser.add_argument(\n \"--vis_font_path\", type=str, default=\"./doc/fonts/simfang.ttf\")\n parser.add_argument(\"--drop_score\", type=float, default=0.5)\n\n # params for e2e\n parser.add_argument(\"--e2e_algorithm\", type=str, default='PGNet')\n parser.add_argument(\"--e2e_model_dir\", type=str)\n parser.add_argument(\"--e2e_limit_side_len\", type=float, default=768)\n parser.add_argument(\"--e2e_limit_type\", type=str, default='max')\n\n # PGNet parmas\n parser.add_argument(\"--e2e_pgnet_score_thresh\", type=float, default=0.5)\n parser.add_argument(\n \"--e2e_char_dict_path\", type=str, default=\"./ppocr/utils/ic15_dict.txt\")\n parser.add_argument(\"--e2e_pgnet_valid_set\", type=str, default='totaltext')\n parser.add_argument(\"--e2e_pgnet_mode\", type=str, default='fast')\n\n # params for text classifier\n parser.add_argument(\"--use_angle_cls\", type=str2bool, default=False)\n parser.add_argument(\"--cls_model_dir\", type=str)\n parser.add_argument(\"--cls_image_shape\", type=str, default=\"3, 48, 192\")\n parser.add_argument(\"--label_list\", type=list, default=['0', '180'])\n parser.add_argument(\"--cls_batch_num\", type=int, default=6)\n parser.add_argument(\"--cls_thresh\", type=float, default=0.9)\n\n parser.add_argument(\"--enable_mkldnn\", type=str2bool, default=False)\n parser.add_argument(\"--cpu_threads\", type=int, default=10)\n parser.add_argument(\"--use_pdserving\", type=str2bool, default=False)\n parser.add_argument(\"--warmup\", type=str2bool, default=False)\n\n #\n parser.add_argument(\n \"--draw_img_save_dir\", type=str, default=\"./inference_results\")\n parser.add_argument(\"--save_crop_res\", type=str2bool, default=False)\n parser.add_argument(\"--crop_res_save_dir\", type=str, default=\"./output\")\n\n # multi-process\n parser.add_argument(\"--use_mp\", type=str2bool, default=False)\n parser.add_argument(\"--total_process_num\", type=int, default=1)\n parser.add_argument(\"--process_id\", type=int, default=0)\n\n parser.add_argument(\"--benchmark\", type=str2bool, default=False)\n parser.add_argument(\"--save_log_path\", type=str, default=\"./log_output/\")\n\n parser.add_argument(\"--show_log\", type=str2bool, default=True)\n parser.add_argument(\"--use_onnx\", type=str2bool, default=False)\n return parser\n\n\ndef parse_args():\n parser = init_args()\n return parser.parse_args()\n\n\ndef create_predictor(args, mode, logger):\n if mode == \"det\":\n model_dir = args.det_model_dir\n elif mode == 'cls':\n model_dir = args.cls_model_dir\n elif mode == 'rec':\n model_dir = args.rec_model_dir\n elif mode == 'table':\n model_dir = args.table_model_dir\n else:\n model_dir = args.e2e_model_dir\n\n if model_dir is None:\n logger.info(\"not find {} model file path {}\".format(mode, model_dir))\n sys.exit(0)\n if args.use_onnx:\n import onnxruntime as ort\n model_file_path = model_dir\n if not os.path.exists(model_file_path):\n raise ValueError(\"not find model file path {}\".format(\n model_file_path))\n sess = ort.InferenceSession(model_file_path)\n return sess, sess.get_inputs()[0], None, None\n\n else:\n model_file_path = model_dir + \"/inference.pdmodel\"\n params_file_path = model_dir + \"/inference.pdiparams\"\n if not os.path.exists(model_file_path):\n raise ValueError(\"not find model file path {}\".format(\n model_file_path))\n if not os.path.exists(params_file_path):\n raise ValueError(\"not find params file path {}\".format(\n params_file_path))\n\n config = 
inference.Config(model_file_path, params_file_path)\n\n if hasattr(args, 'precision'):\n if args.precision == \"fp16\" and args.use_tensorrt:\n precision = inference.PrecisionType.Half\n elif args.precision == \"int8\":\n precision = inference.PrecisionType.Int8\n else:\n precision = inference.PrecisionType.Float32\n else:\n precision = inference.PrecisionType.Float32\n\n if args.use_gpu:\n gpu_id = get_infer_gpuid()\n if gpu_id is None:\n logger.warning(\n \"GPU is not found in current device by nvidia-smi. Please check your device or ignore it if run on jeston.\"\n )\n config.enable_use_gpu(args.gpu_mem, 0)\n if args.use_tensorrt:\n config.enable_tensorrt_engine(\n workspace_size=1 << 30,\n precision_mode=precision,\n max_batch_size=args.max_batch_size,\n min_subgraph_size=args.min_subgraph_size)\n # skip the minmum trt subgraph\n use_dynamic_shape = True\n if mode == \"det\":\n min_input_shape = {\n \"x\": [1, 3, 50, 50],\n \"conv2d_92.tmp_0\": [1, 120, 20, 20],\n \"conv2d_91.tmp_0\": [1, 24, 10, 10],\n \"conv2d_59.tmp_0\": [1, 96, 20, 20],\n \"nearest_interp_v2_1.tmp_0\": [1, 256, 10, 10],\n \"nearest_interp_v2_2.tmp_0\": [1, 256, 20, 20],\n \"conv2d_124.tmp_0\": [1, 256, 20, 20],\n \"nearest_interp_v2_3.tmp_0\": [1, 64, 20, 20],\n \"nearest_interp_v2_4.tmp_0\": [1, 64, 20, 20],\n \"nearest_interp_v2_5.tmp_0\": [1, 64, 20, 20],\n \"elementwise_add_7\": [1, 56, 2, 2],\n \"nearest_interp_v2_0.tmp_0\": [1, 256, 2, 2]\n }\n max_input_shape = {\n \"x\": [1, 3, 1536, 1536],\n \"conv2d_92.tmp_0\": [1, 120, 400, 400],\n \"conv2d_91.tmp_0\": [1, 24, 200, 200],\n \"conv2d_59.tmp_0\": [1, 96, 400, 400],\n \"nearest_interp_v2_1.tmp_0\": [1, 256, 200, 200],\n \"conv2d_124.tmp_0\": [1, 256, 400, 400],\n \"nearest_interp_v2_2.tmp_0\": [1, 256, 400, 400],\n \"nearest_interp_v2_3.tmp_0\": [1, 64, 400, 400],\n \"nearest_interp_v2_4.tmp_0\": [1, 64, 400, 400],\n \"nearest_interp_v2_5.tmp_0\": [1, 64, 400, 400],\n \"elementwise_add_7\": [1, 56, 400, 400],\n \"nearest_interp_v2_0.tmp_0\": [1, 256, 400, 400]\n }\n opt_input_shape = {\n \"x\": [1, 3, 640, 640],\n \"conv2d_92.tmp_0\": [1, 120, 160, 160],\n \"conv2d_91.tmp_0\": [1, 24, 80, 80],\n \"conv2d_59.tmp_0\": [1, 96, 160, 160],\n \"nearest_interp_v2_1.tmp_0\": [1, 256, 80, 80],\n \"nearest_interp_v2_2.tmp_0\": [1, 256, 160, 160],\n \"conv2d_124.tmp_0\": [1, 256, 160, 160],\n \"nearest_interp_v2_3.tmp_0\": [1, 64, 160, 160],\n \"nearest_interp_v2_4.tmp_0\": [1, 64, 160, 160],\n \"nearest_interp_v2_5.tmp_0\": [1, 64, 160, 160],\n \"elementwise_add_7\": [1, 56, 40, 40],\n \"nearest_interp_v2_0.tmp_0\": [1, 256, 40, 40]\n }\n min_pact_shape = {\n \"nearest_interp_v2_26.tmp_0\": [1, 256, 20, 20],\n \"nearest_interp_v2_27.tmp_0\": [1, 64, 20, 20],\n \"nearest_interp_v2_28.tmp_0\": [1, 64, 20, 20],\n \"nearest_interp_v2_29.tmp_0\": [1, 64, 20, 20]\n }\n max_pact_shape = {\n \"nearest_interp_v2_26.tmp_0\": [1, 256, 400, 400],\n \"nearest_interp_v2_27.tmp_0\": [1, 64, 400, 400],\n \"nearest_interp_v2_28.tmp_0\": [1, 64, 400, 400],\n \"nearest_interp_v2_29.tmp_0\": [1, 64, 400, 400]\n }\n opt_pact_shape = {\n \"nearest_interp_v2_26.tmp_0\": [1, 256, 160, 160],\n \"nearest_interp_v2_27.tmp_0\": [1, 64, 160, 160],\n \"nearest_interp_v2_28.tmp_0\": [1, 64, 160, 160],\n \"nearest_interp_v2_29.tmp_0\": [1, 64, 160, 160]\n }\n min_input_shape.update(min_pact_shape)\n max_input_shape.update(max_pact_shape)\n opt_input_shape.update(opt_pact_shape)\n elif mode == \"rec\":\n if args.rec_algorithm != \"CRNN\":\n use_dynamic_shape = False\n min_input_shape = {\"x\": [1, 3, 
32, 10]}\n max_input_shape = {\"x\": [args.rec_batch_num, 3, 32, 1536]}\n opt_input_shape = {\"x\": [args.rec_batch_num, 3, 32, 320]}\n elif mode == \"cls\":\n min_input_shape = {\"x\": [1, 3, 48, 10]}\n max_input_shape = {\"x\": [args.rec_batch_num, 3, 48, 1024]}\n opt_input_shape = {\"x\": [args.rec_batch_num, 3, 48, 320]}\n else:\n use_dynamic_shape = False\n if use_dynamic_shape:\n config.set_trt_dynamic_shape_info(\n min_input_shape, max_input_shape, opt_input_shape)\n\n else:\n config.disable_gpu()\n if hasattr(args, \"cpu_threads\"):\n config.set_cpu_math_library_num_threads(args.cpu_threads)\n else:\n # default cpu threads as 10\n config.set_cpu_math_library_num_threads(10)\n if args.enable_mkldnn:\n # cache 10 different shapes for mkldnn to avoid memory leak\n config.set_mkldnn_cache_capacity(10)\n config.enable_mkldnn()\n if args.precision == \"fp16\":\n config.enable_mkldnn_bfloat16()\n # enable memory optim\n config.enable_memory_optim()\n config.disable_glog_info()\n\n config.delete_pass(\"conv_transpose_eltwiseadd_bn_fuse_pass\")\n if mode == 'table':\n config.delete_pass(\"fc_fuse_pass\") # not supported for table\n config.switch_use_feed_fetch_ops(False)\n config.switch_ir_optim(True)\n\n # create predictor\n predictor = inference.create_predictor(config)\n input_names = predictor.get_input_names()\n for name in input_names:\n input_tensor = predictor.get_input_handle(name)\n output_names = predictor.get_output_names()\n output_tensors = []\n for output_name in output_names:\n output_tensor = predictor.get_output_handle(output_name)\n output_tensors.append(output_tensor)\n return predictor, input_tensor, output_tensors, config\n\n\ndef get_infer_gpuid():\n if not paddle.fluid.core.is_compiled_with_rocm():\n cmd = \"env | grep CUDA_VISIBLE_DEVICES\"\n else:\n cmd = \"env | grep HIP_VISIBLE_DEVICES\"\n env_cuda = os.popen(cmd).readlines()\n if len(env_cuda) == 0:\n return 0\n else:\n gpu_id = env_cuda[0].strip().split(\"=\")[1]\n return int(gpu_id[0])\n\n\ndef draw_e2e_res(dt_boxes, strs, img_path):\n src_im = cv2.imread(img_path)\n for box, str in zip(dt_boxes, strs):\n box = box.astype(np.int32).reshape((-1, 1, 2))\n cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)\n cv2.putText(\n src_im,\n str,\n org=(int(box[0, 0, 0]), int(box[0, 0, 1])),\n fontFace=cv2.FONT_HERSHEY_COMPLEX,\n fontScale=0.7,\n color=(0, 255, 0),\n thickness=1)\n return src_im\n\n\ndef draw_text_det_res(dt_boxes, img_path):\n src_im = cv2.imread(img_path)\n for box in dt_boxes:\n box = np.array(box).astype(np.int32).reshape(-1, 2)\n cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)\n return src_im\n\n\ndef resize_img(img, input_size=600):\n \"\"\"\n resize img and limit the longest side of the image to input_size\n \"\"\"\n img = np.array(img)\n im_shape = img.shape\n im_size_max = np.max(im_shape[0:2])\n im_scale = float(input_size) / float(im_size_max)\n img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)\n return img\n\n\ndef draw_ocr(image,\n boxes,\n txts=None,\n scores=None,\n drop_score=0.5,\n font_path=\"./doc/fonts/simfang.ttf\"):\n \"\"\"\n Visualize the results of OCR detection and recognition\n args:\n image(Image|array): RGB image\n boxes(list): boxes with shape(N, 4, 2)\n txts(list): the texts\n scores(list): txxs corresponding scores\n drop_score(float): only scores greater than drop_threshold will be visualized\n font_path: the path of font which is used to draw text\n return(array):\n the visualized img\n \"\"\"\n if scores is None:\n scores 
= [1] * len(boxes)\n box_num = len(boxes)\n for i in range(box_num):\n if scores is not None and (scores[i] < drop_score or\n math.isnan(scores[i])):\n continue\n box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64)\n image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)\n if txts is not None:\n img = np.array(resize_img(image, input_size=600))\n txt_img = text_visual(\n txts,\n scores,\n img_h=img.shape[0],\n img_w=600,\n threshold=drop_score,\n font_path=font_path)\n img = np.concatenate([np.array(img), np.array(txt_img)], axis=1)\n return img\n return image\n\n\ndef draw_ocr_box_txt(image,\n boxes,\n txts,\n scores=None,\n drop_score=0.5,\n font_path=\"./doc/simfang.ttf\"):\n h, w = image.height, image.width\n img_left = image.copy()\n img_right = Image.new('RGB', (w, h), (255, 255, 255))\n\n import random\n\n random.seed(0)\n draw_left = ImageDraw.Draw(img_left)\n draw_right = ImageDraw.Draw(img_right)\n for idx, (box, txt) in enumerate(zip(boxes, txts)):\n if scores is not None and scores[idx] < drop_score:\n continue\n color = (random.randint(0, 255), random.randint(0, 255),\n random.randint(0, 255))\n draw_left.polygon(box, fill=color)\n draw_right.polygon(\n [\n box[0][0], box[0][1], box[1][0], box[1][1], box[2][0],\n box[2][1], box[3][0], box[3][1]\n ],\n outline=color)\n box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][\n 1])**2)\n box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][\n 1])**2)\n if box_height > 2 * box_width:\n font_size = max(int(box_width * 0.9), 10)\n font = ImageFont.truetype(font_path, font_size, encoding=\"utf-8\")\n cur_y = box[0][1]\n for c in txt:\n char_size = font.getsize(c)\n draw_right.text(\n (box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font)\n cur_y += char_size[1]\n else:\n font_size = max(int(box_height * 0.8), 10)\n font = ImageFont.truetype(font_path, font_size, encoding=\"utf-8\")\n draw_right.text(\n [box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font)\n img_left = Image.blend(image, img_left, 0.5)\n img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))\n img_show.paste(img_left, (0, 0, w, h))\n img_show.paste(img_right, (w, 0, w * 2, h))\n return np.array(img_show)\n\n\ndef str_count(s):\n \"\"\"\n Count the number of Chinese characters,\n a single English character and a single number\n equal to half the length of Chinese characters.\n args:\n s(string): the input of string\n return(int):\n the number of Chinese characters\n \"\"\"\n import string\n count_zh = count_pu = 0\n s_len = len(s)\n en_dg_count = 0\n for c in s:\n if c in string.ascii_letters or c.isdigit() or c.isspace():\n en_dg_count += 1\n elif c.isalpha():\n count_zh += 1\n else:\n count_pu += 1\n return s_len - math.ceil(en_dg_count / 2)\n\n\ndef text_visual(texts,\n scores,\n img_h=400,\n img_w=600,\n threshold=0.,\n font_path=\"./doc/simfang.ttf\"):\n \"\"\"\n create new blank img and draw txt on it\n args:\n texts(list): the text will be draw\n scores(list|None): corresponding score of each txt\n img_h(int): the height of blank img\n img_w(int): the width of blank img\n font_path: the path of font which is used to draw text\n return(array):\n \"\"\"\n if scores is not None:\n assert len(texts) == len(\n scores), \"The number of txts and corresponding scores must match\"\n\n def create_blank_img():\n blank_img = np.ones(shape=[img_h, img_w], dtype=np.int8) * 255\n blank_img[:, img_w - 1:] = 0\n blank_img = Image.fromarray(blank_img).convert(\"RGB\")\n draw_txt = ImageDraw.Draw(blank_img)\n return 
blank_img, draw_txt\n\n blank_img, draw_txt = create_blank_img()\n\n font_size = 20\n txt_color = (0, 0, 0)\n font = ImageFont.truetype(font_path, font_size, encoding=\"utf-8\")\n\n gap = font_size + 5\n txt_img_list = []\n count, index = 1, 0\n for idx, txt in enumerate(texts):\n index += 1\n if scores[idx] < threshold or math.isnan(scores[idx]):\n index -= 1\n continue\n first_line = True\n while str_count(txt) >= img_w // font_size - 4:\n tmp = txt\n txt = tmp[:img_w // font_size - 4]\n if first_line:\n new_txt = str(index) + ': ' + txt\n first_line = False\n else:\n new_txt = ' ' + txt\n draw_txt.text((0, gap * count), new_txt, txt_color, font=font)\n txt = tmp[img_w // font_size - 4:]\n if count >= img_h // gap - 1:\n txt_img_list.append(np.array(blank_img))\n blank_img, draw_txt = create_blank_img()\n count = 0\n count += 1\n if first_line:\n new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx])\n else:\n new_txt = \" \" + txt + \" \" + '%.3f' % (scores[idx])\n draw_txt.text((0, gap * count), new_txt, txt_color, font=font)\n # whether add new blank img or not\n if count >= img_h // gap - 1 and idx + 1 < len(texts):\n txt_img_list.append(np.array(blank_img))\n blank_img, draw_txt = create_blank_img()\n count = 0\n count += 1\n txt_img_list.append(np.array(blank_img))\n if len(txt_img_list) == 1:\n blank_img = np.array(txt_img_list[0])\n else:\n blank_img = np.concatenate(txt_img_list, axis=1)\n return np.array(blank_img)\n\n\ndef base64_to_cv2(b64str):\n import base64\n data = base64.b64decode(b64str.encode('utf8'))\n data = np.fromstring(data, np.uint8)\n data = cv2.imdecode(data, cv2.IMREAD_COLOR)\n return data\n\n\ndef draw_boxes(image, boxes, scores=None, drop_score=0.5):\n if scores is None:\n scores = [1] * len(boxes)\n for (box, score) in zip(boxes, scores):\n if score < drop_score:\n continue\n box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64)\n image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)\n return image\n\n\ndef get_rotate_crop_image(img, points):\n '''\n img_height, img_width = img.shape[0:2]\n left = int(np.min(points[:, 0]))\n right = int(np.max(points[:, 0]))\n top = int(np.min(points[:, 1]))\n bottom = int(np.max(points[:, 1]))\n img_crop = img[top:bottom, left:right, :].copy()\n points[:, 0] = points[:, 0] - left\n points[:, 1] = points[:, 1] - top\n '''\n assert len(points) == 4, \"shape of points must be 4*2\"\n img_crop_width = int(\n max(\n np.linalg.norm(points[0] - points[1]),\n np.linalg.norm(points[2] - points[3])))\n img_crop_height = int(\n max(\n np.linalg.norm(points[0] - points[3]),\n np.linalg.norm(points[1] - points[2])))\n pts_std = np.float32([[0, 0], [img_crop_width, 0],\n [img_crop_width, img_crop_height],\n [0, img_crop_height]])\n M = cv2.getPerspectiveTransform(points, pts_std)\n dst_img = cv2.warpPerspective(\n img,\n M, (img_crop_width, img_crop_height),\n borderMode=cv2.BORDER_REPLICATE,\n flags=cv2.INTER_CUBIC)\n dst_img_height, dst_img_width = dst_img.shape[0:2]\n if dst_img_height * 1.0 / dst_img_width >= 1.5:\n dst_img = np.rot90(dst_img)\n return dst_img\n\n\ndef check_gpu(use_gpu):\n if use_gpu and not paddle.is_compiled_with_cuda():\n\n use_gpu = False\n return use_gpu\n\n\nif __name__ == '__main__':\n pass\n", "path": "tools/infer/utility.py" } ]
[ { "content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport sys\nimport cv2\nimport numpy as np\nimport paddle\nfrom PIL import Image, ImageDraw, ImageFont\nimport math\nfrom paddle import inference\nimport time\nfrom ppocr.utils.logging import get_logger\n\n\ndef str2bool(v):\n return v.lower() in (\"true\", \"t\", \"1\")\n\n\ndef init_args():\n parser = argparse.ArgumentParser()\n # params for prediction engine\n parser.add_argument(\"--use_gpu\", type=str2bool, default=True)\n parser.add_argument(\"--ir_optim\", type=str2bool, default=True)\n parser.add_argument(\"--use_tensorrt\", type=str2bool, default=False)\n parser.add_argument(\"--min_subgraph_size\", type=int, default=15)\n parser.add_argument(\"--precision\", type=str, default=\"fp32\")\n parser.add_argument(\"--gpu_mem\", type=int, default=500)\n\n # params for text detector\n parser.add_argument(\"--image_dir\", type=str)\n parser.add_argument(\"--det_algorithm\", type=str, default='DB')\n parser.add_argument(\"--det_model_dir\", type=str)\n parser.add_argument(\"--det_limit_side_len\", type=float, default=960)\n parser.add_argument(\"--det_limit_type\", type=str, default='max')\n\n # DB parmas\n parser.add_argument(\"--det_db_thresh\", type=float, default=0.3)\n parser.add_argument(\"--det_db_box_thresh\", type=float, default=0.6)\n parser.add_argument(\"--det_db_unclip_ratio\", type=float, default=1.5)\n parser.add_argument(\"--max_batch_size\", type=int, default=10)\n parser.add_argument(\"--use_dilation\", type=str2bool, default=False)\n parser.add_argument(\"--det_db_score_mode\", type=str, default=\"fast\")\n # EAST parmas\n parser.add_argument(\"--det_east_score_thresh\", type=float, default=0.8)\n parser.add_argument(\"--det_east_cover_thresh\", type=float, default=0.1)\n parser.add_argument(\"--det_east_nms_thresh\", type=float, default=0.2)\n\n # SAST parmas\n parser.add_argument(\"--det_sast_score_thresh\", type=float, default=0.5)\n parser.add_argument(\"--det_sast_nms_thresh\", type=float, default=0.2)\n parser.add_argument(\"--det_sast_polygon\", type=str2bool, default=False)\n\n # PSE parmas\n parser.add_argument(\"--det_pse_thresh\", type=float, default=0)\n parser.add_argument(\"--det_pse_box_thresh\", type=float, default=0.85)\n parser.add_argument(\"--det_pse_min_area\", type=float, default=16)\n parser.add_argument(\"--det_pse_box_type\", type=str, default='box')\n parser.add_argument(\"--det_pse_scale\", type=int, default=1)\n\n # params for text recognizer\n parser.add_argument(\"--rec_algorithm\", type=str, default='CRNN')\n parser.add_argument(\"--rec_model_dir\", type=str)\n parser.add_argument(\"--rec_image_shape\", type=str, default=\"3, 32, 320\")\n parser.add_argument(\"--rec_batch_num\", type=int, default=6)\n parser.add_argument(\"--max_text_length\", type=int, default=25)\n parser.add_argument(\n \"--rec_char_dict_path\",\n type=str,\n default=\"./ppocr/utils/ppocr_keys_v1.txt\")\n 
parser.add_argument(\"--use_space_char\", type=str2bool, default=True)\n parser.add_argument(\n \"--vis_font_path\", type=str, default=\"./doc/fonts/simfang.ttf\")\n parser.add_argument(\"--drop_score\", type=float, default=0.5)\n\n # params for e2e\n parser.add_argument(\"--e2e_algorithm\", type=str, default='PGNet')\n parser.add_argument(\"--e2e_model_dir\", type=str)\n parser.add_argument(\"--e2e_limit_side_len\", type=float, default=768)\n parser.add_argument(\"--e2e_limit_type\", type=str, default='max')\n\n # PGNet parmas\n parser.add_argument(\"--e2e_pgnet_score_thresh\", type=float, default=0.5)\n parser.add_argument(\n \"--e2e_char_dict_path\", type=str, default=\"./ppocr/utils/ic15_dict.txt\")\n parser.add_argument(\"--e2e_pgnet_valid_set\", type=str, default='totaltext')\n parser.add_argument(\"--e2e_pgnet_mode\", type=str, default='fast')\n\n # params for text classifier\n parser.add_argument(\"--use_angle_cls\", type=str2bool, default=False)\n parser.add_argument(\"--cls_model_dir\", type=str)\n parser.add_argument(\"--cls_image_shape\", type=str, default=\"3, 48, 192\")\n parser.add_argument(\"--label_list\", type=list, default=['0', '180'])\n parser.add_argument(\"--cls_batch_num\", type=int, default=6)\n parser.add_argument(\"--cls_thresh\", type=float, default=0.9)\n\n parser.add_argument(\"--enable_mkldnn\", type=str2bool, default=False)\n parser.add_argument(\"--cpu_threads\", type=int, default=10)\n parser.add_argument(\"--use_pdserving\", type=str2bool, default=False)\n parser.add_argument(\"--warmup\", type=str2bool, default=False)\n\n #\n parser.add_argument(\n \"--draw_img_save_dir\", type=str, default=\"./inference_results\")\n parser.add_argument(\"--save_crop_res\", type=str2bool, default=False)\n parser.add_argument(\"--crop_res_save_dir\", type=str, default=\"./output\")\n\n # multi-process\n parser.add_argument(\"--use_mp\", type=str2bool, default=False)\n parser.add_argument(\"--total_process_num\", type=int, default=1)\n parser.add_argument(\"--process_id\", type=int, default=0)\n\n parser.add_argument(\"--benchmark\", type=str2bool, default=False)\n parser.add_argument(\"--save_log_path\", type=str, default=\"./log_output/\")\n\n parser.add_argument(\"--show_log\", type=str2bool, default=True)\n parser.add_argument(\"--use_onnx\", type=str2bool, default=False)\n return parser\n\n\ndef parse_args():\n parser = init_args()\n return parser.parse_args()\n\n\ndef create_predictor(args, mode, logger):\n if mode == \"det\":\n model_dir = args.det_model_dir\n elif mode == 'cls':\n model_dir = args.cls_model_dir\n elif mode == 'rec':\n model_dir = args.rec_model_dir\n elif mode == 'table':\n model_dir = args.table_model_dir\n else:\n model_dir = args.e2e_model_dir\n\n if model_dir is None:\n logger.info(\"not find {} model file path {}\".format(mode, model_dir))\n sys.exit(0)\n if args.use_onnx:\n import onnxruntime as ort\n model_file_path = model_dir\n if not os.path.exists(model_file_path):\n raise ValueError(\"not find model file path {}\".format(\n model_file_path))\n sess = ort.InferenceSession(model_file_path)\n return sess, sess.get_inputs()[0], None, None\n\n else:\n model_file_path = model_dir + \"/inference.pdmodel\"\n params_file_path = model_dir + \"/inference.pdiparams\"\n if not os.path.exists(model_file_path):\n raise ValueError(\"not find model file path {}\".format(\n model_file_path))\n if not os.path.exists(params_file_path):\n raise ValueError(\"not find params file path {}\".format(\n params_file_path))\n\n config = 
inference.Config(model_file_path, params_file_path)\n\n if hasattr(args, 'precision'):\n if args.precision == \"fp16\" and args.use_tensorrt:\n precision = inference.PrecisionType.Half\n elif args.precision == \"int8\":\n precision = inference.PrecisionType.Int8\n else:\n precision = inference.PrecisionType.Float32\n else:\n precision = inference.PrecisionType.Float32\n\n if args.use_gpu:\n gpu_id = get_infer_gpuid()\n if gpu_id is None:\n logger.warning(\n \"GPU is not found in current device by nvidia-smi. Please check your device or ignore it if run on jeston.\"\n )\n config.enable_use_gpu(args.gpu_mem, 0)\n if args.use_tensorrt:\n config.enable_tensorrt_engine(\n workspace_size=1 << 30,\n precision_mode=precision,\n max_batch_size=args.max_batch_size,\n min_subgraph_size=args.min_subgraph_size)\n # skip the minmum trt subgraph\n use_dynamic_shape = True\n if mode == \"det\":\n min_input_shape = {\n \"x\": [1, 3, 50, 50],\n \"conv2d_92.tmp_0\": [1, 120, 20, 20],\n \"conv2d_91.tmp_0\": [1, 24, 10, 10],\n \"conv2d_59.tmp_0\": [1, 96, 20, 20],\n \"nearest_interp_v2_1.tmp_0\": [1, 256, 10, 10],\n \"nearest_interp_v2_2.tmp_0\": [1, 256, 20, 20],\n \"conv2d_124.tmp_0\": [1, 256, 20, 20],\n \"nearest_interp_v2_3.tmp_0\": [1, 64, 20, 20],\n \"nearest_interp_v2_4.tmp_0\": [1, 64, 20, 20],\n \"nearest_interp_v2_5.tmp_0\": [1, 64, 20, 20],\n \"elementwise_add_7\": [1, 56, 2, 2],\n \"nearest_interp_v2_0.tmp_0\": [1, 256, 2, 2]\n }\n max_input_shape = {\n \"x\": [1, 3, 1536, 1536],\n \"conv2d_92.tmp_0\": [1, 120, 400, 400],\n \"conv2d_91.tmp_0\": [1, 24, 200, 200],\n \"conv2d_59.tmp_0\": [1, 96, 400, 400],\n \"nearest_interp_v2_1.tmp_0\": [1, 256, 200, 200],\n \"conv2d_124.tmp_0\": [1, 256, 400, 400],\n \"nearest_interp_v2_2.tmp_0\": [1, 256, 400, 400],\n \"nearest_interp_v2_3.tmp_0\": [1, 64, 400, 400],\n \"nearest_interp_v2_4.tmp_0\": [1, 64, 400, 400],\n \"nearest_interp_v2_5.tmp_0\": [1, 64, 400, 400],\n \"elementwise_add_7\": [1, 56, 400, 400],\n \"nearest_interp_v2_0.tmp_0\": [1, 256, 400, 400]\n }\n opt_input_shape = {\n \"x\": [1, 3, 640, 640],\n \"conv2d_92.tmp_0\": [1, 120, 160, 160],\n \"conv2d_91.tmp_0\": [1, 24, 80, 80],\n \"conv2d_59.tmp_0\": [1, 96, 160, 160],\n \"nearest_interp_v2_1.tmp_0\": [1, 256, 80, 80],\n \"nearest_interp_v2_2.tmp_0\": [1, 256, 160, 160],\n \"conv2d_124.tmp_0\": [1, 256, 160, 160],\n \"nearest_interp_v2_3.tmp_0\": [1, 64, 160, 160],\n \"nearest_interp_v2_4.tmp_0\": [1, 64, 160, 160],\n \"nearest_interp_v2_5.tmp_0\": [1, 64, 160, 160],\n \"elementwise_add_7\": [1, 56, 40, 40],\n \"nearest_interp_v2_0.tmp_0\": [1, 256, 40, 40]\n }\n min_pact_shape = {\n \"nearest_interp_v2_26.tmp_0\": [1, 256, 20, 20],\n \"nearest_interp_v2_27.tmp_0\": [1, 64, 20, 20],\n \"nearest_interp_v2_28.tmp_0\": [1, 64, 20, 20],\n \"nearest_interp_v2_29.tmp_0\": [1, 64, 20, 20]\n }\n max_pact_shape = {\n \"nearest_interp_v2_26.tmp_0\": [1, 256, 400, 400],\n \"nearest_interp_v2_27.tmp_0\": [1, 64, 400, 400],\n \"nearest_interp_v2_28.tmp_0\": [1, 64, 400, 400],\n \"nearest_interp_v2_29.tmp_0\": [1, 64, 400, 400]\n }\n opt_pact_shape = {\n \"nearest_interp_v2_26.tmp_0\": [1, 256, 160, 160],\n \"nearest_interp_v2_27.tmp_0\": [1, 64, 160, 160],\n \"nearest_interp_v2_28.tmp_0\": [1, 64, 160, 160],\n \"nearest_interp_v2_29.tmp_0\": [1, 64, 160, 160]\n }\n min_input_shape.update(min_pact_shape)\n max_input_shape.update(max_pact_shape)\n opt_input_shape.update(opt_pact_shape)\n elif mode == \"rec\":\n if args.rec_algorithm != \"CRNN\":\n use_dynamic_shape = False\n min_input_shape = {\"x\": [1, 3, 
32, 10]}\n max_input_shape = {\"x\": [args.rec_batch_num, 3, 32, 1536]}\n opt_input_shape = {\"x\": [args.rec_batch_num, 3, 32, 320]}\n elif mode == \"cls\":\n min_input_shape = {\"x\": [1, 3, 48, 10]}\n max_input_shape = {\"x\": [args.rec_batch_num, 3, 48, 1024]}\n opt_input_shape = {\"x\": [args.rec_batch_num, 3, 48, 320]}\n else:\n use_dynamic_shape = False\n if use_dynamic_shape:\n config.set_trt_dynamic_shape_info(\n min_input_shape, max_input_shape, opt_input_shape)\n\n else:\n config.disable_gpu()\n if hasattr(args, \"cpu_threads\"):\n config.set_cpu_math_library_num_threads(args.cpu_threads)\n else:\n # default cpu threads as 10\n config.set_cpu_math_library_num_threads(10)\n if args.enable_mkldnn:\n # cache 10 different shapes for mkldnn to avoid memory leak\n config.set_mkldnn_cache_capacity(10)\n config.enable_mkldnn()\n if args.precision == \"fp16\":\n config.enable_mkldnn_bfloat16()\n # enable memory optim\n config.enable_memory_optim()\n config.disable_glog_info()\n\n config.delete_pass(\"conv_transpose_eltwiseadd_bn_fuse_pass\")\n if mode == 'table':\n config.delete_pass(\"fc_fuse_pass\") # not supported for table\n config.switch_use_feed_fetch_ops(False)\n config.switch_ir_optim(True)\n\n # create predictor\n predictor = inference.create_predictor(config)\n input_names = predictor.get_input_names()\n for name in input_names:\n input_tensor = predictor.get_input_handle(name)\n output_names = predictor.get_output_names()\n output_tensors = []\n for output_name in output_names:\n output_tensor = predictor.get_output_handle(output_name)\n output_tensors.append(output_tensor)\n return predictor, input_tensor, output_tensors, config\n\n\ndef get_infer_gpuid():\n if os.name == 'nt':\n try:\n return int(os.environ['CUDA_VISIBLE_DEVICES'].split(',')[0])\n except KeyError:\n return 0\n if not paddle.fluid.core.is_compiled_with_rocm():\n cmd = \"env | grep CUDA_VISIBLE_DEVICES\"\n else:\n cmd = \"env | grep HIP_VISIBLE_DEVICES\"\n env_cuda = os.popen(cmd).readlines()\n if len(env_cuda) == 0:\n return 0\n else:\n gpu_id = env_cuda[0].strip().split(\"=\")[1]\n return int(gpu_id[0])\n\n\ndef draw_e2e_res(dt_boxes, strs, img_path):\n src_im = cv2.imread(img_path)\n for box, str in zip(dt_boxes, strs):\n box = box.astype(np.int32).reshape((-1, 1, 2))\n cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)\n cv2.putText(\n src_im,\n str,\n org=(int(box[0, 0, 0]), int(box[0, 0, 1])),\n fontFace=cv2.FONT_HERSHEY_COMPLEX,\n fontScale=0.7,\n color=(0, 255, 0),\n thickness=1)\n return src_im\n\n\ndef draw_text_det_res(dt_boxes, img_path):\n src_im = cv2.imread(img_path)\n for box in dt_boxes:\n box = np.array(box).astype(np.int32).reshape(-1, 2)\n cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)\n return src_im\n\n\ndef resize_img(img, input_size=600):\n \"\"\"\n resize img and limit the longest side of the image to input_size\n \"\"\"\n img = np.array(img)\n im_shape = img.shape\n im_size_max = np.max(im_shape[0:2])\n im_scale = float(input_size) / float(im_size_max)\n img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)\n return img\n\n\ndef draw_ocr(image,\n boxes,\n txts=None,\n scores=None,\n drop_score=0.5,\n font_path=\"./doc/fonts/simfang.ttf\"):\n \"\"\"\n Visualize the results of OCR detection and recognition\n args:\n image(Image|array): RGB image\n boxes(list): boxes with shape(N, 4, 2)\n txts(list): the texts\n scores(list): txxs corresponding scores\n drop_score(float): only scores greater than drop_threshold will be visualized\n font_path: 
the path of font which is used to draw text\n return(array):\n the visualized img\n \"\"\"\n if scores is None:\n scores = [1] * len(boxes)\n box_num = len(boxes)\n for i in range(box_num):\n if scores is not None and (scores[i] < drop_score or\n math.isnan(scores[i])):\n continue\n box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64)\n image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)\n if txts is not None:\n img = np.array(resize_img(image, input_size=600))\n txt_img = text_visual(\n txts,\n scores,\n img_h=img.shape[0],\n img_w=600,\n threshold=drop_score,\n font_path=font_path)\n img = np.concatenate([np.array(img), np.array(txt_img)], axis=1)\n return img\n return image\n\n\ndef draw_ocr_box_txt(image,\n boxes,\n txts,\n scores=None,\n drop_score=0.5,\n font_path=\"./doc/simfang.ttf\"):\n h, w = image.height, image.width\n img_left = image.copy()\n img_right = Image.new('RGB', (w, h), (255, 255, 255))\n\n import random\n\n random.seed(0)\n draw_left = ImageDraw.Draw(img_left)\n draw_right = ImageDraw.Draw(img_right)\n for idx, (box, txt) in enumerate(zip(boxes, txts)):\n if scores is not None and scores[idx] < drop_score:\n continue\n color = (random.randint(0, 255), random.randint(0, 255),\n random.randint(0, 255))\n draw_left.polygon(box, fill=color)\n draw_right.polygon(\n [\n box[0][0], box[0][1], box[1][0], box[1][1], box[2][0],\n box[2][1], box[3][0], box[3][1]\n ],\n outline=color)\n box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][\n 1])**2)\n box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][\n 1])**2)\n if box_height > 2 * box_width:\n font_size = max(int(box_width * 0.9), 10)\n font = ImageFont.truetype(font_path, font_size, encoding=\"utf-8\")\n cur_y = box[0][1]\n for c in txt:\n char_size = font.getsize(c)\n draw_right.text(\n (box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font)\n cur_y += char_size[1]\n else:\n font_size = max(int(box_height * 0.8), 10)\n font = ImageFont.truetype(font_path, font_size, encoding=\"utf-8\")\n draw_right.text(\n [box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font)\n img_left = Image.blend(image, img_left, 0.5)\n img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))\n img_show.paste(img_left, (0, 0, w, h))\n img_show.paste(img_right, (w, 0, w * 2, h))\n return np.array(img_show)\n\n\ndef str_count(s):\n \"\"\"\n Count the number of Chinese characters,\n a single English character and a single number\n equal to half the length of Chinese characters.\n args:\n s(string): the input of string\n return(int):\n the number of Chinese characters\n \"\"\"\n import string\n count_zh = count_pu = 0\n s_len = len(s)\n en_dg_count = 0\n for c in s:\n if c in string.ascii_letters or c.isdigit() or c.isspace():\n en_dg_count += 1\n elif c.isalpha():\n count_zh += 1\n else:\n count_pu += 1\n return s_len - math.ceil(en_dg_count / 2)\n\n\ndef text_visual(texts,\n scores,\n img_h=400,\n img_w=600,\n threshold=0.,\n font_path=\"./doc/simfang.ttf\"):\n \"\"\"\n create new blank img and draw txt on it\n args:\n texts(list): the text will be draw\n scores(list|None): corresponding score of each txt\n img_h(int): the height of blank img\n img_w(int): the width of blank img\n font_path: the path of font which is used to draw text\n return(array):\n \"\"\"\n if scores is not None:\n assert len(texts) == len(\n scores), \"The number of txts and corresponding scores must match\"\n\n def create_blank_img():\n blank_img = np.ones(shape=[img_h, img_w], dtype=np.int8) * 255\n blank_img[:, 
img_w - 1:] = 0\n blank_img = Image.fromarray(blank_img).convert(\"RGB\")\n draw_txt = ImageDraw.Draw(blank_img)\n return blank_img, draw_txt\n\n blank_img, draw_txt = create_blank_img()\n\n font_size = 20\n txt_color = (0, 0, 0)\n font = ImageFont.truetype(font_path, font_size, encoding=\"utf-8\")\n\n gap = font_size + 5\n txt_img_list = []\n count, index = 1, 0\n for idx, txt in enumerate(texts):\n index += 1\n if scores[idx] < threshold or math.isnan(scores[idx]):\n index -= 1\n continue\n first_line = True\n while str_count(txt) >= img_w // font_size - 4:\n tmp = txt\n txt = tmp[:img_w // font_size - 4]\n if first_line:\n new_txt = str(index) + ': ' + txt\n first_line = False\n else:\n new_txt = ' ' + txt\n draw_txt.text((0, gap * count), new_txt, txt_color, font=font)\n txt = tmp[img_w // font_size - 4:]\n if count >= img_h // gap - 1:\n txt_img_list.append(np.array(blank_img))\n blank_img, draw_txt = create_blank_img()\n count = 0\n count += 1\n if first_line:\n new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx])\n else:\n new_txt = \" \" + txt + \" \" + '%.3f' % (scores[idx])\n draw_txt.text((0, gap * count), new_txt, txt_color, font=font)\n # whether add new blank img or not\n if count >= img_h // gap - 1 and idx + 1 < len(texts):\n txt_img_list.append(np.array(blank_img))\n blank_img, draw_txt = create_blank_img()\n count = 0\n count += 1\n txt_img_list.append(np.array(blank_img))\n if len(txt_img_list) == 1:\n blank_img = np.array(txt_img_list[0])\n else:\n blank_img = np.concatenate(txt_img_list, axis=1)\n return np.array(blank_img)\n\n\ndef base64_to_cv2(b64str):\n import base64\n data = base64.b64decode(b64str.encode('utf8'))\n data = np.fromstring(data, np.uint8)\n data = cv2.imdecode(data, cv2.IMREAD_COLOR)\n return data\n\n\ndef draw_boxes(image, boxes, scores=None, drop_score=0.5):\n if scores is None:\n scores = [1] * len(boxes)\n for (box, score) in zip(boxes, scores):\n if score < drop_score:\n continue\n box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64)\n image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)\n return image\n\n\ndef get_rotate_crop_image(img, points):\n '''\n img_height, img_width = img.shape[0:2]\n left = int(np.min(points[:, 0]))\n right = int(np.max(points[:, 0]))\n top = int(np.min(points[:, 1]))\n bottom = int(np.max(points[:, 1]))\n img_crop = img[top:bottom, left:right, :].copy()\n points[:, 0] = points[:, 0] - left\n points[:, 1] = points[:, 1] - top\n '''\n assert len(points) == 4, \"shape of points must be 4*2\"\n img_crop_width = int(\n max(\n np.linalg.norm(points[0] - points[1]),\n np.linalg.norm(points[2] - points[3])))\n img_crop_height = int(\n max(\n np.linalg.norm(points[0] - points[3]),\n np.linalg.norm(points[1] - points[2])))\n pts_std = np.float32([[0, 0], [img_crop_width, 0],\n [img_crop_width, img_crop_height],\n [0, img_crop_height]])\n M = cv2.getPerspectiveTransform(points, pts_std)\n dst_img = cv2.warpPerspective(\n img,\n M, (img_crop_width, img_crop_height),\n borderMode=cv2.BORDER_REPLICATE,\n flags=cv2.INTER_CUBIC)\n dst_img_height, dst_img_width = dst_img.shape[0:2]\n if dst_img_height * 1.0 / dst_img_width >= 1.5:\n dst_img = np.rot90(dst_img)\n return dst_img\n\n\ndef check_gpu(use_gpu):\n if use_gpu and not paddle.is_compiled_with_cuda():\n\n use_gpu = False\n return use_gpu\n\n\nif __name__ == '__main__':\n pass\n", "path": "tools/infer/utility.py" } ]
diff --git a/tools/infer/utility.py b/tools/infer/utility.py index 21bbee098ef..33ed62125c0 100644 --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -313,6 +313,11 @@ def create_predictor(args, mode, logger): def get_infer_gpuid(): + if os.name == 'nt': + try: + return int(os.environ['CUDA_VISIBLE_DEVICES'].split(',')[0]) + except KeyError: + return 0 if not paddle.fluid.core.is_compiled_with_rocm(): cmd = "env | grep CUDA_VISIBLE_DEVICES" else:
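The patch above adds a Windows branch to `get_infer_gpuid`: the pre-existing implementation shells out to `env | grep CUDA_VISIBLE_DEVICES` (or `HIP_VISIBLE_DEVICES` on ROCm builds), which depends on Unix tools that are unavailable when `os.name == 'nt'`, so on Windows the device id is now read straight from the environment. The sketch below restates that lookup in isolation using only the standard library; the function name and the extra `ValueError` fallback are illustrative additions, not part of the PaddleOCR sources.

```python
import os


def first_visible_gpu(default: int = 0) -> int:
    """Return the first GPU id listed in CUDA_VISIBLE_DEVICES, else a default."""
    try:
        # e.g. CUDA_VISIBLE_DEVICES="2,3" -> 2, mirroring the Windows branch above
        return int(os.environ["CUDA_VISIBLE_DEVICES"].split(",")[0])
    except (KeyError, ValueError):
        # variable unset, empty, or non-numeric (e.g. "GPU-<uuid>"): fall back
        return default


if __name__ == "__main__":
    print(first_visible_gpu())
```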
optuna__optuna-3339
Mention each tutorial page in API pages at least once <!-- Please write a clear and concise description of what content in https://optuna.readthedocs.io/ is an issue. --> [Optuna's tutorial](https://optuna.readthedocs.io/en/stable/tutorial/index.html) is a good source to understand Optuna's functionality with concrete examples. However, some tutorial pages might not be mentioned in corresponding Optuna's API pages. ## Description Please add a `note section` or `see also section` to mention the following tutorial pages. - ~[ ] [Lightweight, versatile, and platform agnostic architecture](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/001_first.html)~ - ~this is is a little bit conceptual page, so it might not be necessary to be linked.~ - [x] [Pythonic Search Space](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html) - [x] [Efficient Optimization Algorithms](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/003_efficient_optimization_algorithms.html) - [x] [Easy Parallelization](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/004_distributed.html) - [x] [Quick Visualization for Hyperparameter Optimization Analysis](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/005_visualization.html) - linked from https://optuna.readthedocs.io/en/stable/reference/visualization/index.html - [x] [Saving/Resuming Study with RDB Backend](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/001_rdb.html) - linked from https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html#optuna.study.create_study - [x] [Multi-objective Optimization with Optuna] (https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/002_multi_objective.html) https://github.com/optuna/optuna/pull/3339 - [x] [User Attributes](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/003_attributes.html) - [x] [Command-Line Interface](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/004_cli.html) - linked from https://optuna.readthedocs.io/en/stable/reference/index.html - [x] [User-Defined Sampler](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/005_user_defined_sampler.html) - linked from https://optuna.readthedocs.io/en/stable/reference/samplers.html - [x] [User-Defined Pruner](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/006_user_defined_pruner.html) - linked from https://optuna.readthedocs.io/en/stable/reference/pruners.html - [x] [Callback for Study.optimize](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/007_optuna_callback.html) - linked from `callbacks`'s section in https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize - [ ] [Specify Hyperparameters Manually](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/008_specify_params.html) - [x] [the first section](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize) can be linked from `Study.enqueue_trial` - [ ] [the second section](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/008_specify_params.html#second-scenario-have-optuna-utilize-already-evaluated-hyperparameters) can be linked from `Study.add_trial` (see https://github.com/optuna/optuna/pull/3346) - [x] [Ask-and-Tell Interface](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/009_ask_and_tell.html) - [x] [Re-use the best 
values](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/010_reuse_best_trial.html) https://github.com/optuna/optuna/pull/3396
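The checklist above boils down to one editing pattern: add a `note`/`seealso` block to the relevant API docstring so Sphinx renders a link back to the tutorial page. The stub below shows that pattern as it is applied to `plot_pareto_front` in the patch further down this record; only the trimmed signature and the `...` body are placeholders.

```python
def plot_pareto_front(study, **kwargs):
    """Plot the Pareto front of a study.

    .. seealso::
        Please refer to :ref:`multi_objective` for the tutorial of the
        Pareto front visualization.
    """
    ...
```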
[ { "content": "import json\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\n\nimport optuna\nfrom optuna._experimental import experimental\nfrom optuna.study import Study\nfrom optuna.study._multi_objective import _get_pareto_front_trials_by_trials\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\nfrom optuna.visualization._plotly_imports import _imports\n\n\nif _imports.is_successful():\n from optuna.visualization._plotly_imports import go\n\n_logger = optuna.logging.get_logger(__name__)\n\n\n@experimental(\"2.4.0\")\ndef plot_pareto_front(\n study: Study,\n *,\n target_names: Optional[List[str]] = None,\n include_dominated_trials: bool = True,\n axis_order: Optional[List[int]] = None,\n constraints_func: Optional[Callable[[FrozenTrial], Sequence[float]]] = None,\n) -> \"go.Figure\":\n \"\"\"Plot the Pareto front of a study.\n\n Example:\n\n The following code snippet shows how to plot the Pareto front of a study.\n\n .. plotly::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", 0, 5)\n y = trial.suggest_float(\"y\", 0, 3)\n\n v0 = 4 * x ** 2 + 4 * y ** 2\n v1 = (x - 5) ** 2 + (y - 5) ** 2\n return v0, v1\n\n\n study = optuna.create_study(directions=[\"minimize\", \"minimize\"])\n study.optimize(objective, n_trials=50)\n\n fig = optuna.visualization.plot_pareto_front(study)\n fig.show()\n\n Args:\n study:\n A :class:`~optuna.study.Study` object whose trials are plotted for their objective\n values.\n target_names:\n Objective name list used as the axis titles. If :obj:`None` is specified,\n \"Objective {objective_index}\" is used instead.\n include_dominated_trials:\n A flag to include all dominated trial's objective values.\n axis_order:\n A list of indices indicating the axis order. If :obj:`None` is specified,\n default order is used.\n constraints_func:\n An optional function that computes the objective constraints. It must take a\n :class:`~optuna.trial.FrozenTrial` and return the constraints. The return value must\n be a sequence of :obj:`float` s. A value strictly larger than 0 means that a\n constraint is violated. A value equal to or smaller than 0 is considered feasible.\n This specification is the same as in, for example,\n :class:`~optuna.integration.NSGAIISampler`.\n\n If given, trials are classified into three categories: feasible and best, feasible but\n non-best, and infeasible. Categories are shown in different colors. 
Here, whether a\n trial is best (on Pareto front) or not is determined ignoring all infeasible trials.\n\n Returns:\n A :class:`plotly.graph_objs.Figure` object.\n\n Raises:\n :exc:`ValueError`:\n If the number of objectives of ``study`` isn't 2 or 3.\n \"\"\"\n\n _imports.check()\n\n n_dim = len(study.directions)\n if n_dim not in (2, 3):\n raise ValueError(\"`plot_pareto_front` function only supports 2 or 3 objective studies.\")\n\n if target_names is None:\n target_names = [f\"Objective {i}\" for i in range(n_dim)]\n elif len(target_names) != n_dim:\n raise ValueError(f\"The length of `target_names` is supposed to be {n_dim}.\")\n\n if constraints_func is not None:\n feasible_trials = []\n infeasible_trials = []\n for trial in study.get_trials(states=(TrialState.COMPLETE,)):\n if all(map(lambda x: x <= 0.0, constraints_func(trial))):\n feasible_trials.append(trial)\n else:\n infeasible_trials.append(trial)\n best_trials = _get_pareto_front_trials_by_trials(feasible_trials, study.directions)\n if include_dominated_trials:\n non_best_trials = _get_non_pareto_front_trials(feasible_trials, best_trials)\n else:\n non_best_trials = []\n\n if len(best_trials) == 0:\n _logger.warning(\"Your study does not have any completed and feasible trials.\")\n else:\n best_trials = study.best_trials\n if len(best_trials) == 0:\n _logger.warning(\"Your study does not have any completed trials.\")\n\n if include_dominated_trials:\n non_best_trials = _get_non_pareto_front_trials(\n study.get_trials(deepcopy=False), best_trials\n )\n else:\n non_best_trials = []\n infeasible_trials = []\n\n if axis_order is None:\n axis_order = list(range(n_dim))\n else:\n if len(axis_order) != n_dim:\n raise ValueError(\n f\"Size of `axis_order` {axis_order}. Expect: {n_dim}, Actual: {len(axis_order)}.\"\n )\n if len(set(axis_order)) != n_dim:\n raise ValueError(f\"Elements of given `axis_order` {axis_order} are not unique!.\")\n if max(axis_order) > n_dim - 1:\n raise ValueError(\n f\"Given `axis_order` {axis_order} contains invalid index {max(axis_order)} \"\n f\"higher than {n_dim - 1}.\"\n )\n if min(axis_order) < 0:\n raise ValueError(\n f\"Given `axis_order` {axis_order} contains invalid index {min(axis_order)} \"\n \"lower than 0.\"\n )\n\n def _make_scatter_object(\n trials: Sequence[FrozenTrial],\n hovertemplate: str,\n infeasible: bool = False,\n dominated_trials: bool = False,\n ) -> Union[\"go.Scatter\", \"go.Scatter3d\"]:\n return _make_scatter_object_base(\n n_dim,\n trials,\n axis_order, # type: ignore\n include_dominated_trials,\n hovertemplate=hovertemplate,\n infeasible=infeasible,\n dominated_trials=dominated_trials,\n )\n\n if constraints_func is None:\n data = [\n _make_scatter_object(\n non_best_trials,\n hovertemplate=\"%{text}<extra>Trial</extra>\",\n dominated_trials=True,\n ),\n _make_scatter_object(\n best_trials,\n hovertemplate=\"%{text}<extra>Best Trial</extra>\",\n dominated_trials=False,\n ),\n ]\n else:\n data = [\n _make_scatter_object(\n infeasible_trials,\n hovertemplate=\"%{text}<extra>Infeasible Trial</extra>\",\n infeasible=True,\n ),\n _make_scatter_object(\n non_best_trials,\n hovertemplate=\"%{text}<extra>Feasible Trial</extra>\",\n dominated_trials=True,\n ),\n _make_scatter_object(\n best_trials,\n hovertemplate=\"%{text}<extra>Best Trial</extra>\",\n dominated_trials=False,\n ),\n ]\n\n if n_dim == 2:\n layout = go.Layout(\n title=\"Pareto-front Plot\",\n xaxis_title=target_names[axis_order[0]],\n yaxis_title=target_names[axis_order[1]],\n )\n else:\n layout = go.Layout(\n 
title=\"Pareto-front Plot\",\n scene={\n \"xaxis_title\": target_names[axis_order[0]],\n \"yaxis_title\": target_names[axis_order[1]],\n \"zaxis_title\": target_names[axis_order[2]],\n },\n )\n return go.Figure(data=data, layout=layout)\n\n\ndef _get_non_pareto_front_trials(\n trials: List[FrozenTrial], pareto_trials: List[FrozenTrial]\n) -> List[FrozenTrial]:\n\n non_pareto_trials = []\n for trial in trials:\n if trial.state == TrialState.COMPLETE and trial not in pareto_trials:\n non_pareto_trials.append(trial)\n return non_pareto_trials\n\n\ndef _make_json_compatible(value: Any) -> Any:\n try:\n json.dumps(value)\n return value\n except TypeError:\n # the value can't be converted to JSON directly, so return a string representation\n return str(value)\n\n\ndef _make_scatter_object_base(\n n_dim: int,\n trials: Sequence[FrozenTrial],\n axis_order: List[int],\n include_dominated_trials: bool,\n hovertemplate: str,\n infeasible: bool = False,\n dominated_trials: bool = False,\n) -> Union[\"go.Scatter\", \"go.Scatter3d\"]:\n assert n_dim in (2, 3)\n marker = _make_marker(\n trials,\n include_dominated_trials,\n dominated_trials=dominated_trials,\n infeasible=infeasible,\n )\n if n_dim == 2:\n return go.Scatter(\n x=[t.values[axis_order[0]] for t in trials],\n y=[t.values[axis_order[1]] for t in trials],\n text=[_make_hovertext(t) for t in trials],\n mode=\"markers\",\n hovertemplate=hovertemplate,\n marker=marker,\n showlegend=False,\n )\n else:\n assert n_dim == 3\n return go.Scatter3d(\n x=[t.values[axis_order[0]] for t in trials],\n y=[t.values[axis_order[1]] for t in trials],\n z=[t.values[axis_order[2]] for t in trials],\n text=[_make_hovertext(t) for t in trials],\n mode=\"markers\",\n hovertemplate=hovertemplate,\n marker=marker,\n showlegend=False,\n )\n\n\ndef _make_hovertext(trial: FrozenTrial) -> str:\n user_attrs = {key: _make_json_compatible(value) for key, value in trial.user_attrs.items()}\n user_attrs_dict = {\"user_attrs\": user_attrs} if user_attrs else {}\n text = json.dumps(\n {\n \"number\": trial.number,\n \"values\": trial.values,\n \"params\": trial.params,\n **user_attrs_dict,\n },\n indent=2,\n )\n return text.replace(\"\\n\", \"<br>\")\n\n\ndef _make_marker(\n trials: Sequence[FrozenTrial],\n include_dominated_trials: bool,\n dominated_trials: bool = False,\n infeasible: bool = False,\n) -> Dict[str, Any]:\n if dominated_trials and not include_dominated_trials:\n assert len(trials) == 0\n\n if infeasible:\n return {\n \"color\": \"#cccccc\",\n }\n elif dominated_trials:\n return {\n \"line\": {\"width\": 0.5, \"color\": \"Grey\"},\n \"color\": [t.number for t in trials],\n \"colorscale\": \"Blues\",\n \"colorbar\": {\n \"title\": \"#Trials\",\n },\n }\n else:\n return {\n \"line\": {\"width\": 0.5, \"color\": \"Grey\"},\n \"color\": [t.number for t in trials],\n \"colorscale\": \"Reds\",\n \"colorbar\": {\n \"title\": \"#Best trials\",\n \"x\": 1.1 if include_dominated_trials else 1,\n \"xpad\": 40,\n },\n }\n", "path": "optuna/visualization/_pareto_front.py" } ]
[ { "content": "import json\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\n\nimport optuna\nfrom optuna._experimental import experimental\nfrom optuna.study import Study\nfrom optuna.study._multi_objective import _get_pareto_front_trials_by_trials\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\nfrom optuna.visualization._plotly_imports import _imports\n\n\nif _imports.is_successful():\n from optuna.visualization._plotly_imports import go\n\n_logger = optuna.logging.get_logger(__name__)\n\n\n@experimental(\"2.4.0\")\ndef plot_pareto_front(\n study: Study,\n *,\n target_names: Optional[List[str]] = None,\n include_dominated_trials: bool = True,\n axis_order: Optional[List[int]] = None,\n constraints_func: Optional[Callable[[FrozenTrial], Sequence[float]]] = None,\n) -> \"go.Figure\":\n \"\"\"Plot the Pareto front of a study.\n\n .. seealso::\n Please refer to :ref:`multi_objective` for the tutorial of the Pareto front visualization.\n\n Example:\n\n The following code snippet shows how to plot the Pareto front of a study.\n\n .. plotly::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", 0, 5)\n y = trial.suggest_float(\"y\", 0, 3)\n\n v0 = 4 * x ** 2 + 4 * y ** 2\n v1 = (x - 5) ** 2 + (y - 5) ** 2\n return v0, v1\n\n\n study = optuna.create_study(directions=[\"minimize\", \"minimize\"])\n study.optimize(objective, n_trials=50)\n\n fig = optuna.visualization.plot_pareto_front(study)\n fig.show()\n\n Args:\n study:\n A :class:`~optuna.study.Study` object whose trials are plotted for their objective\n values.\n target_names:\n Objective name list used as the axis titles. If :obj:`None` is specified,\n \"Objective {objective_index}\" is used instead.\n include_dominated_trials:\n A flag to include all dominated trial's objective values.\n axis_order:\n A list of indices indicating the axis order. If :obj:`None` is specified,\n default order is used.\n constraints_func:\n An optional function that computes the objective constraints. It must take a\n :class:`~optuna.trial.FrozenTrial` and return the constraints. The return value must\n be a sequence of :obj:`float` s. A value strictly larger than 0 means that a\n constraint is violated. A value equal to or smaller than 0 is considered feasible.\n This specification is the same as in, for example,\n :class:`~optuna.integration.NSGAIISampler`.\n\n If given, trials are classified into three categories: feasible and best, feasible but\n non-best, and infeasible. Categories are shown in different colors. 
Here, whether a\n trial is best (on Pareto front) or not is determined ignoring all infeasible trials.\n\n Returns:\n A :class:`plotly.graph_objs.Figure` object.\n\n Raises:\n :exc:`ValueError`:\n If the number of objectives of ``study`` isn't 2 or 3.\n \"\"\"\n\n _imports.check()\n\n n_dim = len(study.directions)\n if n_dim not in (2, 3):\n raise ValueError(\"`plot_pareto_front` function only supports 2 or 3 objective studies.\")\n\n if target_names is None:\n target_names = [f\"Objective {i}\" for i in range(n_dim)]\n elif len(target_names) != n_dim:\n raise ValueError(f\"The length of `target_names` is supposed to be {n_dim}.\")\n\n if constraints_func is not None:\n feasible_trials = []\n infeasible_trials = []\n for trial in study.get_trials(states=(TrialState.COMPLETE,)):\n if all(map(lambda x: x <= 0.0, constraints_func(trial))):\n feasible_trials.append(trial)\n else:\n infeasible_trials.append(trial)\n best_trials = _get_pareto_front_trials_by_trials(feasible_trials, study.directions)\n if include_dominated_trials:\n non_best_trials = _get_non_pareto_front_trials(feasible_trials, best_trials)\n else:\n non_best_trials = []\n\n if len(best_trials) == 0:\n _logger.warning(\"Your study does not have any completed and feasible trials.\")\n else:\n best_trials = study.best_trials\n if len(best_trials) == 0:\n _logger.warning(\"Your study does not have any completed trials.\")\n\n if include_dominated_trials:\n non_best_trials = _get_non_pareto_front_trials(\n study.get_trials(deepcopy=False), best_trials\n )\n else:\n non_best_trials = []\n infeasible_trials = []\n\n if axis_order is None:\n axis_order = list(range(n_dim))\n else:\n if len(axis_order) != n_dim:\n raise ValueError(\n f\"Size of `axis_order` {axis_order}. Expect: {n_dim}, Actual: {len(axis_order)}.\"\n )\n if len(set(axis_order)) != n_dim:\n raise ValueError(f\"Elements of given `axis_order` {axis_order} are not unique!.\")\n if max(axis_order) > n_dim - 1:\n raise ValueError(\n f\"Given `axis_order` {axis_order} contains invalid index {max(axis_order)} \"\n f\"higher than {n_dim - 1}.\"\n )\n if min(axis_order) < 0:\n raise ValueError(\n f\"Given `axis_order` {axis_order} contains invalid index {min(axis_order)} \"\n \"lower than 0.\"\n )\n\n def _make_scatter_object(\n trials: Sequence[FrozenTrial],\n hovertemplate: str,\n infeasible: bool = False,\n dominated_trials: bool = False,\n ) -> Union[\"go.Scatter\", \"go.Scatter3d\"]:\n return _make_scatter_object_base(\n n_dim,\n trials,\n axis_order, # type: ignore\n include_dominated_trials,\n hovertemplate=hovertemplate,\n infeasible=infeasible,\n dominated_trials=dominated_trials,\n )\n\n if constraints_func is None:\n data = [\n _make_scatter_object(\n non_best_trials,\n hovertemplate=\"%{text}<extra>Trial</extra>\",\n dominated_trials=True,\n ),\n _make_scatter_object(\n best_trials,\n hovertemplate=\"%{text}<extra>Best Trial</extra>\",\n dominated_trials=False,\n ),\n ]\n else:\n data = [\n _make_scatter_object(\n infeasible_trials,\n hovertemplate=\"%{text}<extra>Infeasible Trial</extra>\",\n infeasible=True,\n ),\n _make_scatter_object(\n non_best_trials,\n hovertemplate=\"%{text}<extra>Feasible Trial</extra>\",\n dominated_trials=True,\n ),\n _make_scatter_object(\n best_trials,\n hovertemplate=\"%{text}<extra>Best Trial</extra>\",\n dominated_trials=False,\n ),\n ]\n\n if n_dim == 2:\n layout = go.Layout(\n title=\"Pareto-front Plot\",\n xaxis_title=target_names[axis_order[0]],\n yaxis_title=target_names[axis_order[1]],\n )\n else:\n layout = go.Layout(\n 
title=\"Pareto-front Plot\",\n scene={\n \"xaxis_title\": target_names[axis_order[0]],\n \"yaxis_title\": target_names[axis_order[1]],\n \"zaxis_title\": target_names[axis_order[2]],\n },\n )\n return go.Figure(data=data, layout=layout)\n\n\ndef _get_non_pareto_front_trials(\n trials: List[FrozenTrial], pareto_trials: List[FrozenTrial]\n) -> List[FrozenTrial]:\n\n non_pareto_trials = []\n for trial in trials:\n if trial.state == TrialState.COMPLETE and trial not in pareto_trials:\n non_pareto_trials.append(trial)\n return non_pareto_trials\n\n\ndef _make_json_compatible(value: Any) -> Any:\n try:\n json.dumps(value)\n return value\n except TypeError:\n # the value can't be converted to JSON directly, so return a string representation\n return str(value)\n\n\ndef _make_scatter_object_base(\n n_dim: int,\n trials: Sequence[FrozenTrial],\n axis_order: List[int],\n include_dominated_trials: bool,\n hovertemplate: str,\n infeasible: bool = False,\n dominated_trials: bool = False,\n) -> Union[\"go.Scatter\", \"go.Scatter3d\"]:\n assert n_dim in (2, 3)\n marker = _make_marker(\n trials,\n include_dominated_trials,\n dominated_trials=dominated_trials,\n infeasible=infeasible,\n )\n if n_dim == 2:\n return go.Scatter(\n x=[t.values[axis_order[0]] for t in trials],\n y=[t.values[axis_order[1]] for t in trials],\n text=[_make_hovertext(t) for t in trials],\n mode=\"markers\",\n hovertemplate=hovertemplate,\n marker=marker,\n showlegend=False,\n )\n else:\n assert n_dim == 3\n return go.Scatter3d(\n x=[t.values[axis_order[0]] for t in trials],\n y=[t.values[axis_order[1]] for t in trials],\n z=[t.values[axis_order[2]] for t in trials],\n text=[_make_hovertext(t) for t in trials],\n mode=\"markers\",\n hovertemplate=hovertemplate,\n marker=marker,\n showlegend=False,\n )\n\n\ndef _make_hovertext(trial: FrozenTrial) -> str:\n user_attrs = {key: _make_json_compatible(value) for key, value in trial.user_attrs.items()}\n user_attrs_dict = {\"user_attrs\": user_attrs} if user_attrs else {}\n text = json.dumps(\n {\n \"number\": trial.number,\n \"values\": trial.values,\n \"params\": trial.params,\n **user_attrs_dict,\n },\n indent=2,\n )\n return text.replace(\"\\n\", \"<br>\")\n\n\ndef _make_marker(\n trials: Sequence[FrozenTrial],\n include_dominated_trials: bool,\n dominated_trials: bool = False,\n infeasible: bool = False,\n) -> Dict[str, Any]:\n if dominated_trials and not include_dominated_trials:\n assert len(trials) == 0\n\n if infeasible:\n return {\n \"color\": \"#cccccc\",\n }\n elif dominated_trials:\n return {\n \"line\": {\"width\": 0.5, \"color\": \"Grey\"},\n \"color\": [t.number for t in trials],\n \"colorscale\": \"Blues\",\n \"colorbar\": {\n \"title\": \"#Trials\",\n },\n }\n else:\n return {\n \"line\": {\"width\": 0.5, \"color\": \"Grey\"},\n \"color\": [t.number for t in trials],\n \"colorscale\": \"Reds\",\n \"colorbar\": {\n \"title\": \"#Best trials\",\n \"x\": 1.1 if include_dominated_trials else 1,\n \"xpad\": 40,\n },\n }\n", "path": "optuna/visualization/_pareto_front.py" } ]
diff --git a/optuna/visualization/_pareto_front.py b/optuna/visualization/_pareto_front.py index bbbd533099..a508b7b313 100644 --- a/optuna/visualization/_pareto_front.py +++ b/optuna/visualization/_pareto_front.py @@ -33,6 +33,9 @@ def plot_pareto_front( ) -> "go.Figure": """Plot the Pareto front of a study. + .. seealso:: + Please refer to :ref:`multi_objective` for the tutorial of the Pareto front visualization. + Example: The following code snippet shows how to plot the Pareto front of a study.
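One detail the diff leaves implicit: the `:ref:`multi_objective`` role only renders as a link if the multi-objective tutorial source defines a matching explicit label; without one, Sphinx reports an undefined-label warning. In a sphinx-gallery style tutorial script, such a label would sit at the top of the page docstring, roughly as in the hypothetical fragment below (the label name is taken from the diff; the title line is assumed):

```python
"""
.. _multi_objective:

Multi-objective Optimization with Optuna
========================================
"""
```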
Parsl__parsl-208
Fix automatic IPP controller creation on OS X Automatic creation of the IPP controller fails on Mac. Here is a test which reproduces the problem: ``` (parsl_py36) [Anna-Woodards-MacBook-Pro] ~/ci/parsl/parsl/tests/test_data >python test_file_ipp.py sites [{'site': 'Local_IPP', 'auth': {'channel': None}, 'execution': {'executor': 'ipp', 'provider': 'local', 'block': {'initBlocks': 4}}}] site Local_IPP auth {'channel': None} channel None execution {'executor': 'ipp', 'provider': 'local', 'block': {'initBlocks': 4}} executor ipp provider local block {'initBlocks': 4} initBlocks 4 globals {'lazyErrors': True} lazyErrors True site Local_IPP auth {'channel': None} channel None execution {'executor': 'ipp', 'provider': 'local', 'block': {'initBlocks': 4}} executor ipp provider local block {'initBlocks': 4} initBlocks 4 SSH_AGENT_PID 676 TERM_PROGRAM iTerm.app TERM xterm-256color SHELL /bin/bash HISTSIZE 800000 TMPDIR /var/folders/nt/sny5t5bd3js4y0fwhpbv32p00000gn/T/ Apple_PubSub_Socket_Render /private/tmp/com.apple.launchd.rfUzAYzuml/Render TERM_PROGRAM_VERSION 3.1.20171019-nightly TERM_SESSION_ID w1t7p0:B2D31661-E89E-4EC4-B759-C3E7B02EB5E0 USER awoodard LD_LIBRARY_PATH :/Users/awoodard/local/lib COMMAND_MODE unix2003 SSH_AUTH_SOCK /var/folders/nt/sny5t5bd3js4y0fwhpbv32p00000gn/T//ssh-fF1v22munma8/agent.675 __CF_USER_TEXT_ENCODING 0x1F5:0x0:0x0 LSCOLORS exfxcxdxbxegedabagacad PATH /opt/local/bin:/opt/local/sbin:/Users/awoodard/software/anaconda3/envs/parsl_py36/bin:/Users/awoodard/software/anaconda3/bin:/Users/awoodard/.local/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/usr/local/git/bin:/Library/TeX/texbin:/usr/local/opt/fzf/bin:/usr/local/texlive/2016/bin/x86_64-darwin/ CONDA_PATH_BACKUP /Users/awoodard/software/anaconda3/bin:/Users/awoodard/.local/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/usr/local/git/bin:/Library/TeX/texbin:/usr/local/opt/fzf/bin:/usr/local/texlive/2016/bin/x86_64-darwin/ CONDA_PREFIX /Users/awoodard/software/anaconda3/envs/parsl_py36 PWD /Users/awoodard/ci/parsl/parsl/tests/test_data LANG en_US.utf-8 PYTHONSTARTUP /Users/awoodard/.pystartup ITERM_PROFILE dark colors XPC_FLAGS 0x0 PS1 (parsl_py36) [\h] \w > XPC_SERVICE_NAME 0 PARSL_TESTING True SHLVL 1 HOME /Users/awoodard COLORFGBG 15;0 CONDA_PS1_BACKUP [\h] \w > ITERM_SESSION_ID w1t7p0:B2D31661-E89E-4EC4-B759-C3E7B02EB5E0 LOGNAME awoodard LC_CTYPE en_US.UTF-8 FZF_DEFAULT_COMMAND rg --files --hidden --smartcase --glob "!.git/*" CONDA_DEFAULT_ENV parsl_py36 DISPLAY /private/tmp/com.apple.launchd.0fyi8nxfEK/org.macosforge.xquartz:0 SECURITYSESSIONID 186a8 HISTFILE /Users/awoodard/.bash_history COLORTERM truecolor OLDPWD /Users/awoodard/ci/parsl/parsl/tests _ /Users/awoodard/software/anaconda3/envs/parsl_py36/bin/python DEBUG:parsl.executors.ipp:Starting IpyParallelExecutor with provider:<Local Execution Provider for site:Local_IPP> DEBUG:parsl.executors.ipp:Launched block : 0:51031 DEBUG:parsl.executors.ipp:Launched block : 1:51032 DEBUG:parsl.executors.ipp:Launched block : 2:51034 DEBUG:parsl.executors.ipp:Launched block : 3:51036 DEBUG:parsl.executors.ipp:Starting executor DEBUG:parsl.execution_provider.provider_factory:Created executor : <IPP Executor for site:Local_IPP> DEBUG:parsl.dataflow.strategy:Scaling strategy: simple DEBUG:parsl.dataflow.dflow:Using executors: {'Local_IPP': <IPP Executor for site:Local_IPP>} INFO:parsl.dataflow.dflow:Task 0 submitted for App remote_side_bash_executor, waiting on tasks [] INFO:parsl.dataflow.dflow:Task 0 launched on site Local_IPP 
DEBUG:parsl.dataflow.dflow:Task 0 launched with AppFut:<AppFuture at 0x10ca7e5c0 state=pending> DEBUG:parsl.app.futures:Creating DataFuture with parent : <AppFuture at 0x10ca7e5c0 state=pending> DEBUG:parsl.app.futures:Filepath : cat_out.txt DEBUG:parsl.dataflow.strategy:Min:0 initBlocks:4 Max:10 DEBUG:parsl.dataflow.strategy:Tasks:1 Slots:4 Parallelism:0.75 ```
[ { "content": "import os\nimport time\nimport logging\nfrom ipyparallel import Client\n\nfrom parsl.executors.base import ParslExecutor\nfrom parsl.executors.errors import *\n\nlogger = logging.getLogger(__name__)\n\n\nclass IPyParallelExecutor(ParslExecutor):\n \"\"\"The IPython Parallel executor.\n\n This executor allows us to take advantage of multiple processes running locally\n or remotely via IPythonParallel's pilot execution system.\n\n .. note::\n Some deficiencies with this executor are:\n\n 1. Ipengine's execute one task at a time. This means one engine per core\n is necessary to exploit the full parallelism of a node.\n 2. No notion of remaining walltime.\n 3. Lack of throttling means tasks could be queued up on a worker.\n\n \"\"\"\n\n def compose_launch_cmd(self, filepath, engine_dir, container_image):\n \"\"\"Reads the json contents from filepath and uses that to compose the engine launch command.\n\n Args:\n filepath: Path to the engine file\n engine_dir : CWD for the engines .\n\n \"\"\"\n self.engine_file = os.path.expanduser(filepath)\n\n engine_json = None\n try:\n with open(self.engine_file, 'r') as f:\n engine_json = f.read()\n\n except OSError as e:\n logger.error(\"Could not open engine_json : \", self.engine_file)\n raise e\n\n return \"\"\"cd {0}\ncat <<EOF > ipengine.json\n{1}\nEOF\n\nmkdir -p '.ipengine_logs'\nipengine --file=ipengine.json &>> .ipengine_logs/$JOBNAME.log\n\"\"\".format(engine_dir, engine_json)\n\n def compose_containerized_launch_cmd(self, filepath, engine_dir, container_image):\n \"\"\"Reads the json contents from filepath and uses that to compose the engine launch command.\n\n Notes: Add this to the ipengine launch for debug logs :\n --log-to-file --debug\n Args:\n filepath (str): Path to the engine file\n engine_dir (str): CWD for the engines .\n container_image (str): The container to be used to launch workers\n \"\"\"\n self.engine_file = os.path.expanduser(filepath)\n\n engine_json = None\n try:\n with open(self.engine_file, 'r') as f:\n engine_json = f.read()\n\n except OSError as e:\n logger.error(\"Could not open engine_json : \", self.engine_file)\n raise e\n\n return \"\"\"cd {0}\ncat <<EOF > ipengine.json\n{1}\nEOF\n\nDOCKER_ID=$(docker create --network host {2} ipengine --file=/tmp/ipengine.json)\ndocker cp ipengine.json $DOCKER_ID:/tmp/ipengine.json\n\n# Copy current dir to the working directory\nDOCKER_CWD=$(docker image inspect --format='{{{{.Config.WorkingDir}}}}' {2})\ndocker cp -a . $DOCKER_ID:$DOCKER_CWD\ndocker start $DOCKER_ID\n\nat_exit() {{\n echo \"Caught SIGTERM/SIGKILL signal!\"\n docker stop $DOCKER_ID\n}}\n\ntrap at_exit SIGTERM SIGINT\nsleep infinity\n\"\"\".format(engine_dir, engine_json, container_image)\n\n def __init__(self, execution_provider=None,\n reuse_controller=True,\n engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json',\n engine_dir='.',\n controller=None,\n config=None):\n \"\"\"Initialize the IPyParallel pool. The initialization takes all relevant parameters via KWargs.\n\n .. note::\n\n If initBlocks > 0, and a scalable execution_provider is attached, then the provider\n will be initialized here.\n\n Args:\n - self\n\n KWargs:\n - execution_provider (ExecutionProvider object)\n - reuse_controller (Bool) : If True ipp executor will attempt to connect to an available\n controller. Default: True\n - engine_json_file (str): Path to json engine file that will be used to compose ipp launch\n commands at scaling events. 
Default : '~/.ipython/profile_default/security/ipcontroller-engine.json'\n - engine_dir (str) : Alternative to above, specify the engine_dir\n - config (dict). Default: '.'\n \"\"\"\n self.controller = controller\n self.engine_file = engine_json_file\n self.client_file = None\n\n if self.controller:\n # Find the Client json\n self.client_file = self.controller.client_file\n self.engine_file = self.controller.engine_file\n\n if not os.path.exists(self.client_file):\n logger.debug(\"Waiting for {0}\".format(self.client_file))\n\n sleep_dur = 20 # 20 seconds\n for i in range(0, int(sleep_dur / 0.2)):\n time.sleep(0.2)\n if os.path.exists(self.client_file):\n break\n\n if not os.path.exists(self.client_file):\n raise Exception(\"Controller client file is missing at {0}\".format(self.client_file))\n\n self.executor = Client(url_file=self.client_file)\n self.config = config\n self.container_type = None\n self.container_image = None\n self.sitename = config['site'] if config else 'Static_IPP'\n # NOTE: Copying the config here only partially fixes the issue. There needs to be\n # multiple controllers launched by the factory, and each must have different jsons.\n # There could be timing issues here,\n # local_engine_json = \"{0}.{1}.engine.json\".format(self.config[\"site\"], int(time.time()))\n # copyfile(engine_json_file, local_engine_json)\n # if not os.path.exists(self.config[\"execution\"][\"script_dir\"]):\n # os.makedirs(self.config[\"execution\"][\"script_dir\"])\n\n command_composer = self.compose_launch_cmd\n\n if config and config[\"execution\"][\"container\"][\"type\"]:\n self.container_type = config[\"execution\"][\"container\"][\"type\"]\n self.container_image = config[\"execution\"][\"container\"][\"image\"]\n command_composer = self.compose_containerized_launch_cmd\n logger.info(\"Launching IPP with Docker:{0}\".format(self.container_image))\n\n self.launch_cmd = command_composer(self.engine_file, engine_dir, self.container_image)\n self.execution_provider = execution_provider\n self.engines = []\n\n if reuse_controller:\n # Reuse existing controller if one is available\n pass\n\n if execution_provider:\n self._scaling_enabled = True\n logger.debug(\"Starting IpyParallelExecutor with provider:%s\", execution_provider)\n try:\n for i in range(self.config[\"execution\"][\"block\"].get(\"initBlocks\", 1)):\n eng = self.execution_provider.submit(self.launch_cmd, 1)\n logger.debug(\"Launched block : {0}:{1}\".format(i, eng))\n if not eng:\n raise(ScalingFailed(self.execution_provider.sitename,\n \"Ipp executor failed to scale via execution_provider\"))\n self.engines.extend([eng])\n\n except Exception as e:\n logger.error(\"Scaling out failed : %s\", e)\n raise e\n\n else:\n self._scaling_enabled = False\n logger.debug(\"Starting IpyParallelExecutor with no provider\")\n\n self.lb_view = self.executor.load_balanced_view()\n logger.debug(\"Starting executor\")\n\n @property\n def scaling_enabled(self):\n return self._scaling_enabled\n\n def submit(self, *args, **kwargs):\n \"\"\"Submits work to the thread pool.\n\n This method is simply pass through and behaves like a submit call as described\n here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_\n\n Returns:\n Future\n \"\"\"\n # logger.debug(\"Got args : %s,\", args)\n # logger.debug(\"Got kwargs : %s,\", kwargs)\n return self.lb_view.apply_async(*args, **kwargs)\n\n def scale_out(self, *args, **kwargs):\n \"\"\"Scales out the number of active workers by 1.\n\n This 
method is notImplemented for threads and will raise the error if called.\n\n \"\"\"\n if self.execution_provider:\n r = self.execution_provider.submit(self.launch_cmd, *args, **kwargs)\n self.engines.extend([r])\n else:\n logger.error(\"No execution provider available\")\n r = None\n\n return r\n\n def scale_in(self, blocks, *args, **kwargs):\n \"\"\"Scale in the number of active workers by 1.\n\n This method is notImplemented for threads and will raise the error if called.\n\n Raises:\n NotImplemented exception\n \"\"\"\n status = dict(zip(self.engines, self.execution_provider.status(self.engines)))\n\n # This works for blocks=0\n to_kill = [engine for engine in status if status[engine] == \"RUNNING\"][:blocks]\n\n if self.execution_provider:\n r = self.execution_provider.cancel(to_kill, *args, **kwargs)\n else:\n logger.error(\"No execution provider available\")\n r = None\n\n return r\n\n def status(self):\n \"\"\"Returns the status of the executor via probing the execution providers.\"\"\"\n if self.execution_provider:\n status = self.execution_provider.status(self.engines)\n\n else:\n status = []\n\n return status\n\n def shutdown(self, hub=True, targets='all', block=False):\n \"\"\"Shutdown the executor, including all workers and controllers.\n\n The interface documentation for IPP is `here <http://ipyparallel.readthedocs.io/en/latest/api/ipyparallel.html#ipyparallel.Client.shutdown>`_\n\n Kwargs:\n - hub (Bool): Whether the hub should be shutdown, Default:True,\n - targets (list of ints| 'all'): List of engine id's to kill, Default:'all'\n - block (Bool): To block for confirmations or not\n\n Raises:\n NotImplementedError\n \"\"\"\n if self.controller:\n logger.debug(\"IPP:Shutdown sequence: Attempting controller kill\")\n self.controller.close()\n\n # We do not actually do executor.shutdown because\n # this blocks even when requested to not block, killing the\n # controller is more effective although impolite.\n # x = self.executor.shutdown(targets=targets,\n # hub=hub,\n # block=block)\n\n logger.debug(\"Done with executor shutdown\")\n return True\n\n def __repr__(self):\n return \"<IPP Executor for site:{0}>\".format(self.sitename)\n\n\nif __name__ == \"__main__\":\n\n pool1_config = {\"poolname\": \"pool1\",\n \"queue\": \"foo\"}\n", "path": "parsl/executors/ipp.py" } ]
[ { "content": "import os\nimport time\nimport logging\nfrom ipyparallel import Client\n\nfrom parsl.executors.base import ParslExecutor\nfrom parsl.executors.errors import *\n\nlogger = logging.getLogger(__name__)\n\n\nclass IPyParallelExecutor(ParslExecutor):\n \"\"\"The IPython Parallel executor.\n\n This executor allows us to take advantage of multiple processes running locally\n or remotely via IPythonParallel's pilot execution system.\n\n .. note::\n Some deficiencies with this executor are:\n\n 1. Ipengine's execute one task at a time. This means one engine per core\n is necessary to exploit the full parallelism of a node.\n 2. No notion of remaining walltime.\n 3. Lack of throttling means tasks could be queued up on a worker.\n\n \"\"\"\n\n def compose_launch_cmd(self, filepath, engine_dir, container_image):\n \"\"\"Reads the json contents from filepath and uses that to compose the engine launch command.\n\n Args:\n filepath: Path to the engine file\n engine_dir : CWD for the engines .\n\n \"\"\"\n self.engine_file = os.path.expanduser(filepath)\n\n engine_json = None\n try:\n with open(self.engine_file, 'r') as f:\n engine_json = f.read()\n\n except OSError as e:\n logger.error(\"Could not open engine_json : \", self.engine_file)\n raise e\n\n return \"\"\"cd {0}\ncat <<EOF > ipengine.json\n{1}\nEOF\n\nmkdir -p '.ipengine_logs'\nipengine --file=ipengine.json >> .ipengine_logs/$JOBNAME.log 2>&1\n\"\"\".format(engine_dir, engine_json)\n\n def compose_containerized_launch_cmd(self, filepath, engine_dir, container_image):\n \"\"\"Reads the json contents from filepath and uses that to compose the engine launch command.\n\n Notes: Add this to the ipengine launch for debug logs :\n --log-to-file --debug\n Args:\n filepath (str): Path to the engine file\n engine_dir (str): CWD for the engines .\n container_image (str): The container to be used to launch workers\n \"\"\"\n self.engine_file = os.path.expanduser(filepath)\n\n engine_json = None\n try:\n with open(self.engine_file, 'r') as f:\n engine_json = f.read()\n\n except OSError as e:\n logger.error(\"Could not open engine_json : \", self.engine_file)\n raise e\n\n return \"\"\"cd {0}\ncat <<EOF > ipengine.json\n{1}\nEOF\n\nDOCKER_ID=$(docker create --network host {2} ipengine --file=/tmp/ipengine.json)\ndocker cp ipengine.json $DOCKER_ID:/tmp/ipengine.json\n\n# Copy current dir to the working directory\nDOCKER_CWD=$(docker image inspect --format='{{{{.Config.WorkingDir}}}}' {2})\ndocker cp -a . $DOCKER_ID:$DOCKER_CWD\ndocker start $DOCKER_ID\n\nat_exit() {{\n echo \"Caught SIGTERM/SIGKILL signal!\"\n docker stop $DOCKER_ID\n}}\n\ntrap at_exit SIGTERM SIGINT\nsleep infinity\n\"\"\".format(engine_dir, engine_json, container_image)\n\n def __init__(self, execution_provider=None,\n reuse_controller=True,\n engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json',\n engine_dir='.',\n controller=None,\n config=None):\n \"\"\"Initialize the IPyParallel pool. The initialization takes all relevant parameters via KWargs.\n\n .. note::\n\n If initBlocks > 0, and a scalable execution_provider is attached, then the provider\n will be initialized here.\n\n Args:\n - self\n\n KWargs:\n - execution_provider (ExecutionProvider object)\n - reuse_controller (Bool) : If True ipp executor will attempt to connect to an available\n controller. Default: True\n - engine_json_file (str): Path to json engine file that will be used to compose ipp launch\n commands at scaling events. 
Default : '~/.ipython/profile_default/security/ipcontroller-engine.json'\n - engine_dir (str) : Alternative to above, specify the engine_dir\n - config (dict). Default: '.'\n \"\"\"\n self.controller = controller\n self.engine_file = engine_json_file\n self.client_file = None\n\n if self.controller:\n # Find the Client json\n self.client_file = self.controller.client_file\n self.engine_file = self.controller.engine_file\n\n if not os.path.exists(self.client_file):\n logger.debug(\"Waiting for {0}\".format(self.client_file))\n\n sleep_dur = 20 # 20 seconds\n for i in range(0, int(sleep_dur / 0.2)):\n time.sleep(0.2)\n if os.path.exists(self.client_file):\n break\n\n if not os.path.exists(self.client_file):\n raise Exception(\"Controller client file is missing at {0}\".format(self.client_file))\n\n self.executor = Client(url_file=self.client_file)\n self.config = config\n self.container_type = None\n self.container_image = None\n self.sitename = config['site'] if config else 'Static_IPP'\n # NOTE: Copying the config here only partially fixes the issue. There needs to be\n # multiple controllers launched by the factory, and each must have different jsons.\n # There could be timing issues here,\n # local_engine_json = \"{0}.{1}.engine.json\".format(self.config[\"site\"], int(time.time()))\n # copyfile(engine_json_file, local_engine_json)\n # if not os.path.exists(self.config[\"execution\"][\"script_dir\"]):\n # os.makedirs(self.config[\"execution\"][\"script_dir\"])\n\n command_composer = self.compose_launch_cmd\n\n if config and config[\"execution\"][\"container\"][\"type\"]:\n self.container_type = config[\"execution\"][\"container\"][\"type\"]\n self.container_image = config[\"execution\"][\"container\"][\"image\"]\n command_composer = self.compose_containerized_launch_cmd\n logger.info(\"Launching IPP with Docker:{0}\".format(self.container_image))\n\n self.launch_cmd = command_composer(self.engine_file, engine_dir, self.container_image)\n self.execution_provider = execution_provider\n self.engines = []\n\n if reuse_controller:\n # Reuse existing controller if one is available\n pass\n\n if execution_provider:\n self._scaling_enabled = True\n logger.debug(\"Starting IpyParallelExecutor with provider:%s\", execution_provider)\n try:\n for i in range(self.config[\"execution\"][\"block\"].get(\"initBlocks\", 1)):\n eng = self.execution_provider.submit(self.launch_cmd, 1)\n logger.debug(\"Launched block : {0}:{1}\".format(i, eng))\n if not eng:\n raise(ScalingFailed(self.execution_provider.sitename,\n \"Ipp executor failed to scale via execution_provider\"))\n self.engines.extend([eng])\n\n except Exception as e:\n logger.error(\"Scaling out failed : %s\", e)\n raise e\n\n else:\n self._scaling_enabled = False\n logger.debug(\"Starting IpyParallelExecutor with no provider\")\n\n self.lb_view = self.executor.load_balanced_view()\n logger.debug(\"Starting executor\")\n\n @property\n def scaling_enabled(self):\n return self._scaling_enabled\n\n def submit(self, *args, **kwargs):\n \"\"\"Submits work to the thread pool.\n\n This method is simply pass through and behaves like a submit call as described\n here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_\n\n Returns:\n Future\n \"\"\"\n # logger.debug(\"Got args : %s,\", args)\n # logger.debug(\"Got kwargs : %s,\", kwargs)\n return self.lb_view.apply_async(*args, **kwargs)\n\n def scale_out(self, *args, **kwargs):\n \"\"\"Scales out the number of active workers by 1.\n\n This 
method is notImplemented for threads and will raise the error if called.\n\n \"\"\"\n if self.execution_provider:\n r = self.execution_provider.submit(self.launch_cmd, *args, **kwargs)\n self.engines.extend([r])\n else:\n logger.error(\"No execution provider available\")\n r = None\n\n return r\n\n def scale_in(self, blocks, *args, **kwargs):\n \"\"\"Scale in the number of active workers by 1.\n\n This method is notImplemented for threads and will raise the error if called.\n\n Raises:\n NotImplemented exception\n \"\"\"\n status = dict(zip(self.engines, self.execution_provider.status(self.engines)))\n\n # This works for blocks=0\n to_kill = [engine for engine in status if status[engine] == \"RUNNING\"][:blocks]\n\n if self.execution_provider:\n r = self.execution_provider.cancel(to_kill, *args, **kwargs)\n else:\n logger.error(\"No execution provider available\")\n r = None\n\n return r\n\n def status(self):\n \"\"\"Returns the status of the executor via probing the execution providers.\"\"\"\n if self.execution_provider:\n status = self.execution_provider.status(self.engines)\n\n else:\n status = []\n\n return status\n\n def shutdown(self, hub=True, targets='all', block=False):\n \"\"\"Shutdown the executor, including all workers and controllers.\n\n The interface documentation for IPP is `here <http://ipyparallel.readthedocs.io/en/latest/api/ipyparallel.html#ipyparallel.Client.shutdown>`_\n\n Kwargs:\n - hub (Bool): Whether the hub should be shutdown, Default:True,\n - targets (list of ints| 'all'): List of engine id's to kill, Default:'all'\n - block (Bool): To block for confirmations or not\n\n Raises:\n NotImplementedError\n \"\"\"\n if self.controller:\n logger.debug(\"IPP:Shutdown sequence: Attempting controller kill\")\n self.controller.close()\n\n # We do not actually do executor.shutdown because\n # this blocks even when requested to not block, killing the\n # controller is more effective although impolite.\n # x = self.executor.shutdown(targets=targets,\n # hub=hub,\n # block=block)\n\n logger.debug(\"Done with executor shutdown\")\n return True\n\n def __repr__(self):\n return \"<IPP Executor for site:{0}>\".format(self.sitename)\n\n\nif __name__ == \"__main__\":\n\n pool1_config = {\"poolname\": \"pool1\",\n \"queue\": \"foo\"}\n", "path": "parsl/executors/ipp.py" } ]
diff --git a/parsl/executors/ipp.py b/parsl/executors/ipp.py index 9863eb43e7..36e60cfa01 100644 --- a/parsl/executors/ipp.py +++ b/parsl/executors/ipp.py @@ -50,7 +50,7 @@ def compose_launch_cmd(self, filepath, engine_dir, container_image): EOF mkdir -p '.ipengine_logs' -ipengine --file=ipengine.json &>> .ipengine_logs/$JOBNAME.log +ipengine --file=ipengine.json >> .ipengine_logs/$JOBNAME.log 2>&1 """.format(engine_dir, engine_json) def compose_containerized_launch_cmd(self, filepath, engine_dir, container_image):
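The hunk above replaces the bash-only `&>>` append redirection with the portable `>> ... 2>&1` form, presumably so the generated engine launch script still captures both stdout and stderr when it is executed by a shell that lacks the bash extension. A minimal sketch of the resulting template string follows; the name `launch_template` is illustrative and not taken from the patch:

```python
# Illustrative sketch only: "launch_template" is a made-up name, but the redirection
# matches the patched compose_launch_cmd() body. ">> file 2>&1" appends both stdout
# and stderr portably; "&>>" is a bash-specific shorthand for the same thing.
launch_template = """cd {engine_dir}
mkdir -p '.ipengine_logs'
ipengine --file=ipengine.json >> .ipengine_logs/$JOBNAME.log 2>&1
"""

if __name__ == "__main__":
    print(launch_template.format(engine_dir="/tmp/engines"))
```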
speechbrain__speechbrain-1261
Inconsistency between documentation and the code of evaluate() function in the brain class Hi I've noticed that there are some inconsistencies in the code and docs of the evaluate() method. The code shown here does not have a return value: https://speechbrain.readthedocs.io/en/latest/_modules/speechbrain/core.html#Brain.evaluate And in the docs https://speechbrain.readthedocs.io/en/latest/API/speechbrain.core.html#speechbrain.core.Brain, it is mentioned that it returns average loss.
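The mismatch is between the docstring's "Returns: average test loss" and an evaluation loop that computes `avg_test_loss` but never returns it. The sketch below is illustrative only: `TinyBrain` is a hypothetical stand-in, not SpeechBrain's class or the project's actual patch (the real after-files for this row appear further down, truncated). It only mirrors the running-average bookkeeping that `evaluate()` performs via `update_average()` (minus the finiteness check), plus the return value the documentation promises.

```python
# Minimal, self-contained sketch -- "TinyBrain" is hypothetical, not SpeechBrain code.
class TinyBrain:
    def __init__(self):
        self.step = 0

    def update_average(self, loss, avg_loss):
        # Incremental mean, mirroring Brain.update_average (without the finite check)
        avg_loss -= avg_loss / self.step
        avg_loss += float(loss) / self.step
        return avg_loss

    def evaluate(self, per_batch_losses):
        avg_test_loss = 0.0
        for loss in per_batch_losses:
            self.step += 1
            avg_test_loss = self.update_average(loss, avg_test_loss)
        self.step = 0
        return avg_test_loss  # the return the docs describe and the issue says is missing


print(TinyBrain().evaluate([1.0, 2.0, 3.0]))  # -> 2.0
```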
[ { "content": "\"\"\"Core SpeechBrain code for running experiments.\n\nAuthors\n * Peter Plantinga 2020\n * Abdel Heba 2020\n * Mirco Ravanelli 2020\n * Aku Rouhe 2021\n\"\"\"\n\nimport os\nimport sys\nimport yaml\nimport time\nimport torch\nimport shutil\nimport logging\nimport inspect\nimport pathlib\nimport argparse\nimport tempfile\nimport speechbrain as sb\nfrom datetime import date\nfrom enum import Enum, auto\nfrom tqdm.contrib import tqdm\nfrom types import SimpleNamespace\nfrom torch.nn import SyncBatchNorm\nfrom torch.utils.data import DataLoader\nfrom torch.nn import DataParallel as DP\nfrom torch.utils.data import IterableDataset\nfrom torch.utils.data import DistributedSampler\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom hyperpyyaml import resolve_references\nfrom speechbrain.utils.distributed import run_on_main\nfrom speechbrain.dataio.dataloader import LoopedLoader\nfrom speechbrain.dataio.dataloader import SaveableDataLoader\nfrom speechbrain.dataio.sampler import DistributedSamplerWrapper\nfrom speechbrain.dataio.sampler import ReproducibleRandomSampler\n\nlogger = logging.getLogger(__name__)\nDEFAULT_LOG_CONFIG = os.path.dirname(os.path.abspath(__file__))\nDEFAULT_LOG_CONFIG = os.path.join(DEFAULT_LOG_CONFIG, \"log-config.yaml\")\ntorch._C._jit_set_profiling_executor(False)\ntorch._C._jit_set_profiling_mode(False)\nINTRA_EPOCH_CKPT_FLAG = \"brain_intra_epoch_ckpt\"\nPYTHON_VERSION_MAJOR = 3\nPYTHON_VERSION_MINOR = 7\n\n\ndef create_experiment_directory(\n experiment_directory,\n hyperparams_to_save=None,\n overrides={},\n log_config=DEFAULT_LOG_CONFIG,\n save_env_desc=True,\n):\n \"\"\"Create the output folder and relevant experimental files.\n\n Arguments\n ---------\n experiment_directory : str\n The place where the experiment directory should be created.\n hyperparams_to_save : str\n A filename of a yaml file representing the parameters for this\n experiment. 
If passed, references are resolved, and the result is\n written to a file in the experiment directory called \"hyperparams.yaml\".\n overrides : dict\n A mapping of replacements made in the yaml file, to save in yaml.\n log_config : str\n A yaml filename containing configuration options for the logger.\n save_env_desc : bool\n If True, an environment state description is saved to the experiment\n directory, in a file called env.log in the experiment directory.\n \"\"\"\n try:\n # all writing command must be done with the main_process\n if sb.utils.distributed.if_main_process():\n if not os.path.isdir(experiment_directory):\n os.makedirs(experiment_directory)\n\n # Write the parameters file\n if hyperparams_to_save is not None:\n hyperparams_filename = os.path.join(\n experiment_directory, \"hyperparams.yaml\"\n )\n with open(hyperparams_to_save) as f:\n resolved_yaml = resolve_references(f, overrides)\n with open(hyperparams_filename, \"w\") as w:\n print(\"# Generated %s from:\" % date.today(), file=w)\n print(\"# %s\" % os.path.abspath(hyperparams_to_save), file=w)\n print(\"# yamllint disable\", file=w)\n shutil.copyfileobj(resolved_yaml, w)\n\n # Copy executing file to output directory\n module = inspect.getmodule(inspect.currentframe().f_back)\n if module is not None:\n callingfile = os.path.realpath(module.__file__)\n shutil.copy(callingfile, experiment_directory)\n\n # Log exceptions to output automatically\n log_file = os.path.join(experiment_directory, \"log.txt\")\n logger_overrides = {\n \"handlers\": {\"file_handler\": {\"filename\": log_file}}\n }\n sb.utils.logger.setup_logging(log_config, logger_overrides)\n sys.excepthook = _logging_excepthook\n\n # Log beginning of experiment!\n logger.info(\"Beginning experiment!\")\n logger.info(f\"Experiment folder: {experiment_directory}\")\n\n # Save system description:\n if save_env_desc:\n description_str = sb.utils.logger.get_environment_description()\n with open(\n os.path.join(experiment_directory, \"env.log\"), \"w\"\n ) as fo:\n fo.write(description_str)\n finally:\n # wait for main_process if ddp is used\n sb.utils.distributed.ddp_barrier()\n\n\ndef _logging_excepthook(exc_type, exc_value, exc_traceback):\n \"\"\"Interrupt exception raising to log the error.\"\"\"\n logger.error(\"Exception:\", exc_info=(exc_type, exc_value, exc_traceback))\n\n\ndef parse_arguments(arg_list=None):\n r\"\"\"Parse command-line arguments to the experiment.\n\n Arguments\n ---------\n arg_list : list, None\n A list of arguments to parse. If not given, this is read from\n `sys.argv[1:]`\n\n Returns\n -------\n param_file : str\n The location of the parameters file.\n run_opts : dict\n Run options, such as distributed, device, etc.\n overrides : dict\n The overrides to pass to ``load_hyperpyyaml``.\n\n Example\n -------\n >>> argv = ['hyperparams.yaml', '--device', 'cuda:1', '--seed', '10']\n >>> filename, run_opts, overrides = parse_arguments(argv)\n >>> filename\n 'hyperparams.yaml'\n >>> run_opts[\"device\"]\n 'cuda:1'\n >>> overrides\n 'seed: 10'\n \"\"\"\n if arg_list is None:\n arg_list = sys.argv[1:]\n parser = argparse.ArgumentParser(description=\"Run a SpeechBrain experiment\")\n parser.add_argument(\n \"param_file\",\n type=str,\n help=\"A yaml-formatted file using the extended YAML syntax. 
\"\n \"defined by SpeechBrain.\",\n )\n parser.add_argument(\n \"--debug\",\n default=False,\n action=\"store_true\",\n help=\"Run the experiment with only a few batches for all \"\n \"datasets, to ensure code runs without crashing.\",\n )\n parser.add_argument(\n \"--debug_batches\",\n type=int,\n default=2,\n help=\"Number of batches to run in debug mode.\",\n )\n parser.add_argument(\n \"--debug_epochs\",\n type=int,\n default=2,\n help=\"Number of epochs to run in debug mode. \"\n \"If a non-positive number is passed, all epochs are run.\",\n )\n parser.add_argument(\n \"--log_config\",\n type=str,\n help=\"A file storing the configuration options for logging\",\n )\n # if use_env = False in torch.distributed.lunch then local_rank arg is given\n parser.add_argument(\"--local_rank\", type=int, help=\"Rank on local machine\")\n parser.add_argument(\n \"--device\",\n type=str,\n default=\"cuda:0\",\n help=\"The device to run the experiment on (e.g. 'cuda:0')\",\n )\n parser.add_argument(\n \"--data_parallel_backend\",\n default=False,\n action=\"store_true\",\n help=\"This flag enables training with data_parallel.\",\n )\n parser.add_argument(\n \"--distributed_launch\",\n default=False,\n action=\"store_true\",\n help=\"This flag enables training with DDP. Assumes script run with \"\n \"`torch.distributed.launch`\",\n )\n parser.add_argument(\n \"--distributed_backend\",\n type=str,\n default=\"nccl\",\n help=\"One of {nccl, gloo, mpi}\",\n )\n parser.add_argument(\n \"--find_unused_parameters\",\n default=False,\n action=\"store_true\",\n help=\"This flag disable unused parameters detection\",\n )\n parser.add_argument(\n \"--jit_module_keys\",\n type=str,\n nargs=\"*\",\n help=\"A list of keys in the 'modules' dict to jitify\",\n )\n parser.add_argument(\n \"--auto_mix_prec\",\n default=None,\n action=\"store_true\",\n help=\"This flag enables training with automatic mixed-precision.\",\n )\n parser.add_argument(\n \"--max_grad_norm\",\n type=float,\n help=\"Gradient norm will be clipped to this value, \"\n \"enter negative value to disable.\",\n )\n parser.add_argument(\n \"--nonfinite_patience\",\n type=int,\n help=\"Max number of batches per epoch to skip if loss is nonfinite.\",\n )\n parser.add_argument(\n \"--noprogressbar\",\n default=None,\n action=\"store_true\",\n help=\"This flag disables the data loop progressbars.\",\n )\n parser.add_argument(\n \"--ckpt_interval_minutes\",\n type=float,\n help=\"Amount of time between saving intra-epoch checkpoints \"\n \"in minutes. If non-positive, intra-epoch checkpoints are not saved.\",\n )\n parser.add_argument(\n \"--grad_accumulation_factor\",\n type=int,\n help=\"Number of batches to accumulate gradients before optimizer step\",\n )\n parser.add_argument(\n \"--optimizer_step_limit\",\n type=int,\n help=\"Number of optimizer steps to run. If not passed, all epochs are run.\",\n )\n\n # Accept extra args to override yaml\n run_opts, overrides = parser.parse_known_args(arg_list)\n\n # Ignore items that are \"None\", they were not passed\n run_opts = {k: v for k, v in vars(run_opts).items() if v is not None}\n\n param_file = run_opts[\"param_file\"]\n del run_opts[\"param_file\"]\n\n overrides = _convert_to_yaml(overrides)\n\n # Checking that DataParallel use the right number of GPU\n if run_opts[\"data_parallel_backend\"]:\n if torch.cuda.device_count() == 0:\n raise ValueError(\"You must have at least 1 GPU.\")\n\n # For DDP, the device args must equal to local_rank used by\n # torch.distributed.launch. 
If run_opts[\"local_rank\"] exists,\n # use os.environ[\"LOCAL_RANK\"]\n local_rank = None\n if \"local_rank\" in run_opts:\n local_rank = run_opts[\"local_rank\"]\n else:\n if \"LOCAL_RANK\" in os.environ and os.environ[\"LOCAL_RANK\"] != \"\":\n local_rank = int(os.environ[\"LOCAL_RANK\"])\n\n # force device arg to be the same as local_rank from torch.distributed.lunch\n if local_rank is not None and \"cuda\" in run_opts[\"device\"]:\n run_opts[\"device\"] = run_opts[\"device\"][:-1] + str(local_rank)\n\n return param_file, run_opts, overrides\n\n\ndef _convert_to_yaml(overrides):\n \"\"\"Convert args to yaml for overrides\"\"\"\n yaml_string = \"\"\n\n # Handle '--arg=val' type args\n joined_args = \"=\".join(overrides)\n split_args = joined_args.split(\"=\")\n\n for arg in split_args:\n if arg.startswith(\"--\"):\n yaml_string += \"\\n\" + arg[len(\"--\") :] + \":\"\n else:\n yaml_string += \" \" + arg\n\n return yaml_string.strip()\n\n\nclass Stage(Enum):\n \"\"\"Simple enum to track stage of experiments.\"\"\"\n\n TRAIN = auto()\n VALID = auto()\n TEST = auto()\n\n\[email protected]_checkpoint_hooks\nclass Brain:\n r\"\"\"Brain class abstracts away the details of data loops.\n\n The primary purpose of the `Brain` class is the implementation of\n the ``fit()`` method, which iterates epochs and datasets for the\n purpose of \"fitting\" a set of modules to a set of data.\n\n In order to use the ``fit()`` method, one should sub-class the ``Brain``\n class and override any methods for which the default behavior does not\n match the use case. For a simple use case (e.g., training a single model\n with a single dataset) the only methods that need to be overridden are:\n\n * ``compute_forward()``\n * ``compute_objectives()``\n\n The example below illustrates how overriding these two methods is done.\n\n For more complicated use cases, such as multiple modules that need to\n be updated, the following methods can be overridden:\n\n * ``fit_batch()``\n * ``evaluate_batch()``\n\n Arguments\n ---------\n modules : dict of str:torch.nn.Module pairs\n These modules are passed to the optimizer by default if they have\n trainable parameters, and will have ``train()``/``eval()`` called on them.\n opt_class : torch.optim class\n A torch optimizer constructor that has takes only the list of\n parameters (e.g. a lambda or partial function definition). By default,\n this will be passed all modules in ``modules`` at the\n beginning of the ``fit()`` method. This behavior can be changed\n by overriding the ``configure_optimizers()`` method.\n hparams : dict\n Each key:value pair should consist of a string key and a hyperparameter\n that is used within the overridden methods. 
These will\n be accessible via an ``hparams`` attribute, using \"dot\" notation:\n e.g., self.hparams.model(x).\n run_opts : dict\n A set of options to change the runtime environment, including\n\n debug (bool)\n If ``True``, this will only iterate a few batches for all\n datasets, to ensure code runs without crashing.\n debug_batches (int)\n Number of batches to run in debug mode, Default ``2``.\n debug_epochs (int)\n Number of epochs to run in debug mode, Default ``2``.\n If a non-positive number is passed, all epochs are run.\n jit_module_keys (list of str)\n List of keys in ``modules`` that should be jit compiled.\n distributed_backend (str)\n One of ``nccl``, ``gloo``, ``mpi``.\n device (str)\n The location for performing computations.\n auto_mix_prec (bool)\n If ``True``, automatic mixed-precision is used.\n Activate it only with cuda.\n max_grad_norm (float)\n Default implementation of ``fit_batch()`` uses\n ``clip_grad_norm_`` with this value. Default: ``5``.\n nonfinite_patience (int)\n Number of times to ignore non-finite losses before stopping.\n Default: ``3``.\n noprogressbar (bool)\n Whether to turn off progressbar when training. Default: ``False``.\n ckpt_interval_minutes (float)\n Amount of time between saving intra-epoch checkpoints,\n in minutes, default: ``15.0``. If non-positive, these are not saved.\n\n Typically in a script this comes from ``speechbrain.parse_args``, which\n has different defaults than Brain. If an option is not defined here\n (keep in mind that parse_args will inject some options by default),\n then the option is also searched for in hparams (by key).\n checkpointer : speechbrain.Checkpointer\n By default, this will be used to load checkpoints, and will have the\n optimizer added to continue training if interrupted.\n\n Example\n -------\n >>> from torch.optim import SGD\n >>> class SimpleBrain(Brain):\n ... def compute_forward(self, batch, stage):\n ... return self.modules.model(batch[0])\n ... def compute_objectives(self, predictions, batch, stage):\n ... 
return torch.nn.functional.l1_loss(predictions, batch[0])\n >>> model = torch.nn.Linear(in_features=10, out_features=10)\n >>> brain = SimpleBrain({\"model\": model}, opt_class=lambda x: SGD(x, 0.1))\n >>> brain.fit(range(1), ([torch.rand(10, 10), torch.rand(10, 10)],))\n \"\"\"\n\n def __init__( # noqa: C901\n self,\n modules=None,\n opt_class=None,\n hparams=None,\n run_opts=None,\n checkpointer=None,\n ):\n self.opt_class = opt_class\n self.checkpointer = checkpointer\n\n # Arguments passed via the run opts dictionary\n run_opt_defaults = {\n \"debug\": False,\n \"debug_batches\": 2,\n \"debug_epochs\": 2,\n \"device\": \"cpu\",\n \"data_parallel_backend\": False,\n \"distributed_launch\": False,\n \"distributed_backend\": \"nccl\",\n \"find_unused_parameters\": False,\n \"jit_module_keys\": None,\n \"auto_mix_prec\": False,\n \"max_grad_norm\": 5.0,\n \"nonfinite_patience\": 3,\n \"noprogressbar\": False,\n \"ckpt_interval_minutes\": 0,\n \"grad_accumulation_factor\": 1,\n \"optimizer_step_limit\": None,\n }\n\n for arg, default in run_opt_defaults.items():\n if run_opts is not None and arg in run_opts:\n if hparams is not None and arg in hparams:\n logger.info(\n \"Info: \"\n + arg\n + \" arg overridden by command line input to: \"\n + str(run_opts[arg])\n )\n setattr(self, arg, run_opts[arg])\n else:\n # If any arg from run_opt_defaults exist in hparams and\n # not in command line args \"run_opts\"\n if hparams is not None and arg in hparams:\n logger.info(\n \"Info: \" + arg + \" arg from hparam file is used\"\n )\n setattr(self, arg, hparams[arg])\n else:\n setattr(self, arg, default)\n\n # Check Python version\n if not (\n sys.version_info.major == PYTHON_VERSION_MAJOR\n and sys.version_info.minor >= PYTHON_VERSION_MINOR\n ):\n logger.warn(\n \"Detected Python \"\n + str(sys.version_info.major)\n + \".\"\n + str(sys.version_info.minor)\n + \". 
We suggest using SpeechBrain with Python >=\"\n + str(PYTHON_VERSION_MAJOR)\n + \".\"\n + str(PYTHON_VERSION_MINOR)\n )\n\n if self.data_parallel_backend and self.distributed_launch:\n sys.exit(\n \"To use data_parallel backend, start your script with:\\n\\t\"\n \"python experiment.py hyperparams.yaml \"\n \"--data_parallel_backend=True\"\n \"To use DDP backend, start your script with:\\n\\t\"\n \"python -m torch.distributed.lunch [args]\\n\"\n \"experiment.py hyperparams.yaml --distributed_launch=True \"\n \"--distributed_backend=nccl\"\n )\n\n # Switch to the right context\n if self.device == \"cuda\":\n torch.cuda.set_device(0)\n elif \"cuda\" in self.device:\n torch.cuda.set_device(int(self.device[-1]))\n\n # Put modules on the right device, accessible with dot notation\n self.modules = torch.nn.ModuleDict(modules).to(self.device)\n\n # Make hyperparams available with dot notation too\n if hparams is not None:\n self.hparams = SimpleNamespace(**hparams)\n\n # Checkpointer should point at a temporary directory in debug mode\n if (\n self.debug\n and self.checkpointer is not None\n and hasattr(self.checkpointer, \"checkpoints_dir\")\n ):\n tempdir = tempfile.TemporaryDirectory()\n logger.info(\n \"Since debug mode is active, switching checkpointer \"\n f\"output to temporary directory: {tempdir.name}\"\n )\n self.checkpointer.checkpoints_dir = pathlib.Path(tempdir.name)\n\n # Keep reference to tempdir as long as checkpointer exists\n self.checkpointer.tempdir = tempdir\n\n # Sampler should be handled by `make_dataloader`\n # or if you provide a DataLoader directly, you can set\n # this.train_sampler = your_sampler\n # to have your_sampler.set_epoch() called on each epoch.\n self.train_sampler = None\n\n # Automatic mixed precision init\n if self.auto_mix_prec:\n self.scaler = torch.cuda.amp.GradScaler()\n\n # List parameter count for the user\n total_params = sum(\n p.numel() for p in self.modules.parameters() if p.requires_grad\n )\n if total_params > 0:\n clsname = self.__class__.__name__\n fmt_num = sb.utils.logger.format_order_of_magnitude(total_params)\n logger.info(f\"{fmt_num} trainable parameters in {clsname}\")\n\n if self.distributed_launch:\n self.rank = int(os.environ[\"RANK\"])\n if not torch.distributed.is_initialized():\n if self.rank > 0:\n sys.exit(\n \" ================ WARNING ===============\"\n \"Please add sb.ddp_init_group() into your exp.py\"\n \"To use DDP backend, start your script with:\\n\\t\"\n \"python -m torch.distributed.launch [args]\\n\\t\"\n \"experiment.py hyperparams.yaml \"\n \"--distributed_launch=True --distributed_backend=nccl\"\n )\n else:\n logger.warn(\n \"To use DDP, please add \"\n \"sb.utils.distributed.ddp_init_group() into your exp.py\"\n )\n logger.info(\n \"Only the main process is alive, \"\n \"all other subprocess were killed.\"\n )\n\n # Prepare iterating variables\n self.avg_train_loss = 0.0\n self.step = 0\n self.optimizer_step = 0\n\n # Add this class to the checkpointer for intra-epoch checkpoints\n if self.checkpointer is not None:\n self.checkpointer.add_recoverable(\"brain\", self)\n\n def compute_forward(self, batch, stage):\n \"\"\"Forward pass, to be overridden by sub-classes.\n\n Arguments\n ---------\n batch : torch.Tensor or tensors\n An element from the dataloader, including inputs for processing.\n stage : Stage\n The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST\n\n Returns\n -------\n torch.Tensor or Tensors\n The outputs after all processing is complete.\n Directly passed to 
``compute_objectives()``.\n \"\"\"\n raise NotImplementedError\n\n def compute_objectives(self, predictions, batch, stage):\n \"\"\"Compute loss, to be overridden by sub-classes.\n\n Arguments\n ---------\n predictions : torch.Tensor or Tensors\n The output tensor or tensors to evaluate.\n Comes directly from ``compute_forward()``.\n batch : torch.Tensor or tensors\n An element from the dataloader, including targets for comparison.\n stage : Stage\n The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST\n\n Returns\n -------\n loss : torch.Tensor\n A tensor with the computed loss.\n \"\"\"\n raise NotImplementedError\n\n def on_stage_start(self, stage, epoch=None):\n \"\"\"Gets called when a stage starts.\n\n Useful for defining class variables used during the stage.\n\n Arguments\n ---------\n stage : Stage\n The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST\n epoch : int\n The current epoch count.\n \"\"\"\n pass\n\n def on_stage_end(self, stage, stage_loss, epoch=None):\n \"\"\"Gets called at the end of a stage.\n\n Useful for computing stage statistics, saving checkpoints, etc.\n\n Arguments\n ---------\n stage : Stage\n The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST\n stage_loss : float\n The average loss over the completed stage.\n epoch : int\n The current epoch count.\n \"\"\"\n pass\n\n def make_dataloader(\n self, dataset, stage, ckpt_prefix=\"dataloader-\", **loader_kwargs\n ):\n \"\"\"Creates DataLoaders for Datasets.\n\n This is used by ``fit()`` and ``evaluate()`` if they just receive\n Datasets.\n\n Alternatively, this can be called from outside the Brain subclass.\n In that case, the DataLoader should be passed to ``fit()`` in place\n of the dataset.\n\n The Stage.TRAIN DataLoader is handled specially. It has extra args for\n shuffle and drop_last. In DDP a DistributedSampler is created (unless\n the dataset is an IterableDataset).\n\n NOTE\n ----\n Some important DataLoader arguments are passed via **loader_kwargs,\n e.g., batch_size, num_workers, pin_memory.\n\n NOTE\n ----\n By default, ``evaluate()`` specifies ckpt_prefix=None to stop the test\n DataLoader being added to the checkpointer. If you need to add a\n recoverable after saving checkpoints (e.g., at test time, after\n checkpointing the training), and still be able to recover reasonably,\n you should probably specify ``allow_partial_load=True``.\n\n Arguments\n ---------\n dataset : Dataset\n A set of data to use to create data loader. If the Dataset is a\n DynamicItemDataset, PaddedBatch is used as the default collate_fn,\n unless specified in loader_kwargs.\n stage : Stage\n The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST\n ckpt_prefix : str, None\n Prefix to use for SaveableDataLoader Checkpoint name. The Stage\n name is added to this to create the full key. 
Set to None to not\n save the DataLoader.\n **loader_kwargs : dict\n Additional keyword arguments to the DataLoader.\n E.g., batch_size, num_workers, pin_memory.\n \"\"\"\n # TRAIN stage is handled specially.\n if stage == sb.Stage.TRAIN:\n loader_kwargs = self._train_loader_specifics(dataset, loader_kwargs)\n dataloader = sb.dataio.dataloader.make_dataloader(\n dataset, **loader_kwargs\n )\n\n if (\n self.checkpointer is not None\n and ckpt_prefix is not None\n and (\n isinstance(dataloader, SaveableDataLoader)\n or isinstance(dataloader, LoopedLoader)\n )\n ):\n ckpt_key = ckpt_prefix + stage.name\n self.checkpointer.add_recoverable(ckpt_key, dataloader)\n return dataloader\n\n def _train_loader_specifics(self, dataset, loader_kwargs):\n sampler = loader_kwargs.get(\"sampler\", None)\n # Shuffling should really only matter for the train stage. Shuffling\n # will also lead to more padding in batches if the order was otherwise\n # sorted by length.\n shuffle = loader_kwargs.get(\"shuffle\", False)\n if shuffle and not self.distributed_launch:\n if sampler is not None:\n raise ValueError(\n \"Cannot specify both shuffle=True\"\n \"and a sampler in loader_kwargs\"\n )\n sampler = ReproducibleRandomSampler(dataset)\n self.train_sampler = sampler\n loader_kwargs[\"sampler\"] = self.train_sampler\n # Delete the shuffle flag, since you cannot specify both a sampler and\n # shuffling:\n del loader_kwargs[\"shuffle\"]\n\n # Possibly make a DistributedSampler or a wrapper for some other sampler\n if self.distributed_launch and not isinstance(dataset, IterableDataset):\n drop_last = loader_kwargs.get(\"drop_last\", False)\n # num_replicas arg is equal to world_size\n # and retrieved automatically within\n # DistributedSampler obj.\n if sampler is not None:\n self.train_sampler = DistributedSamplerWrapper(\n sampler,\n rank=self.rank,\n drop_last=drop_last,\n shuffle=shuffle,\n )\n\n # with DistributedSamplerWrapper, one must disable shuffling for dataloader\n loader_kwargs[\"shuffle\"] = False\n loader_kwargs[\"sampler\"] = self.train_sampler\n elif loader_kwargs.get(\"batch_sampler\") is None:\n # no sampler and batch-sampler\n self.train_sampler = DistributedSampler(\n dataset, rank=self.rank, shuffle=False, drop_last=drop_last\n )\n\n # with DistributedSamplerWrapper, one must disable shuffling for dataloader\n loader_kwargs[\"shuffle\"] = False\n loader_kwargs[\"sampler\"] = self.train_sampler\n else: # batch_sampler was specified\n self.train_sampler = DistributedSamplerWrapper(\n loader_kwargs.get(\"batch_sampler\", None),\n rank=self.rank,\n shuffle=False,\n )\n loader_kwargs[\"batch_sampler\"] = self.train_sampler\n elif self.distributed_launch and isinstance(dataset, IterableDataset):\n logger.warning(\n \"Cannot automatically solve distributed sampling \"\n \"for IterableDataset.\"\n )\n return loader_kwargs\n\n def on_fit_start(self):\n \"\"\"Gets called at the beginning of ``fit()``, on multiple processes\n if ``distributed_count > 0`` and backend is ddp.\n\n Default implementation compiles the jit modules, initializes\n optimizers, and loads the latest checkpoint to resume training.\n \"\"\"\n # Run this *after* starting all processes since jit modules cannot be\n # pickled.\n self._compile_jit()\n\n # Wrap modules with parallel backend after jit\n self._wrap_distributed()\n\n # Initialize optimizers after parameters are configured\n self.init_optimizers()\n\n # Load latest checkpoint to resume training if interrupted\n if self.checkpointer is not None:\n 
self.checkpointer.recover_if_possible(\n device=torch.device(self.device)\n )\n\n def init_optimizers(self):\n \"\"\"Called during ``on_fit_start()``, initialize optimizers\n after parameters are fully configured (e.g. DDP, jit).\n\n The default implementation of this method depends on an optimizer\n class being passed at initialization that takes only a list\n of parameters (e.g., a lambda or a partial function definition).\n This creates a single optimizer that optimizes all trainable params.\n\n Override this class if there are multiple optimizers.\n \"\"\"\n if self.opt_class is not None:\n self.optimizer = self.opt_class(self.modules.parameters())\n\n if self.checkpointer is not None:\n self.checkpointer.add_recoverable(\"optimizer\", self.optimizer)\n\n def on_evaluate_start(self, max_key=None, min_key=None):\n \"\"\"Gets called at the beginning of ``evaluate()``\n\n Default implementation loads the best-performing checkpoint for\n evaluation, based on stored metrics.\n\n Arguments\n ---------\n max_key : str\n Key to use for finding best checkpoint (higher is better).\n By default, passed to ``self.checkpointer.recover_if_possible()``.\n min_key : str\n Key to use for finding best checkpoint (lower is better).\n By default, passed to ``self.checkpointer.recover_if_possible()``.\n \"\"\"\n\n # Recover best checkpoint for evaluation\n if self.checkpointer is not None:\n self.checkpointer.recover_if_possible(\n max_key=max_key,\n min_key=min_key,\n device=torch.device(self.device),\n )\n\n def fit_batch(self, batch):\n \"\"\"Fit one batch, override to do multiple updates.\n\n The default implementation depends on a few methods being defined\n with a particular behavior:\n\n * ``compute_forward()``\n * ``compute_objectives()``\n\n Also depends on having optimizers passed at initialization.\n\n Arguments\n ---------\n batch : list of torch.Tensors\n Batch of data to use for training. 
Default implementation assumes\n this batch has two elements: inputs and targets.\n\n Returns\n -------\n detached loss\n \"\"\"\n should_step = self.step % self.grad_accumulation_factor == 0\n # Managing automatic mixed precision\n if self.auto_mix_prec:\n self.optimizer.zero_grad()\n with torch.cuda.amp.autocast():\n outputs = self.compute_forward(batch, Stage.TRAIN)\n loss = self.compute_objectives(outputs, batch, Stage.TRAIN)\n self.scaler.scale(loss / self.grad_accumulation_factor).backward()\n if should_step:\n self.scaler.unscale_(self.optimizer)\n if self.check_gradients(loss):\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.optimizer_step += 1\n else:\n outputs = self.compute_forward(batch, Stage.TRAIN)\n loss = self.compute_objectives(outputs, batch, Stage.TRAIN)\n (loss / self.grad_accumulation_factor).backward()\n if should_step:\n if self.check_gradients(loss):\n self.optimizer.step()\n self.optimizer.zero_grad()\n self.optimizer_step += 1\n\n return loss.detach().cpu()\n\n def check_gradients(self, loss):\n \"\"\"Check if gradients are finite and not too large.\n\n Automatically clips large gradients.\n\n Arguments\n ---------\n loss : tensor\n The loss tensor after ``backward()`` has been called but\n before the optimizers ``step()``.\n\n Returns\n -------\n bool\n Whether or not the optimizer step should be carried out.\n \"\"\"\n if not torch.isfinite(loss):\n self.nonfinite_count += 1\n\n # Print helpful debug info\n logger.warn(f\"Loss is {loss}.\")\n for p in self.modules.parameters():\n if not torch.isfinite(p).all():\n logger.warn(\"Parameter is not finite: \" + str(p))\n\n # Check if patience is exhausted\n if self.nonfinite_count > self.nonfinite_patience:\n raise ValueError(\n \"Loss is not finite and patience is exhausted. \"\n \"To debug, wrap `fit()` with \"\n \"autograd's `detect_anomaly()`, e.g.\\n\\nwith \"\n \"torch.autograd.detect_anomaly():\\n\\tbrain.fit(...)\"\n )\n else:\n logger.warn(\"Patience not yet exhausted, ignoring this batch.\")\n return False\n\n # Clip gradient norm\n torch.nn.utils.clip_grad_norm_(\n (p for p in self.modules.parameters()), self.max_grad_norm\n )\n\n return True\n\n def evaluate_batch(self, batch, stage):\n \"\"\"Evaluate one batch, override for different procedure than train.\n\n The default implementation depends on two methods being defined\n with a particular behavior:\n\n * ``compute_forward()``\n * ``compute_objectives()``\n\n Arguments\n ---------\n batch : list of torch.Tensors\n Batch of data to use for evaluation. Default implementation assumes\n this batch has two elements: inputs and targets.\n stage : Stage\n The stage of the experiment: Stage.VALID, Stage.TEST\n\n Returns\n -------\n detached loss\n \"\"\"\n\n out = self.compute_forward(batch, stage=stage)\n loss = self.compute_objectives(out, batch, stage=stage)\n return loss.detach().cpu()\n\n def fit(\n self,\n epoch_counter,\n train_set,\n valid_set=None,\n progressbar=None,\n train_loader_kwargs={},\n valid_loader_kwargs={},\n ):\n \"\"\"Iterate epochs and datasets to improve objective.\n\n Relies on the existence of multiple functions that can (or should) be\n overridden. 
The following methods are used and expected to have a\n certain behavior:\n\n * ``fit_batch()``\n * ``evaluate_batch()``\n * ``update_average()``\n\n If the initialization was done with distributed_count > 0 and the\n distributed_backend is ddp, this will generally handle multiprocess\n logic, like splitting the training data into subsets for each device and\n only saving a checkpoint on the main process.\n\n Arguments\n ---------\n epoch_counter : iterable\n Each call should return an integer indicating the epoch count.\n train_set : Dataset, DataLoader\n A set of data to use for training. If a Dataset is given, a\n DataLoader is automatically created. If a DataLoader is given, it is\n used directly.\n valid_set : Dataset, DataLoader\n A set of data to use for validation. If a Dataset is given, a\n DataLoader is automatically created. If a DataLoader is given, it is\n used directly.\n train_loader_kwargs : dict\n Kwargs passed to `make_dataloader()` for making the train_loader\n (if train_set is a Dataset, not DataLoader).\n E.G. batch_size, num_workers.\n DataLoader kwargs are all valid.\n valid_loader_kwargs : dict\n Kwargs passed to `make_dataloader()` for making the valid_loader\n (if valid_set is a Dataset, not DataLoader).\n E.g., batch_size, num_workers.\n DataLoader kwargs are all valid.\n progressbar : bool\n Whether to display the progress of each epoch in a progressbar.\n \"\"\"\n\n if not (\n isinstance(train_set, DataLoader)\n or isinstance(train_set, LoopedLoader)\n ):\n train_set = self.make_dataloader(\n train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs\n )\n if valid_set is not None and not (\n isinstance(valid_set, DataLoader)\n or isinstance(valid_set, LoopedLoader)\n ):\n valid_set = self.make_dataloader(\n valid_set,\n stage=sb.Stage.VALID,\n ckpt_prefix=None,\n **valid_loader_kwargs,\n )\n\n self.on_fit_start()\n\n if progressbar is None:\n progressbar = not self.noprogressbar\n\n # Iterate epochs\n for epoch in epoch_counter:\n # Training stage\n self.on_stage_start(Stage.TRAIN, epoch)\n self.modules.train()\n\n # Reset nonfinite count to 0 each epoch\n self.nonfinite_count = 0\n\n if self.train_sampler is not None and hasattr(\n self.train_sampler, \"set_epoch\"\n ):\n self.train_sampler.set_epoch(epoch)\n\n # Time since last intra-epoch checkpoint\n last_ckpt_time = time.time()\n\n # Only show progressbar if requested and main_process\n enable = progressbar and sb.utils.distributed.if_main_process()\n with tqdm(\n train_set,\n initial=self.step,\n dynamic_ncols=True,\n disable=not enable,\n ) as t:\n for batch in t:\n if self._optimizer_step_limit_exceeded:\n logger.info(\"Train iteration limit exceeded\")\n break\n self.step += 1\n loss = self.fit_batch(batch)\n self.avg_train_loss = self.update_average(\n loss, self.avg_train_loss\n )\n t.set_postfix(train_loss=self.avg_train_loss)\n\n # Debug mode only runs a few batches\n if self.debug and self.step == self.debug_batches:\n break\n\n if (\n self.checkpointer is not None\n and self.ckpt_interval_minutes > 0\n and time.time() - last_ckpt_time\n >= self.ckpt_interval_minutes * 60.0\n ):\n # This should not use run_on_main, because that\n # includes a DDP barrier. 
That eventually leads to a\n # crash when the processes'\n # time.time() - last_ckpt_time differ and some\n # processes enter this block while others don't,\n # missing the barrier.\n if sb.utils.distributed.if_main_process():\n self._save_intra_epoch_ckpt()\n last_ckpt_time = time.time()\n\n # Run train \"on_stage_end\" on all processes\n self.on_stage_end(Stage.TRAIN, self.avg_train_loss, epoch)\n self.avg_train_loss = 0.0\n self.step = 0\n\n # Validation stage\n if valid_set is not None:\n self.on_stage_start(Stage.VALID, epoch)\n self.modules.eval()\n avg_valid_loss = 0.0\n with torch.no_grad():\n for batch in tqdm(\n valid_set, dynamic_ncols=True, disable=not enable\n ):\n self.step += 1\n loss = self.evaluate_batch(batch, stage=Stage.VALID)\n avg_valid_loss = self.update_average(\n loss, avg_valid_loss\n )\n\n # Debug mode only runs a few batches\n if self.debug and self.step == self.debug_batches:\n break\n\n # Only run validation \"on_stage_end\" on main process\n self.step = 0\n run_on_main(\n self.on_stage_end,\n args=[Stage.VALID, avg_valid_loss, epoch],\n )\n\n # Debug mode only runs a few epochs\n if (\n self.debug\n and epoch == self.debug_epochs\n or self._optimizer_step_limit_exceeded\n ):\n break\n\n @property\n def _optimizer_step_limit_exceeded(self):\n return (\n self.optimizer_step_limit is not None\n and self.optimizer_step >= self.optimizer_step_limit\n )\n\n def _save_intra_epoch_ckpt(self):\n \"\"\"Saves a CKPT with specific intra-epoch flag.\"\"\"\n self.checkpointer.save_and_keep_only(\n end_of_epoch=False,\n num_to_keep=1,\n ckpt_predicate=lambda c: INTRA_EPOCH_CKPT_FLAG in c.meta,\n meta={INTRA_EPOCH_CKPT_FLAG: True},\n verbosity=logging.DEBUG,\n )\n\n def _compile_jit(self):\n \"\"\"Compile requested modules with ``torch.jit.script``.\"\"\"\n if self.jit_module_keys is None:\n return\n\n for name in self.jit_module_keys:\n if name not in self.modules:\n raise ValueError(\n \"module\" + name + \" is not defined in your hparams file.\"\n )\n module = torch.jit.script(self.modules[name])\n self.modules[name] = module.to(self.device)\n\n def _wrap_distributed(self):\n \"\"\"Wrap modules with distributed wrapper when requested.\"\"\"\n if not self.distributed_launch and not self.data_parallel_backend:\n return\n elif self.distributed_launch:\n for name, module in self.modules.items():\n if any(p.requires_grad for p in module.parameters()):\n module = SyncBatchNorm.convert_sync_batchnorm(module)\n module = DDP(\n module,\n device_ids=[self.device],\n find_unused_parameters=self.find_unused_parameters,\n )\n self.modules[name] = module\n else:\n # data_parallel_backend\n for name, module in self.modules.items():\n if any(p.requires_grad for p in module.parameters()):\n module = DP(module)\n self.modules[name] = module\n\n def evaluate(\n self,\n test_set,\n max_key=None,\n min_key=None,\n progressbar=None,\n test_loader_kwargs={},\n ):\n \"\"\"Iterate test_set and evaluate brain performance. By default, loads\n the best-performing checkpoint (as recorded using the checkpointer).\n\n Arguments\n ---------\n test_set : Dataset, DataLoader\n If a DataLoader is given, it is iterated directly. 
Otherwise passed\n to ``self.make_dataloader()``.\n max_key : str\n Key to use for finding best checkpoint, passed to\n ``on_evaluate_start()``.\n min_key : str\n Key to use for finding best checkpoint, passed to\n ``on_evaluate_start()``.\n progressbar : bool\n Whether to display the progress in a progressbar.\n test_loader_kwargs : dict\n Kwargs passed to ``make_dataloader()`` if ``test_set`` is not a\n DataLoader. NOTE: ``loader_kwargs[\"ckpt_prefix\"]`` gets\n automatically overwritten to ``None`` (so that the test DataLoader\n is not added to the checkpointer).\n\n Returns\n -------\n average test loss\n \"\"\"\n if progressbar is None:\n progressbar = not self.noprogressbar\n\n if not (\n isinstance(test_set, DataLoader)\n or isinstance(test_set, LoopedLoader)\n ):\n test_loader_kwargs[\"ckpt_prefix\"] = None\n test_set = self.make_dataloader(\n test_set, Stage.TEST, **test_loader_kwargs\n )\n self.on_evaluate_start(max_key=max_key, min_key=min_key)\n self.on_stage_start(Stage.TEST, epoch=None)\n self.modules.eval()\n avg_test_loss = 0.0\n with torch.no_grad():\n for batch in tqdm(\n test_set, dynamic_ncols=True, disable=not progressbar\n ):\n self.step += 1\n loss = self.evaluate_batch(batch, stage=Stage.TEST)\n avg_test_loss = self.update_average(loss, avg_test_loss)\n\n # Debug mode only runs a few batches\n if self.debug and self.step == self.debug_batches:\n break\n\n # Only run evaluation \"on_stage_end\" on main process\n run_on_main(\n self.on_stage_end, args=[Stage.TEST, avg_test_loss, None]\n )\n self.step = 0\n\n def update_average(self, loss, avg_loss):\n \"\"\"Update running average of the loss.\n\n Arguments\n ---------\n loss : torch.tensor\n detached loss, a single float value.\n avg_loss : float\n current running average.\n\n Returns\n -------\n avg_loss : float\n The average loss.\n \"\"\"\n if torch.isfinite(loss):\n avg_loss -= avg_loss / self.step\n avg_loss += float(loss) / self.step\n return avg_loss\n\n @sb.utils.checkpoints.mark_as_saver\n def _save(self, path):\n save_dict = {\n \"step\": self.step,\n \"avg_train_loss\": self.avg_train_loss,\n \"optimizer_step\": self.optimizer_step,\n }\n with open(path, \"w\") as w:\n w.write(yaml.dump(save_dict))\n\n @sb.utils.checkpoints.mark_as_loader\n def _recover(self, path, end_of_epoch, device):\n del end_of_epoch\n del device\n with open(path) as f:\n save_dict = yaml.safe_load(f)\n self.step = save_dict[\"step\"]\n self.avg_train_loss = save_dict[\"avg_train_loss\"]\n self.optimizer_step = save_dict[\"optimizer_step\"]\n", "path": "speechbrain/core.py" } ]
[ { "content": "\"\"\"Core SpeechBrain code for running experiments.\n\nAuthors\n * Peter Plantinga 2020\n * Abdel Heba 2020\n * Mirco Ravanelli 2020\n * Aku Rouhe 2021\n\"\"\"\n\nimport os\nimport sys\nimport yaml\nimport time\nimport torch\nimport shutil\nimport logging\nimport inspect\nimport pathlib\nimport argparse\nimport tempfile\nimport speechbrain as sb\nfrom datetime import date\nfrom enum import Enum, auto\nfrom tqdm.contrib import tqdm\nfrom types import SimpleNamespace\nfrom torch.nn import SyncBatchNorm\nfrom torch.utils.data import DataLoader\nfrom torch.nn import DataParallel as DP\nfrom torch.utils.data import IterableDataset\nfrom torch.utils.data import DistributedSampler\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom hyperpyyaml import resolve_references\nfrom speechbrain.utils.distributed import run_on_main\nfrom speechbrain.dataio.dataloader import LoopedLoader\nfrom speechbrain.dataio.dataloader import SaveableDataLoader\nfrom speechbrain.dataio.sampler import DistributedSamplerWrapper\nfrom speechbrain.dataio.sampler import ReproducibleRandomSampler\n\nlogger = logging.getLogger(__name__)\nDEFAULT_LOG_CONFIG = os.path.dirname(os.path.abspath(__file__))\nDEFAULT_LOG_CONFIG = os.path.join(DEFAULT_LOG_CONFIG, \"log-config.yaml\")\ntorch._C._jit_set_profiling_executor(False)\ntorch._C._jit_set_profiling_mode(False)\nINTRA_EPOCH_CKPT_FLAG = \"brain_intra_epoch_ckpt\"\nPYTHON_VERSION_MAJOR = 3\nPYTHON_VERSION_MINOR = 7\n\n\ndef create_experiment_directory(\n experiment_directory,\n hyperparams_to_save=None,\n overrides={},\n log_config=DEFAULT_LOG_CONFIG,\n save_env_desc=True,\n):\n \"\"\"Create the output folder and relevant experimental files.\n\n Arguments\n ---------\n experiment_directory : str\n The place where the experiment directory should be created.\n hyperparams_to_save : str\n A filename of a yaml file representing the parameters for this\n experiment. 
If passed, references are resolved, and the result is\n written to a file in the experiment directory called \"hyperparams.yaml\".\n overrides : dict\n A mapping of replacements made in the yaml file, to save in yaml.\n log_config : str\n A yaml filename containing configuration options for the logger.\n save_env_desc : bool\n If True, an environment state description is saved to the experiment\n directory, in a file called env.log in the experiment directory.\n \"\"\"\n try:\n # all writing command must be done with the main_process\n if sb.utils.distributed.if_main_process():\n if not os.path.isdir(experiment_directory):\n os.makedirs(experiment_directory)\n\n # Write the parameters file\n if hyperparams_to_save is not None:\n hyperparams_filename = os.path.join(\n experiment_directory, \"hyperparams.yaml\"\n )\n with open(hyperparams_to_save) as f:\n resolved_yaml = resolve_references(f, overrides)\n with open(hyperparams_filename, \"w\") as w:\n print(\"# Generated %s from:\" % date.today(), file=w)\n print(\"# %s\" % os.path.abspath(hyperparams_to_save), file=w)\n print(\"# yamllint disable\", file=w)\n shutil.copyfileobj(resolved_yaml, w)\n\n # Copy executing file to output directory\n module = inspect.getmodule(inspect.currentframe().f_back)\n if module is not None:\n callingfile = os.path.realpath(module.__file__)\n shutil.copy(callingfile, experiment_directory)\n\n # Log exceptions to output automatically\n log_file = os.path.join(experiment_directory, \"log.txt\")\n logger_overrides = {\n \"handlers\": {\"file_handler\": {\"filename\": log_file}}\n }\n sb.utils.logger.setup_logging(log_config, logger_overrides)\n sys.excepthook = _logging_excepthook\n\n # Log beginning of experiment!\n logger.info(\"Beginning experiment!\")\n logger.info(f\"Experiment folder: {experiment_directory}\")\n\n # Save system description:\n if save_env_desc:\n description_str = sb.utils.logger.get_environment_description()\n with open(\n os.path.join(experiment_directory, \"env.log\"), \"w\"\n ) as fo:\n fo.write(description_str)\n finally:\n # wait for main_process if ddp is used\n sb.utils.distributed.ddp_barrier()\n\n\ndef _logging_excepthook(exc_type, exc_value, exc_traceback):\n \"\"\"Interrupt exception raising to log the error.\"\"\"\n logger.error(\"Exception:\", exc_info=(exc_type, exc_value, exc_traceback))\n\n\ndef parse_arguments(arg_list=None):\n r\"\"\"Parse command-line arguments to the experiment.\n\n Arguments\n ---------\n arg_list : list, None\n A list of arguments to parse. If not given, this is read from\n `sys.argv[1:]`\n\n Returns\n -------\n param_file : str\n The location of the parameters file.\n run_opts : dict\n Run options, such as distributed, device, etc.\n overrides : dict\n The overrides to pass to ``load_hyperpyyaml``.\n\n Example\n -------\n >>> argv = ['hyperparams.yaml', '--device', 'cuda:1', '--seed', '10']\n >>> filename, run_opts, overrides = parse_arguments(argv)\n >>> filename\n 'hyperparams.yaml'\n >>> run_opts[\"device\"]\n 'cuda:1'\n >>> overrides\n 'seed: 10'\n \"\"\"\n if arg_list is None:\n arg_list = sys.argv[1:]\n parser = argparse.ArgumentParser(description=\"Run a SpeechBrain experiment\")\n parser.add_argument(\n \"param_file\",\n type=str,\n help=\"A yaml-formatted file using the extended YAML syntax. 
\"\n \"defined by SpeechBrain.\",\n )\n parser.add_argument(\n \"--debug\",\n default=False,\n action=\"store_true\",\n help=\"Run the experiment with only a few batches for all \"\n \"datasets, to ensure code runs without crashing.\",\n )\n parser.add_argument(\n \"--debug_batches\",\n type=int,\n default=2,\n help=\"Number of batches to run in debug mode.\",\n )\n parser.add_argument(\n \"--debug_epochs\",\n type=int,\n default=2,\n help=\"Number of epochs to run in debug mode. \"\n \"If a non-positive number is passed, all epochs are run.\",\n )\n parser.add_argument(\n \"--log_config\",\n type=str,\n help=\"A file storing the configuration options for logging\",\n )\n # if use_env = False in torch.distributed.lunch then local_rank arg is given\n parser.add_argument(\"--local_rank\", type=int, help=\"Rank on local machine\")\n parser.add_argument(\n \"--device\",\n type=str,\n default=\"cuda:0\",\n help=\"The device to run the experiment on (e.g. 'cuda:0')\",\n )\n parser.add_argument(\n \"--data_parallel_backend\",\n default=False,\n action=\"store_true\",\n help=\"This flag enables training with data_parallel.\",\n )\n parser.add_argument(\n \"--distributed_launch\",\n default=False,\n action=\"store_true\",\n help=\"This flag enables training with DDP. Assumes script run with \"\n \"`torch.distributed.launch`\",\n )\n parser.add_argument(\n \"--distributed_backend\",\n type=str,\n default=\"nccl\",\n help=\"One of {nccl, gloo, mpi}\",\n )\n parser.add_argument(\n \"--find_unused_parameters\",\n default=False,\n action=\"store_true\",\n help=\"This flag disable unused parameters detection\",\n )\n parser.add_argument(\n \"--jit_module_keys\",\n type=str,\n nargs=\"*\",\n help=\"A list of keys in the 'modules' dict to jitify\",\n )\n parser.add_argument(\n \"--auto_mix_prec\",\n default=None,\n action=\"store_true\",\n help=\"This flag enables training with automatic mixed-precision.\",\n )\n parser.add_argument(\n \"--max_grad_norm\",\n type=float,\n help=\"Gradient norm will be clipped to this value, \"\n \"enter negative value to disable.\",\n )\n parser.add_argument(\n \"--nonfinite_patience\",\n type=int,\n help=\"Max number of batches per epoch to skip if loss is nonfinite.\",\n )\n parser.add_argument(\n \"--noprogressbar\",\n default=None,\n action=\"store_true\",\n help=\"This flag disables the data loop progressbars.\",\n )\n parser.add_argument(\n \"--ckpt_interval_minutes\",\n type=float,\n help=\"Amount of time between saving intra-epoch checkpoints \"\n \"in minutes. If non-positive, intra-epoch checkpoints are not saved.\",\n )\n parser.add_argument(\n \"--grad_accumulation_factor\",\n type=int,\n help=\"Number of batches to accumulate gradients before optimizer step\",\n )\n parser.add_argument(\n \"--optimizer_step_limit\",\n type=int,\n help=\"Number of optimizer steps to run. If not passed, all epochs are run.\",\n )\n\n # Accept extra args to override yaml\n run_opts, overrides = parser.parse_known_args(arg_list)\n\n # Ignore items that are \"None\", they were not passed\n run_opts = {k: v for k, v in vars(run_opts).items() if v is not None}\n\n param_file = run_opts[\"param_file\"]\n del run_opts[\"param_file\"]\n\n overrides = _convert_to_yaml(overrides)\n\n # Checking that DataParallel use the right number of GPU\n if run_opts[\"data_parallel_backend\"]:\n if torch.cuda.device_count() == 0:\n raise ValueError(\"You must have at least 1 GPU.\")\n\n # For DDP, the device args must equal to local_rank used by\n # torch.distributed.launch. 
If run_opts[\"local_rank\"] exists,\n # use os.environ[\"LOCAL_RANK\"]\n local_rank = None\n if \"local_rank\" in run_opts:\n local_rank = run_opts[\"local_rank\"]\n else:\n if \"LOCAL_RANK\" in os.environ and os.environ[\"LOCAL_RANK\"] != \"\":\n local_rank = int(os.environ[\"LOCAL_RANK\"])\n\n # force device arg to be the same as local_rank from torch.distributed.lunch\n if local_rank is not None and \"cuda\" in run_opts[\"device\"]:\n run_opts[\"device\"] = run_opts[\"device\"][:-1] + str(local_rank)\n\n return param_file, run_opts, overrides\n\n\ndef _convert_to_yaml(overrides):\n \"\"\"Convert args to yaml for overrides\"\"\"\n yaml_string = \"\"\n\n # Handle '--arg=val' type args\n joined_args = \"=\".join(overrides)\n split_args = joined_args.split(\"=\")\n\n for arg in split_args:\n if arg.startswith(\"--\"):\n yaml_string += \"\\n\" + arg[len(\"--\") :] + \":\"\n else:\n yaml_string += \" \" + arg\n\n return yaml_string.strip()\n\n\nclass Stage(Enum):\n \"\"\"Simple enum to track stage of experiments.\"\"\"\n\n TRAIN = auto()\n VALID = auto()\n TEST = auto()\n\n\[email protected]_checkpoint_hooks\nclass Brain:\n r\"\"\"Brain class abstracts away the details of data loops.\n\n The primary purpose of the `Brain` class is the implementation of\n the ``fit()`` method, which iterates epochs and datasets for the\n purpose of \"fitting\" a set of modules to a set of data.\n\n In order to use the ``fit()`` method, one should sub-class the ``Brain``\n class and override any methods for which the default behavior does not\n match the use case. For a simple use case (e.g., training a single model\n with a single dataset) the only methods that need to be overridden are:\n\n * ``compute_forward()``\n * ``compute_objectives()``\n\n The example below illustrates how overriding these two methods is done.\n\n For more complicated use cases, such as multiple modules that need to\n be updated, the following methods can be overridden:\n\n * ``fit_batch()``\n * ``evaluate_batch()``\n\n Arguments\n ---------\n modules : dict of str:torch.nn.Module pairs\n These modules are passed to the optimizer by default if they have\n trainable parameters, and will have ``train()``/``eval()`` called on them.\n opt_class : torch.optim class\n A torch optimizer constructor that has takes only the list of\n parameters (e.g. a lambda or partial function definition). By default,\n this will be passed all modules in ``modules`` at the\n beginning of the ``fit()`` method. This behavior can be changed\n by overriding the ``configure_optimizers()`` method.\n hparams : dict\n Each key:value pair should consist of a string key and a hyperparameter\n that is used within the overridden methods. 
These will\n be accessible via an ``hparams`` attribute, using \"dot\" notation:\n e.g., self.hparams.model(x).\n run_opts : dict\n A set of options to change the runtime environment, including\n\n debug (bool)\n If ``True``, this will only iterate a few batches for all\n datasets, to ensure code runs without crashing.\n debug_batches (int)\n Number of batches to run in debug mode, Default ``2``.\n debug_epochs (int)\n Number of epochs to run in debug mode, Default ``2``.\n If a non-positive number is passed, all epochs are run.\n jit_module_keys (list of str)\n List of keys in ``modules`` that should be jit compiled.\n distributed_backend (str)\n One of ``nccl``, ``gloo``, ``mpi``.\n device (str)\n The location for performing computations.\n auto_mix_prec (bool)\n If ``True``, automatic mixed-precision is used.\n Activate it only with cuda.\n max_grad_norm (float)\n Default implementation of ``fit_batch()`` uses\n ``clip_grad_norm_`` with this value. Default: ``5``.\n nonfinite_patience (int)\n Number of times to ignore non-finite losses before stopping.\n Default: ``3``.\n noprogressbar (bool)\n Whether to turn off progressbar when training. Default: ``False``.\n ckpt_interval_minutes (float)\n Amount of time between saving intra-epoch checkpoints,\n in minutes, default: ``15.0``. If non-positive, these are not saved.\n\n Typically in a script this comes from ``speechbrain.parse_args``, which\n has different defaults than Brain. If an option is not defined here\n (keep in mind that parse_args will inject some options by default),\n then the option is also searched for in hparams (by key).\n checkpointer : speechbrain.Checkpointer\n By default, this will be used to load checkpoints, and will have the\n optimizer added to continue training if interrupted.\n\n Example\n -------\n >>> from torch.optim import SGD\n >>> class SimpleBrain(Brain):\n ... def compute_forward(self, batch, stage):\n ... return self.modules.model(batch[0])\n ... def compute_objectives(self, predictions, batch, stage):\n ... 
return torch.nn.functional.l1_loss(predictions, batch[0])\n >>> model = torch.nn.Linear(in_features=10, out_features=10)\n >>> brain = SimpleBrain({\"model\": model}, opt_class=lambda x: SGD(x, 0.1))\n >>> brain.fit(range(1), ([torch.rand(10, 10), torch.rand(10, 10)],))\n \"\"\"\n\n def __init__( # noqa: C901\n self,\n modules=None,\n opt_class=None,\n hparams=None,\n run_opts=None,\n checkpointer=None,\n ):\n self.opt_class = opt_class\n self.checkpointer = checkpointer\n\n # Arguments passed via the run opts dictionary\n run_opt_defaults = {\n \"debug\": False,\n \"debug_batches\": 2,\n \"debug_epochs\": 2,\n \"device\": \"cpu\",\n \"data_parallel_backend\": False,\n \"distributed_launch\": False,\n \"distributed_backend\": \"nccl\",\n \"find_unused_parameters\": False,\n \"jit_module_keys\": None,\n \"auto_mix_prec\": False,\n \"max_grad_norm\": 5.0,\n \"nonfinite_patience\": 3,\n \"noprogressbar\": False,\n \"ckpt_interval_minutes\": 0,\n \"grad_accumulation_factor\": 1,\n \"optimizer_step_limit\": None,\n }\n\n for arg, default in run_opt_defaults.items():\n if run_opts is not None and arg in run_opts:\n if hparams is not None and arg in hparams:\n logger.info(\n \"Info: \"\n + arg\n + \" arg overridden by command line input to: \"\n + str(run_opts[arg])\n )\n setattr(self, arg, run_opts[arg])\n else:\n # If any arg from run_opt_defaults exist in hparams and\n # not in command line args \"run_opts\"\n if hparams is not None and arg in hparams:\n logger.info(\n \"Info: \" + arg + \" arg from hparam file is used\"\n )\n setattr(self, arg, hparams[arg])\n else:\n setattr(self, arg, default)\n\n # Check Python version\n if not (\n sys.version_info.major == PYTHON_VERSION_MAJOR\n and sys.version_info.minor >= PYTHON_VERSION_MINOR\n ):\n logger.warn(\n \"Detected Python \"\n + str(sys.version_info.major)\n + \".\"\n + str(sys.version_info.minor)\n + \". 
We suggest using SpeechBrain with Python >=\"\n + str(PYTHON_VERSION_MAJOR)\n + \".\"\n + str(PYTHON_VERSION_MINOR)\n )\n\n if self.data_parallel_backend and self.distributed_launch:\n sys.exit(\n \"To use data_parallel backend, start your script with:\\n\\t\"\n \"python experiment.py hyperparams.yaml \"\n \"--data_parallel_backend=True\"\n \"To use DDP backend, start your script with:\\n\\t\"\n \"python -m torch.distributed.lunch [args]\\n\"\n \"experiment.py hyperparams.yaml --distributed_launch=True \"\n \"--distributed_backend=nccl\"\n )\n\n # Switch to the right context\n if self.device == \"cuda\":\n torch.cuda.set_device(0)\n elif \"cuda\" in self.device:\n torch.cuda.set_device(int(self.device[-1]))\n\n # Put modules on the right device, accessible with dot notation\n self.modules = torch.nn.ModuleDict(modules).to(self.device)\n\n # Make hyperparams available with dot notation too\n if hparams is not None:\n self.hparams = SimpleNamespace(**hparams)\n\n # Checkpointer should point at a temporary directory in debug mode\n if (\n self.debug\n and self.checkpointer is not None\n and hasattr(self.checkpointer, \"checkpoints_dir\")\n ):\n tempdir = tempfile.TemporaryDirectory()\n logger.info(\n \"Since debug mode is active, switching checkpointer \"\n f\"output to temporary directory: {tempdir.name}\"\n )\n self.checkpointer.checkpoints_dir = pathlib.Path(tempdir.name)\n\n # Keep reference to tempdir as long as checkpointer exists\n self.checkpointer.tempdir = tempdir\n\n # Sampler should be handled by `make_dataloader`\n # or if you provide a DataLoader directly, you can set\n # this.train_sampler = your_sampler\n # to have your_sampler.set_epoch() called on each epoch.\n self.train_sampler = None\n\n # Automatic mixed precision init\n if self.auto_mix_prec:\n self.scaler = torch.cuda.amp.GradScaler()\n\n # List parameter count for the user\n total_params = sum(\n p.numel() for p in self.modules.parameters() if p.requires_grad\n )\n if total_params > 0:\n clsname = self.__class__.__name__\n fmt_num = sb.utils.logger.format_order_of_magnitude(total_params)\n logger.info(f\"{fmt_num} trainable parameters in {clsname}\")\n\n if self.distributed_launch:\n self.rank = int(os.environ[\"RANK\"])\n if not torch.distributed.is_initialized():\n if self.rank > 0:\n sys.exit(\n \" ================ WARNING ===============\"\n \"Please add sb.ddp_init_group() into your exp.py\"\n \"To use DDP backend, start your script with:\\n\\t\"\n \"python -m torch.distributed.launch [args]\\n\\t\"\n \"experiment.py hyperparams.yaml \"\n \"--distributed_launch=True --distributed_backend=nccl\"\n )\n else:\n logger.warn(\n \"To use DDP, please add \"\n \"sb.utils.distributed.ddp_init_group() into your exp.py\"\n )\n logger.info(\n \"Only the main process is alive, \"\n \"all other subprocess were killed.\"\n )\n\n # Prepare iterating variables\n self.avg_train_loss = 0.0\n self.step = 0\n self.optimizer_step = 0\n\n # Add this class to the checkpointer for intra-epoch checkpoints\n if self.checkpointer is not None:\n self.checkpointer.add_recoverable(\"brain\", self)\n\n def compute_forward(self, batch, stage):\n \"\"\"Forward pass, to be overridden by sub-classes.\n\n Arguments\n ---------\n batch : torch.Tensor or tensors\n An element from the dataloader, including inputs for processing.\n stage : Stage\n The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST\n\n Returns\n -------\n torch.Tensor or Tensors\n The outputs after all processing is complete.\n Directly passed to 
``compute_objectives()``.\n \"\"\"\n raise NotImplementedError\n\n def compute_objectives(self, predictions, batch, stage):\n \"\"\"Compute loss, to be overridden by sub-classes.\n\n Arguments\n ---------\n predictions : torch.Tensor or Tensors\n The output tensor or tensors to evaluate.\n Comes directly from ``compute_forward()``.\n batch : torch.Tensor or tensors\n An element from the dataloader, including targets for comparison.\n stage : Stage\n The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST\n\n Returns\n -------\n loss : torch.Tensor\n A tensor with the computed loss.\n \"\"\"\n raise NotImplementedError\n\n def on_stage_start(self, stage, epoch=None):\n \"\"\"Gets called when a stage starts.\n\n Useful for defining class variables used during the stage.\n\n Arguments\n ---------\n stage : Stage\n The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST\n epoch : int\n The current epoch count.\n \"\"\"\n pass\n\n def on_stage_end(self, stage, stage_loss, epoch=None):\n \"\"\"Gets called at the end of a stage.\n\n Useful for computing stage statistics, saving checkpoints, etc.\n\n Arguments\n ---------\n stage : Stage\n The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST\n stage_loss : float\n The average loss over the completed stage.\n epoch : int\n The current epoch count.\n \"\"\"\n pass\n\n def make_dataloader(\n self, dataset, stage, ckpt_prefix=\"dataloader-\", **loader_kwargs\n ):\n \"\"\"Creates DataLoaders for Datasets.\n\n This is used by ``fit()`` and ``evaluate()`` if they just receive\n Datasets.\n\n Alternatively, this can be called from outside the Brain subclass.\n In that case, the DataLoader should be passed to ``fit()`` in place\n of the dataset.\n\n The Stage.TRAIN DataLoader is handled specially. It has extra args for\n shuffle and drop_last. In DDP a DistributedSampler is created (unless\n the dataset is an IterableDataset).\n\n NOTE\n ----\n Some important DataLoader arguments are passed via **loader_kwargs,\n e.g., batch_size, num_workers, pin_memory.\n\n NOTE\n ----\n By default, ``evaluate()`` specifies ckpt_prefix=None to stop the test\n DataLoader being added to the checkpointer. If you need to add a\n recoverable after saving checkpoints (e.g., at test time, after\n checkpointing the training), and still be able to recover reasonably,\n you should probably specify ``allow_partial_load=True``.\n\n Arguments\n ---------\n dataset : Dataset\n A set of data to use to create data loader. If the Dataset is a\n DynamicItemDataset, PaddedBatch is used as the default collate_fn,\n unless specified in loader_kwargs.\n stage : Stage\n The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST\n ckpt_prefix : str, None\n Prefix to use for SaveableDataLoader Checkpoint name. The Stage\n name is added to this to create the full key. 
Set to None to not\n save the DataLoader.\n **loader_kwargs : dict\n Additional keyword arguments to the DataLoader.\n E.g., batch_size, num_workers, pin_memory.\n \"\"\"\n # TRAIN stage is handled specially.\n if stage == sb.Stage.TRAIN:\n loader_kwargs = self._train_loader_specifics(dataset, loader_kwargs)\n dataloader = sb.dataio.dataloader.make_dataloader(\n dataset, **loader_kwargs\n )\n\n if (\n self.checkpointer is not None\n and ckpt_prefix is not None\n and (\n isinstance(dataloader, SaveableDataLoader)\n or isinstance(dataloader, LoopedLoader)\n )\n ):\n ckpt_key = ckpt_prefix + stage.name\n self.checkpointer.add_recoverable(ckpt_key, dataloader)\n return dataloader\n\n def _train_loader_specifics(self, dataset, loader_kwargs):\n sampler = loader_kwargs.get(\"sampler\", None)\n # Shuffling should really only matter for the train stage. Shuffling\n # will also lead to more padding in batches if the order was otherwise\n # sorted by length.\n shuffle = loader_kwargs.get(\"shuffle\", False)\n if shuffle and not self.distributed_launch:\n if sampler is not None:\n raise ValueError(\n \"Cannot specify both shuffle=True\"\n \"and a sampler in loader_kwargs\"\n )\n sampler = ReproducibleRandomSampler(dataset)\n self.train_sampler = sampler\n loader_kwargs[\"sampler\"] = self.train_sampler\n # Delete the shuffle flag, since you cannot specify both a sampler and\n # shuffling:\n del loader_kwargs[\"shuffle\"]\n\n # Possibly make a DistributedSampler or a wrapper for some other sampler\n if self.distributed_launch and not isinstance(dataset, IterableDataset):\n drop_last = loader_kwargs.get(\"drop_last\", False)\n # num_replicas arg is equal to world_size\n # and retrieved automatically within\n # DistributedSampler obj.\n if sampler is not None:\n self.train_sampler = DistributedSamplerWrapper(\n sampler,\n rank=self.rank,\n drop_last=drop_last,\n shuffle=shuffle,\n )\n\n # with DistributedSamplerWrapper, one must disable shuffling for dataloader\n loader_kwargs[\"shuffle\"] = False\n loader_kwargs[\"sampler\"] = self.train_sampler\n elif loader_kwargs.get(\"batch_sampler\") is None:\n # no sampler and batch-sampler\n self.train_sampler = DistributedSampler(\n dataset, rank=self.rank, shuffle=False, drop_last=drop_last\n )\n\n # with DistributedSamplerWrapper, one must disable shuffling for dataloader\n loader_kwargs[\"shuffle\"] = False\n loader_kwargs[\"sampler\"] = self.train_sampler\n else: # batch_sampler was specified\n self.train_sampler = DistributedSamplerWrapper(\n loader_kwargs.get(\"batch_sampler\", None),\n rank=self.rank,\n shuffle=False,\n )\n loader_kwargs[\"batch_sampler\"] = self.train_sampler\n elif self.distributed_launch and isinstance(dataset, IterableDataset):\n logger.warning(\n \"Cannot automatically solve distributed sampling \"\n \"for IterableDataset.\"\n )\n return loader_kwargs\n\n def on_fit_start(self):\n \"\"\"Gets called at the beginning of ``fit()``, on multiple processes\n if ``distributed_count > 0`` and backend is ddp.\n\n Default implementation compiles the jit modules, initializes\n optimizers, and loads the latest checkpoint to resume training.\n \"\"\"\n # Run this *after* starting all processes since jit modules cannot be\n # pickled.\n self._compile_jit()\n\n # Wrap modules with parallel backend after jit\n self._wrap_distributed()\n\n # Initialize optimizers after parameters are configured\n self.init_optimizers()\n\n # Load latest checkpoint to resume training if interrupted\n if self.checkpointer is not None:\n 
self.checkpointer.recover_if_possible(\n device=torch.device(self.device)\n )\n\n def init_optimizers(self):\n \"\"\"Called during ``on_fit_start()``, initialize optimizers\n after parameters are fully configured (e.g. DDP, jit).\n\n The default implementation of this method depends on an optimizer\n class being passed at initialization that takes only a list\n of parameters (e.g., a lambda or a partial function definition).\n This creates a single optimizer that optimizes all trainable params.\n\n Override this class if there are multiple optimizers.\n \"\"\"\n if self.opt_class is not None:\n self.optimizer = self.opt_class(self.modules.parameters())\n\n if self.checkpointer is not None:\n self.checkpointer.add_recoverable(\"optimizer\", self.optimizer)\n\n def on_evaluate_start(self, max_key=None, min_key=None):\n \"\"\"Gets called at the beginning of ``evaluate()``\n\n Default implementation loads the best-performing checkpoint for\n evaluation, based on stored metrics.\n\n Arguments\n ---------\n max_key : str\n Key to use for finding best checkpoint (higher is better).\n By default, passed to ``self.checkpointer.recover_if_possible()``.\n min_key : str\n Key to use for finding best checkpoint (lower is better).\n By default, passed to ``self.checkpointer.recover_if_possible()``.\n \"\"\"\n\n # Recover best checkpoint for evaluation\n if self.checkpointer is not None:\n self.checkpointer.recover_if_possible(\n max_key=max_key,\n min_key=min_key,\n device=torch.device(self.device),\n )\n\n def fit_batch(self, batch):\n \"\"\"Fit one batch, override to do multiple updates.\n\n The default implementation depends on a few methods being defined\n with a particular behavior:\n\n * ``compute_forward()``\n * ``compute_objectives()``\n\n Also depends on having optimizers passed at initialization.\n\n Arguments\n ---------\n batch : list of torch.Tensors\n Batch of data to use for training. 
Default implementation assumes\n this batch has two elements: inputs and targets.\n\n Returns\n -------\n detached loss\n \"\"\"\n should_step = self.step % self.grad_accumulation_factor == 0\n # Managing automatic mixed precision\n if self.auto_mix_prec:\n self.optimizer.zero_grad()\n with torch.cuda.amp.autocast():\n outputs = self.compute_forward(batch, Stage.TRAIN)\n loss = self.compute_objectives(outputs, batch, Stage.TRAIN)\n self.scaler.scale(loss / self.grad_accumulation_factor).backward()\n if should_step:\n self.scaler.unscale_(self.optimizer)\n if self.check_gradients(loss):\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.optimizer_step += 1\n else:\n outputs = self.compute_forward(batch, Stage.TRAIN)\n loss = self.compute_objectives(outputs, batch, Stage.TRAIN)\n (loss / self.grad_accumulation_factor).backward()\n if should_step:\n if self.check_gradients(loss):\n self.optimizer.step()\n self.optimizer.zero_grad()\n self.optimizer_step += 1\n\n return loss.detach().cpu()\n\n def check_gradients(self, loss):\n \"\"\"Check if gradients are finite and not too large.\n\n Automatically clips large gradients.\n\n Arguments\n ---------\n loss : tensor\n The loss tensor after ``backward()`` has been called but\n before the optimizers ``step()``.\n\n Returns\n -------\n bool\n Whether or not the optimizer step should be carried out.\n \"\"\"\n if not torch.isfinite(loss):\n self.nonfinite_count += 1\n\n # Print helpful debug info\n logger.warn(f\"Loss is {loss}.\")\n for p in self.modules.parameters():\n if not torch.isfinite(p).all():\n logger.warn(\"Parameter is not finite: \" + str(p))\n\n # Check if patience is exhausted\n if self.nonfinite_count > self.nonfinite_patience:\n raise ValueError(\n \"Loss is not finite and patience is exhausted. \"\n \"To debug, wrap `fit()` with \"\n \"autograd's `detect_anomaly()`, e.g.\\n\\nwith \"\n \"torch.autograd.detect_anomaly():\\n\\tbrain.fit(...)\"\n )\n else:\n logger.warn(\"Patience not yet exhausted, ignoring this batch.\")\n return False\n\n # Clip gradient norm\n torch.nn.utils.clip_grad_norm_(\n (p for p in self.modules.parameters()), self.max_grad_norm\n )\n\n return True\n\n def evaluate_batch(self, batch, stage):\n \"\"\"Evaluate one batch, override for different procedure than train.\n\n The default implementation depends on two methods being defined\n with a particular behavior:\n\n * ``compute_forward()``\n * ``compute_objectives()``\n\n Arguments\n ---------\n batch : list of torch.Tensors\n Batch of data to use for evaluation. Default implementation assumes\n this batch has two elements: inputs and targets.\n stage : Stage\n The stage of the experiment: Stage.VALID, Stage.TEST\n\n Returns\n -------\n detached loss\n \"\"\"\n\n out = self.compute_forward(batch, stage=stage)\n loss = self.compute_objectives(out, batch, stage=stage)\n return loss.detach().cpu()\n\n def fit(\n self,\n epoch_counter,\n train_set,\n valid_set=None,\n progressbar=None,\n train_loader_kwargs={},\n valid_loader_kwargs={},\n ):\n \"\"\"Iterate epochs and datasets to improve objective.\n\n Relies on the existence of multiple functions that can (or should) be\n overridden. 
The following methods are used and expected to have a\n certain behavior:\n\n * ``fit_batch()``\n * ``evaluate_batch()``\n * ``update_average()``\n\n If the initialization was done with distributed_count > 0 and the\n distributed_backend is ddp, this will generally handle multiprocess\n logic, like splitting the training data into subsets for each device and\n only saving a checkpoint on the main process.\n\n Arguments\n ---------\n epoch_counter : iterable\n Each call should return an integer indicating the epoch count.\n train_set : Dataset, DataLoader\n A set of data to use for training. If a Dataset is given, a\n DataLoader is automatically created. If a DataLoader is given, it is\n used directly.\n valid_set : Dataset, DataLoader\n A set of data to use for validation. If a Dataset is given, a\n DataLoader is automatically created. If a DataLoader is given, it is\n used directly.\n train_loader_kwargs : dict\n Kwargs passed to `make_dataloader()` for making the train_loader\n (if train_set is a Dataset, not DataLoader).\n E.G. batch_size, num_workers.\n DataLoader kwargs are all valid.\n valid_loader_kwargs : dict\n Kwargs passed to `make_dataloader()` for making the valid_loader\n (if valid_set is a Dataset, not DataLoader).\n E.g., batch_size, num_workers.\n DataLoader kwargs are all valid.\n progressbar : bool\n Whether to display the progress of each epoch in a progressbar.\n \"\"\"\n\n if not (\n isinstance(train_set, DataLoader)\n or isinstance(train_set, LoopedLoader)\n ):\n train_set = self.make_dataloader(\n train_set, stage=sb.Stage.TRAIN, **train_loader_kwargs\n )\n if valid_set is not None and not (\n isinstance(valid_set, DataLoader)\n or isinstance(valid_set, LoopedLoader)\n ):\n valid_set = self.make_dataloader(\n valid_set,\n stage=sb.Stage.VALID,\n ckpt_prefix=None,\n **valid_loader_kwargs,\n )\n\n self.on_fit_start()\n\n if progressbar is None:\n progressbar = not self.noprogressbar\n\n # Iterate epochs\n for epoch in epoch_counter:\n # Training stage\n self.on_stage_start(Stage.TRAIN, epoch)\n self.modules.train()\n\n # Reset nonfinite count to 0 each epoch\n self.nonfinite_count = 0\n\n if self.train_sampler is not None and hasattr(\n self.train_sampler, \"set_epoch\"\n ):\n self.train_sampler.set_epoch(epoch)\n\n # Time since last intra-epoch checkpoint\n last_ckpt_time = time.time()\n\n # Only show progressbar if requested and main_process\n enable = progressbar and sb.utils.distributed.if_main_process()\n with tqdm(\n train_set,\n initial=self.step,\n dynamic_ncols=True,\n disable=not enable,\n ) as t:\n for batch in t:\n if self._optimizer_step_limit_exceeded:\n logger.info(\"Train iteration limit exceeded\")\n break\n self.step += 1\n loss = self.fit_batch(batch)\n self.avg_train_loss = self.update_average(\n loss, self.avg_train_loss\n )\n t.set_postfix(train_loss=self.avg_train_loss)\n\n # Debug mode only runs a few batches\n if self.debug and self.step == self.debug_batches:\n break\n\n if (\n self.checkpointer is not None\n and self.ckpt_interval_minutes > 0\n and time.time() - last_ckpt_time\n >= self.ckpt_interval_minutes * 60.0\n ):\n # This should not use run_on_main, because that\n # includes a DDP barrier. 
That eventually leads to a\n # crash when the processes'\n # time.time() - last_ckpt_time differ and some\n # processes enter this block while others don't,\n # missing the barrier.\n if sb.utils.distributed.if_main_process():\n self._save_intra_epoch_ckpt()\n last_ckpt_time = time.time()\n\n # Run train \"on_stage_end\" on all processes\n self.on_stage_end(Stage.TRAIN, self.avg_train_loss, epoch)\n self.avg_train_loss = 0.0\n self.step = 0\n\n # Validation stage\n if valid_set is not None:\n self.on_stage_start(Stage.VALID, epoch)\n self.modules.eval()\n avg_valid_loss = 0.0\n with torch.no_grad():\n for batch in tqdm(\n valid_set, dynamic_ncols=True, disable=not enable\n ):\n self.step += 1\n loss = self.evaluate_batch(batch, stage=Stage.VALID)\n avg_valid_loss = self.update_average(\n loss, avg_valid_loss\n )\n\n # Debug mode only runs a few batches\n if self.debug and self.step == self.debug_batches:\n break\n\n # Only run validation \"on_stage_end\" on main process\n self.step = 0\n run_on_main(\n self.on_stage_end,\n args=[Stage.VALID, avg_valid_loss, epoch],\n )\n\n # Debug mode only runs a few epochs\n if (\n self.debug\n and epoch == self.debug_epochs\n or self._optimizer_step_limit_exceeded\n ):\n break\n\n @property\n def _optimizer_step_limit_exceeded(self):\n return (\n self.optimizer_step_limit is not None\n and self.optimizer_step >= self.optimizer_step_limit\n )\n\n def _save_intra_epoch_ckpt(self):\n \"\"\"Saves a CKPT with specific intra-epoch flag.\"\"\"\n self.checkpointer.save_and_keep_only(\n end_of_epoch=False,\n num_to_keep=1,\n ckpt_predicate=lambda c: INTRA_EPOCH_CKPT_FLAG in c.meta,\n meta={INTRA_EPOCH_CKPT_FLAG: True},\n verbosity=logging.DEBUG,\n )\n\n def _compile_jit(self):\n \"\"\"Compile requested modules with ``torch.jit.script``.\"\"\"\n if self.jit_module_keys is None:\n return\n\n for name in self.jit_module_keys:\n if name not in self.modules:\n raise ValueError(\n \"module\" + name + \" is not defined in your hparams file.\"\n )\n module = torch.jit.script(self.modules[name])\n self.modules[name] = module.to(self.device)\n\n def _wrap_distributed(self):\n \"\"\"Wrap modules with distributed wrapper when requested.\"\"\"\n if not self.distributed_launch and not self.data_parallel_backend:\n return\n elif self.distributed_launch:\n for name, module in self.modules.items():\n if any(p.requires_grad for p in module.parameters()):\n module = SyncBatchNorm.convert_sync_batchnorm(module)\n module = DDP(\n module,\n device_ids=[self.device],\n find_unused_parameters=self.find_unused_parameters,\n )\n self.modules[name] = module\n else:\n # data_parallel_backend\n for name, module in self.modules.items():\n if any(p.requires_grad for p in module.parameters()):\n module = DP(module)\n self.modules[name] = module\n\n def evaluate(\n self,\n test_set,\n max_key=None,\n min_key=None,\n progressbar=None,\n test_loader_kwargs={},\n ):\n \"\"\"Iterate test_set and evaluate brain performance. By default, loads\n the best-performing checkpoint (as recorded using the checkpointer).\n\n Arguments\n ---------\n test_set : Dataset, DataLoader\n If a DataLoader is given, it is iterated directly. 
Otherwise passed\n to ``self.make_dataloader()``.\n max_key : str\n Key to use for finding best checkpoint, passed to\n ``on_evaluate_start()``.\n min_key : str\n Key to use for finding best checkpoint, passed to\n ``on_evaluate_start()``.\n progressbar : bool\n Whether to display the progress in a progressbar.\n test_loader_kwargs : dict\n Kwargs passed to ``make_dataloader()`` if ``test_set`` is not a\n DataLoader. NOTE: ``loader_kwargs[\"ckpt_prefix\"]`` gets\n automatically overwritten to ``None`` (so that the test DataLoader\n is not added to the checkpointer).\n\n Returns\n -------\n average test loss\n \"\"\"\n if progressbar is None:\n progressbar = not self.noprogressbar\n\n if not (\n isinstance(test_set, DataLoader)\n or isinstance(test_set, LoopedLoader)\n ):\n test_loader_kwargs[\"ckpt_prefix\"] = None\n test_set = self.make_dataloader(\n test_set, Stage.TEST, **test_loader_kwargs\n )\n self.on_evaluate_start(max_key=max_key, min_key=min_key)\n self.on_stage_start(Stage.TEST, epoch=None)\n self.modules.eval()\n avg_test_loss = 0.0\n with torch.no_grad():\n for batch in tqdm(\n test_set, dynamic_ncols=True, disable=not progressbar\n ):\n self.step += 1\n loss = self.evaluate_batch(batch, stage=Stage.TEST)\n avg_test_loss = self.update_average(loss, avg_test_loss)\n\n # Debug mode only runs a few batches\n if self.debug and self.step == self.debug_batches:\n break\n\n # Only run evaluation \"on_stage_end\" on main process\n run_on_main(\n self.on_stage_end, args=[Stage.TEST, avg_test_loss, None]\n )\n self.step = 0\n return avg_test_loss\n\n def update_average(self, loss, avg_loss):\n \"\"\"Update running average of the loss.\n\n Arguments\n ---------\n loss : torch.tensor\n detached loss, a single float value.\n avg_loss : float\n current running average.\n\n Returns\n -------\n avg_loss : float\n The average loss.\n \"\"\"\n if torch.isfinite(loss):\n avg_loss -= avg_loss / self.step\n avg_loss += float(loss) / self.step\n return avg_loss\n\n @sb.utils.checkpoints.mark_as_saver\n def _save(self, path):\n save_dict = {\n \"step\": self.step,\n \"avg_train_loss\": self.avg_train_loss,\n \"optimizer_step\": self.optimizer_step,\n }\n with open(path, \"w\") as w:\n w.write(yaml.dump(save_dict))\n\n @sb.utils.checkpoints.mark_as_loader\n def _recover(self, path, end_of_epoch, device):\n del end_of_epoch\n del device\n with open(path) as f:\n save_dict = yaml.safe_load(f)\n self.step = save_dict[\"step\"]\n self.avg_train_loss = save_dict[\"avg_train_loss\"]\n self.optimizer_step = save_dict[\"optimizer_step\"]\n", "path": "speechbrain/core.py" } ]
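The `update_average` method at the end of the file is the usual incremental mean, avg_n = avg_{n-1} + (loss_n - avg_{n-1}) / n, applied only to finite losses. A minimal standalone sketch of the same update rule (plain torch, no speechbrain import needed):

```python
# Standalone sketch of the running-average rule used by Brain.update_average:
# avg_n = avg_{n-1} + (loss_n - avg_{n-1}) / n, skipping non-finite losses.
import torch

def update_average(loss, avg_loss, step):
    if torch.isfinite(loss):
        avg_loss -= avg_loss / step
        avg_loss += float(loss) / step
    return avg_loss

losses = [torch.tensor(0.5), torch.tensor(1.5), torch.tensor(2.0)]
avg = 0.0
for step, loss in enumerate(losses, start=1):
    avg = update_average(loss, avg, step)

print(avg)                                          # running estimate
print(sum(float(x) for x in losses) / len(losses))  # matches, up to rounding
```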
diff --git a/speechbrain/core.py b/speechbrain/core.py index 398b12a1e6..95d9062299 100644 --- a/speechbrain/core.py +++ b/speechbrain/core.py @@ -1233,6 +1233,7 @@ def evaluate( self.on_stage_end, args=[Stage.TEST, avg_test_loss, None] ) self.step = 0 + return avg_test_loss def update_average(self, loss, avg_loss): """Update running average of the loss.
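The one-line diff above makes `evaluate()` hand the average test loss back to the caller instead of returning `None`. A minimal usage sketch, reusing the `SimpleBrain` example from the class docstring (assumes torch and speechbrain are installed; the random data is purely illustrative):

```python
# Sketch: consume the value that evaluate() now returns.
import torch
from torch.optim import SGD
from speechbrain.core import Brain

class SimpleBrain(Brain):
    def compute_forward(self, batch, stage):
        return self.modules.model(batch[0])

    def compute_objectives(self, predictions, batch, stage):
        return torch.nn.functional.l1_loss(predictions, batch[0])

model = torch.nn.Linear(in_features=10, out_features=10)
brain = SimpleBrain({"model": model}, opt_class=lambda x: SGD(x, 0.1))
data = ([torch.rand(10, 10), torch.rand(10, 10)],)

brain.fit(range(1), data)
avg_test_loss = brain.evaluate(data, progressbar=False)  # was None before the patch
print(f"average test loss: {avg_test_loss:.4f}")
```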
StackStorm__st2-5092
Remove obsolete 'bin/st2-check-license' The project ships https://github.com/StackStorm/st2/blob/master/st2common/bin/st2-check-license which is irrelevant now, considering ongoing EWC features integration in the st2 core. The task is to find all the places: https://github.com/search?q=org%3AStackStorm+st2-check-license&type=code and remove the `st2-check-license` scripts. This is an easy `good first issue` for someone willing to start contributing and exploring the st2 system. Bonus points to find other obsolete, outdated, irrelevant scripts in st2 core. Help wanted!
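Most of the work the issue asks for is locating every remaining reference to the script. A throwaway sketch of one way to do that locally (purely illustrative, not part of st2; the org-wide GitHub code search linked above serves the same purpose):

```python
# Throwaway helper: walk a StackStorm checkout and print every line that
# still mentions the obsolete script, so each reference can be removed.
import os
import sys

NEEDLE = "st2-check-license"

def find_references(root="."):
    for dirpath, dirnames, filenames in os.walk(root):
        # Skip VCS metadata and virtualenvs to keep the scan quick.
        dirnames[:] = [d for d in dirnames if d not in (".git", ".tox", "virtualenv")]
        for name in filenames:
            path = os.path.join(dirpath, name)
            try:
                with open(path, "r", encoding="utf-8", errors="ignore") as fp:
                    for lineno, line in enumerate(fp, start=1):
                        if NEEDLE in line:
                            print(f"{path}:{lineno}: {line.rstrip()}")
            except OSError:
                continue

if __name__ == "__main__":
    find_references(sys.argv[1] if len(sys.argv) > 1 else ".")
```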
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport os.path\n\nfrom setuptools import setup, find_packages\n\nfrom dist_utils import fetch_requirements\nfrom dist_utils import apply_vagrant_workaround\nfrom dist_utils import get_version_string\n\nST2_COMPONENT = 'st2common'\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nREQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')\nINIT_FILE = os.path.join(BASE_DIR, 'st2common/__init__.py')\n\ninstall_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)\n\napply_vagrant_workaround()\nsetup(\n name=ST2_COMPONENT,\n version=get_version_string(INIT_FILE),\n description='{} StackStorm event-driven automation platform component'.format(ST2_COMPONENT),\n author='StackStorm',\n author_email='[email protected]',\n license='Apache License (2.0)',\n url='https://stackstorm.com/',\n install_requires=install_reqs,\n dependency_links=dep_links,\n test_suite=ST2_COMPONENT,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['setuptools', 'tests']),\n scripts=[\n 'bin/st2-bootstrap-rmq',\n 'bin/st2-cleanup-db',\n 'bin/st2-register-content',\n 'bin/st2-purge-executions',\n 'bin/st2-purge-trigger-instances',\n 'bin/st2-run-pack-tests',\n 'bin/st2ctl',\n 'bin/st2-generate-symmetric-crypto-key',\n 'bin/st2-self-check',\n 'bin/st2-track-result',\n 'bin/st2-validate-pack-config',\n 'bin/st2-check-license',\n 'bin/st2-pack-install',\n 'bin/st2-pack-download',\n 'bin/st2-pack-setup-virtualenv'\n ],\n entry_points={\n 'st2common.metrics.driver': [\n 'statsd = st2common.metrics.drivers.statsd_driver:StatsdDriver',\n 'noop = st2common.metrics.drivers.noop_driver:NoopDriver',\n 'echo = st2common.metrics.drivers.echo_driver:EchoDriver'\n ],\n 'st2common.rbac.backend': [\n 'noop = st2common.rbac.backends.noop:NoOpRBACBackend'\n ],\n }\n)\n", "path": "st2common/setup.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport os.path\n\nfrom setuptools import setup, find_packages\n\nfrom dist_utils import fetch_requirements\nfrom dist_utils import apply_vagrant_workaround\nfrom dist_utils import get_version_string\n\nST2_COMPONENT = 'st2common'\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nREQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')\nINIT_FILE = os.path.join(BASE_DIR, 'st2common/__init__.py')\n\ninstall_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)\n\napply_vagrant_workaround()\nsetup(\n name=ST2_COMPONENT,\n version=get_version_string(INIT_FILE),\n description='{} StackStorm event-driven automation platform component'.format(ST2_COMPONENT),\n author='StackStorm',\n author_email='[email protected]',\n license='Apache License (2.0)',\n url='https://stackstorm.com/',\n install_requires=install_reqs,\n dependency_links=dep_links,\n test_suite=ST2_COMPONENT,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['setuptools', 'tests']),\n scripts=[\n 'bin/st2-bootstrap-rmq',\n 'bin/st2-cleanup-db',\n 'bin/st2-register-content',\n 'bin/st2-purge-executions',\n 'bin/st2-purge-trigger-instances',\n 'bin/st2-run-pack-tests',\n 'bin/st2ctl',\n 'bin/st2-generate-symmetric-crypto-key',\n 'bin/st2-self-check',\n 'bin/st2-track-result',\n 'bin/st2-validate-pack-config',\n 'bin/st2-pack-install',\n 'bin/st2-pack-download',\n 'bin/st2-pack-setup-virtualenv'\n ],\n entry_points={\n 'st2common.metrics.driver': [\n 'statsd = st2common.metrics.drivers.statsd_driver:StatsdDriver',\n 'noop = st2common.metrics.drivers.noop_driver:NoopDriver',\n 'echo = st2common.metrics.drivers.echo_driver:EchoDriver'\n ],\n 'st2common.rbac.backend': [\n 'noop = st2common.rbac.backends.noop:NoOpRBACBackend'\n ],\n }\n)\n", "path": "st2common/setup.py" } ]
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 675b6b48c4..6a867cf8e2 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -27,6 +27,11 @@ Fixed Contributed by @guzzijones +Removed +~~~~~~~~ +* Removed check-licence script (cleanup) #5092 + Contributed by @kroustou + 3.3.0 - October 06, 2020 ------------------------ diff --git a/st2common/bin/st2-check-license b/st2common/bin/st2-check-license deleted file mode 100755 index e9d56e362f..0000000000 --- a/st2common/bin/st2-check-license +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- -#!/usr/bin/env bash -# Licensed to the StackStorm, Inc ('StackStorm') under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if [[ $# -eq 0 ]] ; then - echo "No license key entered" - echo "Usage: $0 <license key to check>" - exit 1 -fi - -LICENSE=$@ -BWC_LICENSE_CHECK_URL="https://${LICENSE}:@packagecloud.io/install/repositories/StackStorm/enterprise/script.deb.sh" -LICENSE_CURL_TEST=`curl --output /dev/null --silent --fail ${BWC_LICENSE_CHECK_URL}` - -if [ $? -ne 0 ]; then - echo -e "[x] License is not valid" - exit 2 -else - echo -e "[*] License is valid" - exit 0 -fi diff --git a/st2common/setup.py b/st2common/setup.py index b33a6f14c8..f68679af8c 100644 --- a/st2common/setup.py +++ b/st2common/setup.py @@ -57,7 +57,6 @@ 'bin/st2-self-check', 'bin/st2-track-result', 'bin/st2-validate-pack-config', - 'bin/st2-check-license', 'bin/st2-pack-install', 'bin/st2-pack-download', 'bin/st2-pack-setup-virtualenv'
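After a cleanup like this, a quick sanity check is to confirm that every path left in a component's `scripts=` list still exists on disk. A hedged sketch of such a check (hypothetical helper, simple regex heuristic over setup.py, not shipped with st2):

```python
# Hypothetical post-cleanup check: every "bin/..." entry referenced by a
# component's setup.py should point at a file that actually exists.
import os
import re
import sys

def missing_scripts(component_dir):
    with open(os.path.join(component_dir, "setup.py"), encoding="utf-8") as fp:
        source = fp.read()
    # Crude but sufficient here: pick up every quoted 'bin/...' entry.
    return [
        rel_path
        for rel_path in re.findall(r"['\"](bin/[\w.-]+)['\"]", source)
        if not os.path.exists(os.path.join(component_dir, rel_path))
    ]

if __name__ == "__main__":
    component = sys.argv[1] if len(sys.argv) > 1 else "st2common"
    missing = missing_scripts(component)
    for rel_path in missing:
        print(f"{component}/setup.py references a missing file: {rel_path}")
    sys.exit(1 if missing else 0)
```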
pfnet__pytorch-pfn-extras-698
FP16 evaluator Add an option to `ppe.training.Evaluator` to allow `autocast` to be applied.
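For context, the patched `eval_step` further down wraps the forward pass in `self._autocast.autocast()`, which is what an autocast/FP16 option for the evaluator hooks into. A plain-torch sketch of the effect (no pytorch-pfn-extras APIs; the model and shapes are illustrative):

```python
# Illustration of what an autocast option during evaluation amounts to:
# the forward pass runs under torch.autocast so fp16 kernels are used
# where it is safe to do so.
import torch

model = torch.nn.Linear(16, 4)
batch = torch.randn(8, 16)

use_cuda = torch.cuda.is_available()
if use_cuda:
    model = model.cuda()
    batch = batch.cuda()

model.eval()
with torch.no_grad():
    if use_cuda:
        with torch.autocast(device_type="cuda", dtype=torch.float16):
            out = model(batch)
    else:
        out = model(batch)  # CPU fallback, no autocast

print(out.dtype)  # torch.float16 under autocast on GPU, torch.float32 otherwise
```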
[ { "content": "import contextlib\nimport dataclasses\nimport warnings\nfrom typing import Any, Dict, Generator, Iterable, Mapping, Optional\n\nimport torch\nfrom pytorch_pfn_extras.handler._code_block import forward, update_parameters\nfrom pytorch_pfn_extras.runtime import _autocast\n\n\n# Deprecated: kept for backward compatibility of user code\[email protected]\ndef torch_autocast(enabled: bool = True) -> Generator[None, None, None]:\n if _autocast._cuda_amp_available:\n with torch.cuda.amp.autocast(enabled): # type: ignore[no-untyped-call]\n yield\n else:\n yield\n\n\ndef _normalize_outputs(outputs: Any) -> Dict[str, Any]:\n target: Dict[str, Any]\n if isinstance(outputs, tuple) and hasattr(outputs, \"_fields\"):\n # namedtuple\n target = outputs._asdict() # type: ignore[attr-defined]\n elif isinstance(outputs, dict):\n target = outputs\n elif isinstance(outputs, (list, tuple)):\n target = {str(i): out for i, out in enumerate(outputs)}\n else:\n target = {\"0\": outputs}\n return target\n\n\nclass BaseLogic:\n def __init__(self, options: Optional[Dict[str, Any]] = None):\n super().__init__()\n options = options.copy() if options else {}\n self.consume_options(options)\n\n def consume_options(self, options: Dict[str, Any]) -> None:\n \"\"\"A method to update options of Logic.\n\n Note that the given dict will be modified.\n\n Args:\n options (dict): Option key-values to be set.\n \"\"\"\n pass\n\n def train_epoch_begin(\n self,\n models: Mapping[str, torch.nn.Module],\n epoch: int,\n loader: Iterable[Any],\n ) -> None:\n \"\"\"A method called when starting a new epoch of training.\n\n Args:\n epoch (int): Number of epochs already finished.\n models (dict of torch.nn.Module): The models.\n loader (torch.utils.data.DataLoader): The data loder.\n \"\"\"\n pass\n\n def train_epoch_end(\n self,\n models: Mapping[str, torch.nn.Module],\n epoch: int,\n ) -> None:\n \"\"\"A method called when completing an epoch of training.\n\n Args:\n epoch (int): Number of epochs already finished.\n models (dict of torch.nn.Module): The models.\n \"\"\"\n pass\n\n def train_step(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n \"\"\"A method invokes the models forward and backward passes.\n\n Optimizing is left to `train_step_optimizers` since maybe the user\n would like to aggregate the gradients of several iterations.\n\n Args:\n models (dict of torch.nn.Module):\n The models.\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of training steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n pass\n\n def train_step_optimizers(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n ) -> None:\n \"\"\"A method in charge of stepping the provided optimizers.\n\n Args:\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of steps already finished.\n \"\"\"\n pass\n\n def train_validation_begin(\n self, models: Mapping[str, torch.nn.Module]\n ) -> None:\n \"\"\"A method called when starting a validation.\n\n Args:\n models (dict of torch.nn.Module): The models.\n \"\"\"\n pass\n\n def train_validation_end(\n self,\n models: Mapping[str, torch.nn.Module],\n ) -> None:\n \"\"\"A method called when the validation completes.\n\n Args:\n models (dict of torch.nn.Module): 
The models.\n \"\"\"\n pass\n\n def eval_step(\n self,\n models: Mapping[str, torch.nn.Module],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n \"\"\"A method for an evaluation step.\n\n Args:\n models (dict of torch.nn.Module): The models.\n batch_idx (int): Number of steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n pass\n\n\nclass Logic(BaseLogic):\n def __init__(\n self,\n model_name: str = \"main\",\n options: Optional[Dict[str, Any]] = None,\n ) -> None:\n \"\"\"A set of methods that defines the training logic.\n\n Args:\n model_name (str): Name of the model. Default is ``'main'``.\n options (dict, optional): The configuration options.\n\n * ``'backward_outputs'`` (list of str):\n A list of names of outputs that require compution of\n the gradient.\n * ``'autocast'`` (bool or dict):\n If ``True``, ``torch.autocast`` (or ``torch.cuda.amp.autocast`` for PyTorch 1.9 or earlier) is enabled,\n using ``{\"enabled\": True, \"device_type\": \"cuda\"}``\n as autocast options.\n The default is ``False`` which corresponds to the following options\n ``{\"enabled\": False, \"device_type\": \"cuda\"}``.\n If dict, options are passed to ``torch.autocast``.\n * ``'grad_scaler'`` (torch.cuda.amp.GradScaler):\n A gradient scaler that outputs are applied to.\n \"\"\"\n super().__init__(options)\n self.model_name = model_name\n\n def consume_options(self, options: Dict[str, Any]) -> None:\n super().consume_options(options)\n\n self.backward_outputs = options.pop(\"backward_outputs\", None)\n self._grad_scaler = options.pop(\"grad_scaler\", None)\n\n self._backward_fn = options.pop(\"backward_function\", None)\n autocast_options = options.pop(\"autocast\", False)\n if isinstance(autocast_options, bool):\n autocast_options = {\n \"enabled\": autocast_options,\n \"device_type\": \"cuda\",\n }\n self._autocast = _autocast._AutocastManager(\n autocast_options, self._grad_scaler is not None\n )\n\n if self._grad_scaler is not None:\n if not isinstance(self._grad_scaler, torch.cuda.amp.GradScaler):\n raise RuntimeError(\n \"grad_scaler should be a \"\n \"torch.cuda.amp.GradScaler object\"\n )\n\n def _forward(self, model: torch.nn.Module, batch: Any) -> Any:\n if isinstance(batch, tuple) and hasattr(batch, \"_fields\"):\n # namedtuple\n return model(batch)\n if isinstance(batch, dict):\n return model(**batch)\n if isinstance(batch, (list, tuple)):\n return model(*batch)\n return model(batch)\n\n def _backward(self, outputs: Dict[str, Any]) -> None:\n to_backward = set()\n if self.backward_outputs is None:\n for _, v in outputs.items():\n if (\n isinstance(v, torch.Tensor)\n and v.grad_fn is not None\n and (\n (\n v.numel() == 1\n and (\n v.dtype.is_floating_point or v.dtype.is_complex\n )\n )\n )\n ):\n to_backward.add(v)\n else:\n # If backward is requested, we tried to execute it no matter the\n # shape or type of the tensor to make the user aware\n backward_outputs = self.backward_outputs\n if type(backward_outputs) is str:\n backward_outputs = (backward_outputs,)\n for k in backward_outputs:\n try:\n v = outputs[k]\n if isinstance(v, torch.Tensor) and v.grad_fn is not None:\n to_backward.add(v)\n except KeyError:\n warnings.warn(\n \"Couldn't find requested backward value: \"\n f\"{k} in {outputs.keys()}\"\n )\n\n for v in to_backward:\n if self._backward_fn is None:\n v.backward() # type: ignore[no-untyped-call]\n else:\n self._backward_fn(v)\n\n def train_epoch_begin(\n self,\n models: Mapping[str, 
torch.nn.Module],\n epoch: int,\n loader: Iterable[Any],\n ) -> None:\n \"\"\"A method called when starting a new epoch of training.\n\n Args:\n epoch (int): Number of epochs already finished.\n models (dict of torch.nn.Module): The models.\n loader (torch.utils.data.DataLoader): The data loder.\n \"\"\"\n model = models[self.model_name]\n model.train()\n if hasattr(loader, \"sampler\") and hasattr(\n loader.sampler, \"set_epoch\"\n ): # type: ignore[attr-defined]\n # Needed for `torch.utils.data.DistributedSampler`\n loader.sampler.set_epoch(epoch) # type: ignore[attr-defined]\n\n def train_epoch_end(self, models: Mapping[str, Any], epoch: int) -> None:\n model = models[self.model_name]\n model.eval()\n\n def train_step(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n \"\"\"A method invokes the model forward and backward passes.\n\n Optimizing is left to `train_step_optimizers` since maybe the user\n would like to aggregate the gradients of several iterations.\n\n Args:\n models (dict of torch.nn.Module):\n The models.\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of training steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n with self._autocast.autocast():\n optimizers[self.model_name].zero_grad()\n outs = self._forward(models[self.model_name], batch)\n to_back_outs = _normalize_outputs(outs)\n if self._grad_scaler is not None:\n assert (\n len(to_back_outs) == 1\n ), \"loss scaling with multiple outputs is not supported\"\n to_back_outs = {\n k: self._grad_scaler.scale(v)\n for k, v in to_back_outs.items()\n }\n self._backward(to_back_outs)\n return outs\n\n def train_step_optimizers(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n ) -> None:\n \"\"\"A method in charge of stepping the provided optimizers.\n\n Also a grad scaler will be used if defined.\n\n Args:\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of steps already finished.\n \"\"\"\n optimizer = optimizers[self.model_name]\n if self._grad_scaler is not None:\n self._grad_scaler.step(optimizer)\n self._grad_scaler.update()\n else:\n optimizer.step()\n\n def train_validation_begin(\n self,\n models: Mapping[str, torch.nn.Module],\n ) -> None:\n \"\"\"A method called when starting a validation.\n\n Args:\n models (dict of torch.nn.Module): The models.\n \"\"\"\n model = models[self.model_name]\n model.eval()\n\n def train_validation_end(self, models: Mapping[str, Any]) -> None:\n model = models[self.model_name]\n model.train()\n\n def eval_step(\n self,\n models: Mapping[str, torch.nn.Module],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n \"\"\"A method for an evaluation step.\n\n Args:\n models (dict of torch.nn.Module): The models.\n batch_idx (int): Number of steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n model = models[self.model_name]\n outs = self._forward(model, batch)\n return outs\n\n\nclass CodeBlockLogic(BaseLogic):\n def __init__(\n self,\n model_name: str = \"main\",\n options: Optional[Dict[str, Any]] = None,\n ) -> None:\n \"\"\"A set of methods that defines the training logic.\n\n Args:\n model_name (str): Name of the model. 
Default is ``'main'``.\n options (dict, optional): The configuration options.\n\n * ``'backward_outputs'`` (list of str):\n A list of names of outputs that require compution of\n the gradient.\n \"\"\"\n super().__init__(options)\n self.model_name = model_name\n\n def consume_options(self, options: Dict[str, Any]) -> None:\n super().consume_options(options)\n\n self.backward_outputs = options.pop(\"backward_outputs\", None)\n if self.backward_outputs is not None:\n assert isinstance(self.backward_outputs, str)\n\n def train_epoch_begin(\n self,\n models: Mapping[str, torch.nn.Module],\n epoch: int,\n loader: Iterable[Any],\n ) -> None:\n \"\"\"A method called when starting a new epoch of training.\n\n Args:\n epoch (int): Number of epochs already finished.\n models (dict of torch.nn.Module): The models.\n loader (torch.utils.data.DataLoader): The data loder.\n \"\"\"\n model = models[self.model_name]\n model.train()\n if hasattr(loader, \"sampler\") and hasattr(\n loader.sampler, \"set_epoch\"\n ): # type: ignore[attr-defined]\n # Needed for `torch.utils.data.DistributedSampler`\n loader.sampler.set_epoch(epoch) # type: ignore[attr-defined]\n\n def train_epoch_end(self, models: Mapping[str, Any], epoch: int) -> None:\n model = models[self.model_name]\n model.eval()\n\n def train_step(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n \"\"\"A method invokes the model forward and backward passes.\n\n Optimizing is left to `train_step_optimizers` since maybe the user\n would like to aggregate the gradients of several iterations.\n\n Args:\n models (dict of torch.nn.Module):\n The models.\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of training steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n module = models[self.model_name]\n\n return update_parameters(\n module,\n list(optimizers.values()),\n self.backward_outputs,\n None,\n )(batch)\n\n def train_validation_begin(\n self,\n models: Mapping[str, torch.nn.Module],\n ) -> None:\n \"\"\"A method called when starting a validation.\n\n Args:\n models (dict of torch.nn.Module): The models.\n \"\"\"\n model = models[self.model_name]\n model.eval()\n\n def train_validation_end(self, models: Mapping[str, Any]) -> None:\n model = models[self.model_name]\n model.train()\n\n def eval_step(\n self,\n models: Mapping[str, torch.nn.Module],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n \"\"\"A method for an evaluation step.\n\n Args:\n models (dict of torch.nn.Module): The models.\n batch_idx (int): Number of steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n model = models[self.model_name]\n outs = forward(model)(batch)\n return outs\n\n\[email protected]\nclass ClousureModelOutput:\n outs: Any\n loss: torch.Tensor\n\n def __float__(self) -> float:\n return float(self.loss)\n\n\nclass ClousureLogic(Logic):\n def consume_options(self, options: Dict[str, Any]) -> None:\n super().consume_options(options)\n if self._grad_scaler is not None:\n raise RuntimeError(\n \"torch.cuda.amp.GradScaler does not support clousure step mode.\"\n )\n\n def train_step(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n 
\"\"\"A method invokes the model forward and backward passes and performs an optimization step.\n\n Args:\n models (dict of torch.nn.Module):\n The models.\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of training steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n\n def clousure() -> ClousureModelOutput:\n with self._autocast.autocast():\n optimizers[self.model_name].zero_grad()\n outs = self._forward(models[self.model_name], batch)\n to_back_outs = _normalize_outputs(outs)\n if len(to_back_outs) > 1:\n raise RuntimeError(\n \"Clousure step with multiple outputs is not supported.\"\n )\n elif len(to_back_outs) == 0:\n raise RuntimeError(\"No backward target found.\")\n\n self._backward(to_back_outs)\n (loss,) = to_back_outs.values()\n return ClousureModelOutput(\n outs=outs,\n loss=loss,\n )\n\n optimizer = optimizers[self.model_name]\n clousure_model_output: ClousureModelOutput = optimizer.step(clousure) # type: ignore\n if not isinstance(clousure_model_output, ClousureModelOutput):\n raise RuntimeError(\n f\"{type(clousure_model_output)} type object returned from optimizer.step with clousure. optimizer.step is expected to return ppe.handler.ClousureModelOutput.\"\n )\n return clousure_model_output.outs\n\n def train_step_optimizers(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n ) -> None:\n \"\"\"In clousure mode, the stepping of the optimizer cannot be changed.\n\n If you want to change the stepping of the optimizer, please use the normal Logic class.\n\n Args:\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of steps already finished.\n \"\"\"\n pass\n", "path": "pytorch_pfn_extras/handler/_logic.py" } ]
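One detail of the file above worth calling out: whatever `compute_forward` or the model returns (single tensor, list/tuple, dict, or namedtuple) is folded into a plain dict by `_normalize_outputs` before backward targets are selected. A standalone re-implementation of that mapping, so it can be tried without installing pytorch-pfn-extras:

```python
# Mirrors the _normalize_outputs helper above (re-implemented here so the
# snippet runs without pytorch-pfn-extras).
from collections import namedtuple
import torch

def normalize_outputs(outputs):
    if isinstance(outputs, tuple) and hasattr(outputs, "_fields"):  # namedtuple
        return outputs._asdict()
    if isinstance(outputs, dict):                                   # already a dict
        return outputs
    if isinstance(outputs, (list, tuple)):                          # positional outputs
        return {str(i): out for i, out in enumerate(outputs)}
    return {"0": outputs}                                           # single value

Loss = namedtuple("Loss", ["ce", "aux"])
print(normalize_outputs(torch.tensor(1.0)))         # {'0': tensor(1.)}
print(normalize_outputs([torch.tensor(1.0), 2.0]))  # {'0': tensor(1.), '1': 2.0}
print(normalize_outputs(Loss(ce=0.3, aux=0.1)))     # {'ce': 0.3, 'aux': 0.1}
```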
[ { "content": "import contextlib\nimport dataclasses\nimport warnings\nfrom typing import Any, Dict, Generator, Iterable, Mapping, Optional\n\nimport torch\nfrom pytorch_pfn_extras.handler._code_block import forward, update_parameters\nfrom pytorch_pfn_extras.runtime import _autocast\n\n\n# Deprecated: kept for backward compatibility of user code\[email protected]\ndef torch_autocast(enabled: bool = True) -> Generator[None, None, None]:\n if _autocast._cuda_amp_available:\n with torch.cuda.amp.autocast(enabled): # type: ignore[no-untyped-call]\n yield\n else:\n yield\n\n\ndef _normalize_outputs(outputs: Any) -> Dict[str, Any]:\n target: Dict[str, Any]\n if isinstance(outputs, tuple) and hasattr(outputs, \"_fields\"):\n # namedtuple\n target = outputs._asdict() # type: ignore[attr-defined]\n elif isinstance(outputs, dict):\n target = outputs\n elif isinstance(outputs, (list, tuple)):\n target = {str(i): out for i, out in enumerate(outputs)}\n else:\n target = {\"0\": outputs}\n return target\n\n\nclass BaseLogic:\n def __init__(self, options: Optional[Dict[str, Any]] = None):\n super().__init__()\n options = options.copy() if options else {}\n self.consume_options(options)\n\n def consume_options(self, options: Dict[str, Any]) -> None:\n \"\"\"A method to update options of Logic.\n\n Note that the given dict will be modified.\n\n Args:\n options (dict): Option key-values to be set.\n \"\"\"\n pass\n\n def train_epoch_begin(\n self,\n models: Mapping[str, torch.nn.Module],\n epoch: int,\n loader: Iterable[Any],\n ) -> None:\n \"\"\"A method called when starting a new epoch of training.\n\n Args:\n epoch (int): Number of epochs already finished.\n models (dict of torch.nn.Module): The models.\n loader (torch.utils.data.DataLoader): The data loder.\n \"\"\"\n pass\n\n def train_epoch_end(\n self,\n models: Mapping[str, torch.nn.Module],\n epoch: int,\n ) -> None:\n \"\"\"A method called when completing an epoch of training.\n\n Args:\n epoch (int): Number of epochs already finished.\n models (dict of torch.nn.Module): The models.\n \"\"\"\n pass\n\n def train_step(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n \"\"\"A method invokes the models forward and backward passes.\n\n Optimizing is left to `train_step_optimizers` since maybe the user\n would like to aggregate the gradients of several iterations.\n\n Args:\n models (dict of torch.nn.Module):\n The models.\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of training steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n pass\n\n def train_step_optimizers(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n ) -> None:\n \"\"\"A method in charge of stepping the provided optimizers.\n\n Args:\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of steps already finished.\n \"\"\"\n pass\n\n def train_validation_begin(\n self, models: Mapping[str, torch.nn.Module]\n ) -> None:\n \"\"\"A method called when starting a validation.\n\n Args:\n models (dict of torch.nn.Module): The models.\n \"\"\"\n pass\n\n def train_validation_end(\n self,\n models: Mapping[str, torch.nn.Module],\n ) -> None:\n \"\"\"A method called when the validation completes.\n\n Args:\n models (dict of torch.nn.Module): 
The models.\n \"\"\"\n pass\n\n def eval_step(\n self,\n models: Mapping[str, torch.nn.Module],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n \"\"\"A method for an evaluation step.\n\n Args:\n models (dict of torch.nn.Module): The models.\n batch_idx (int): Number of steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n pass\n\n\nclass Logic(BaseLogic):\n def __init__(\n self,\n model_name: str = \"main\",\n options: Optional[Dict[str, Any]] = None,\n ) -> None:\n \"\"\"A set of methods that defines the training logic.\n\n Args:\n model_name (str): Name of the model. Default is ``'main'``.\n options (dict, optional): The configuration options.\n\n * ``'backward_outputs'`` (list of str):\n A list of names of outputs that require compution of\n the gradient.\n * ``'autocast'`` (bool or dict):\n If ``True``, ``torch.autocast`` (or ``torch.cuda.amp.autocast`` for PyTorch 1.9 or earlier) is enabled,\n using ``{\"enabled\": True, \"device_type\": \"cuda\"}``\n as autocast options.\n The default is ``False`` which corresponds to the following options\n ``{\"enabled\": False, \"device_type\": \"cuda\"}``.\n If dict, options are passed to ``torch.autocast``.\n * ``'grad_scaler'`` (torch.cuda.amp.GradScaler):\n A gradient scaler that outputs are applied to.\n \"\"\"\n super().__init__(options)\n self.model_name = model_name\n\n def consume_options(self, options: Dict[str, Any]) -> None:\n super().consume_options(options)\n\n self.backward_outputs = options.pop(\"backward_outputs\", None)\n self._grad_scaler = options.pop(\"grad_scaler\", None)\n\n self._backward_fn = options.pop(\"backward_function\", None)\n autocast_options = options.pop(\"autocast\", False)\n if isinstance(autocast_options, bool):\n autocast_options = {\n \"enabled\": autocast_options,\n \"device_type\": \"cuda\",\n }\n self._autocast = _autocast._AutocastManager(\n autocast_options, self._grad_scaler is not None\n )\n\n if self._grad_scaler is not None:\n if not isinstance(self._grad_scaler, torch.cuda.amp.GradScaler):\n raise RuntimeError(\n \"grad_scaler should be a \"\n \"torch.cuda.amp.GradScaler object\"\n )\n\n def _forward(self, model: torch.nn.Module, batch: Any) -> Any:\n if isinstance(batch, tuple) and hasattr(batch, \"_fields\"):\n # namedtuple\n return model(batch)\n if isinstance(batch, dict):\n return model(**batch)\n if isinstance(batch, (list, tuple)):\n return model(*batch)\n return model(batch)\n\n def _backward(self, outputs: Dict[str, Any]) -> None:\n to_backward = set()\n if self.backward_outputs is None:\n for _, v in outputs.items():\n if (\n isinstance(v, torch.Tensor)\n and v.grad_fn is not None\n and (\n (\n v.numel() == 1\n and (\n v.dtype.is_floating_point or v.dtype.is_complex\n )\n )\n )\n ):\n to_backward.add(v)\n else:\n # If backward is requested, we tried to execute it no matter the\n # shape or type of the tensor to make the user aware\n backward_outputs = self.backward_outputs\n if type(backward_outputs) is str:\n backward_outputs = (backward_outputs,)\n for k in backward_outputs:\n try:\n v = outputs[k]\n if isinstance(v, torch.Tensor) and v.grad_fn is not None:\n to_backward.add(v)\n except KeyError:\n warnings.warn(\n \"Couldn't find requested backward value: \"\n f\"{k} in {outputs.keys()}\"\n )\n\n for v in to_backward:\n if self._backward_fn is None:\n v.backward() # type: ignore[no-untyped-call]\n else:\n self._backward_fn(v)\n\n def train_epoch_begin(\n self,\n models: Mapping[str, 
torch.nn.Module],\n epoch: int,\n loader: Iterable[Any],\n ) -> None:\n \"\"\"A method called when starting a new epoch of training.\n\n Args:\n epoch (int): Number of epochs already finished.\n models (dict of torch.nn.Module): The models.\n loader (torch.utils.data.DataLoader): The data loder.\n \"\"\"\n model = models[self.model_name]\n model.train()\n if hasattr(loader, \"sampler\") and hasattr(\n loader.sampler, \"set_epoch\"\n ): # type: ignore[attr-defined]\n # Needed for `torch.utils.data.DistributedSampler`\n loader.sampler.set_epoch(epoch) # type: ignore[attr-defined]\n\n def train_epoch_end(self, models: Mapping[str, Any], epoch: int) -> None:\n model = models[self.model_name]\n model.eval()\n\n def train_step(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n \"\"\"A method invokes the model forward and backward passes.\n\n Optimizing is left to `train_step_optimizers` since maybe the user\n would like to aggregate the gradients of several iterations.\n\n Args:\n models (dict of torch.nn.Module):\n The models.\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of training steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n with self._autocast.autocast():\n optimizers[self.model_name].zero_grad()\n outs = self._forward(models[self.model_name], batch)\n to_back_outs = _normalize_outputs(outs)\n if self._grad_scaler is not None:\n assert (\n len(to_back_outs) == 1\n ), \"loss scaling with multiple outputs is not supported\"\n to_back_outs = {\n k: self._grad_scaler.scale(v)\n for k, v in to_back_outs.items()\n }\n self._backward(to_back_outs)\n return outs\n\n def train_step_optimizers(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n ) -> None:\n \"\"\"A method in charge of stepping the provided optimizers.\n\n Also a grad scaler will be used if defined.\n\n Args:\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of steps already finished.\n \"\"\"\n optimizer = optimizers[self.model_name]\n if self._grad_scaler is not None:\n self._grad_scaler.step(optimizer)\n self._grad_scaler.update()\n else:\n optimizer.step()\n\n def train_validation_begin(\n self,\n models: Mapping[str, torch.nn.Module],\n ) -> None:\n \"\"\"A method called when starting a validation.\n\n Args:\n models (dict of torch.nn.Module): The models.\n \"\"\"\n model = models[self.model_name]\n model.eval()\n\n def train_validation_end(self, models: Mapping[str, Any]) -> None:\n model = models[self.model_name]\n model.train()\n\n def eval_step(\n self,\n models: Mapping[str, torch.nn.Module],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n \"\"\"A method for an evaluation step.\n\n Args:\n models (dict of torch.nn.Module): The models.\n batch_idx (int): Number of steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n model = models[self.model_name]\n with self._autocast.autocast():\n outs = self._forward(model, batch)\n return outs\n\n\nclass CodeBlockLogic(BaseLogic):\n def __init__(\n self,\n model_name: str = \"main\",\n options: Optional[Dict[str, Any]] = None,\n ) -> None:\n \"\"\"A set of methods that defines the training logic.\n\n Args:\n model_name 
(str): Name of the model. Default is ``'main'``.\n options (dict, optional): The configuration options.\n\n * ``'backward_outputs'`` (list of str):\n A list of names of outputs that require compution of\n the gradient.\n \"\"\"\n super().__init__(options)\n self.model_name = model_name\n\n def consume_options(self, options: Dict[str, Any]) -> None:\n super().consume_options(options)\n\n self.backward_outputs = options.pop(\"backward_outputs\", None)\n if self.backward_outputs is not None:\n assert isinstance(self.backward_outputs, str)\n\n def train_epoch_begin(\n self,\n models: Mapping[str, torch.nn.Module],\n epoch: int,\n loader: Iterable[Any],\n ) -> None:\n \"\"\"A method called when starting a new epoch of training.\n\n Args:\n epoch (int): Number of epochs already finished.\n models (dict of torch.nn.Module): The models.\n loader (torch.utils.data.DataLoader): The data loder.\n \"\"\"\n model = models[self.model_name]\n model.train()\n if hasattr(loader, \"sampler\") and hasattr(\n loader.sampler, \"set_epoch\"\n ): # type: ignore[attr-defined]\n # Needed for `torch.utils.data.DistributedSampler`\n loader.sampler.set_epoch(epoch) # type: ignore[attr-defined]\n\n def train_epoch_end(self, models: Mapping[str, Any], epoch: int) -> None:\n model = models[self.model_name]\n model.eval()\n\n def train_step(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n \"\"\"A method invokes the model forward and backward passes.\n\n Optimizing is left to `train_step_optimizers` since maybe the user\n would like to aggregate the gradients of several iterations.\n\n Args:\n models (dict of torch.nn.Module):\n The models.\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of training steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n module = models[self.model_name]\n\n return update_parameters(\n module,\n list(optimizers.values()),\n self.backward_outputs,\n None,\n )(batch)\n\n def train_validation_begin(\n self,\n models: Mapping[str, torch.nn.Module],\n ) -> None:\n \"\"\"A method called when starting a validation.\n\n Args:\n models (dict of torch.nn.Module): The models.\n \"\"\"\n model = models[self.model_name]\n model.eval()\n\n def train_validation_end(self, models: Mapping[str, Any]) -> None:\n model = models[self.model_name]\n model.train()\n\n def eval_step(\n self,\n models: Mapping[str, torch.nn.Module],\n batch_idx: int,\n batch: Any,\n ) -> Any:\n \"\"\"A method for an evaluation step.\n\n Args:\n models (dict of torch.nn.Module): The models.\n batch_idx (int): Number of steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n model = models[self.model_name]\n outs = forward(model)(batch)\n return outs\n\n\[email protected]\nclass ClousureModelOutput:\n outs: Any\n loss: torch.Tensor\n\n def __float__(self) -> float:\n return float(self.loss)\n\n\nclass ClousureLogic(Logic):\n def consume_options(self, options: Dict[str, Any]) -> None:\n super().consume_options(options)\n if self._grad_scaler is not None:\n raise RuntimeError(\n \"torch.cuda.amp.GradScaler does not support clousure step mode.\"\n )\n\n def train_step(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n batch: 
Any,\n ) -> Any:\n \"\"\"A method invokes the model forward and backward passes and performs an optimization step.\n\n Args:\n models (dict of torch.nn.Module):\n The models.\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of training steps already finished.\n batch (torch.Tensor, list of torch.Tensor, dict of torch.Tensor):\n Input tensors feeded to the model of the current step.\n \"\"\"\n\n def clousure() -> ClousureModelOutput:\n with self._autocast.autocast():\n optimizers[self.model_name].zero_grad()\n outs = self._forward(models[self.model_name], batch)\n to_back_outs = _normalize_outputs(outs)\n if len(to_back_outs) > 1:\n raise RuntimeError(\n \"Clousure step with multiple outputs is not supported.\"\n )\n elif len(to_back_outs) == 0:\n raise RuntimeError(\"No backward target found.\")\n\n self._backward(to_back_outs)\n (loss,) = to_back_outs.values()\n return ClousureModelOutput(\n outs=outs,\n loss=loss,\n )\n\n optimizer = optimizers[self.model_name]\n clousure_model_output: ClousureModelOutput = optimizer.step(clousure) # type: ignore\n if not isinstance(clousure_model_output, ClousureModelOutput):\n raise RuntimeError(\n f\"{type(clousure_model_output)} type object returned from optimizer.step with clousure. optimizer.step is expected to return ppe.handler.ClousureModelOutput.\"\n )\n return clousure_model_output.outs\n\n def train_step_optimizers(\n self,\n models: Mapping[str, torch.nn.Module],\n optimizers: Mapping[str, torch.optim.Optimizer],\n batch_idx: int,\n ) -> None:\n \"\"\"In clousure mode, the stepping of the optimizer cannot be changed.\n\n If you want to change the stepping of the optimizer, please use the normal Logic class.\n\n Args:\n optimizers (dict of torch.optim.Optimizer):\n The optimizers.\n batch_idx (int):\n Number of steps already finished.\n \"\"\"\n pass\n", "path": "pytorch_pfn_extras/handler/_logic.py" } ]
diff --git a/pytorch_pfn_extras/handler/_logic.py b/pytorch_pfn_extras/handler/_logic.py index af9f78a53..68fbe1092 100644 --- a/pytorch_pfn_extras/handler/_logic.py +++ b/pytorch_pfn_extras/handler/_logic.py @@ -372,7 +372,8 @@ def eval_step( Input tensors feeded to the model of the current step. """ model = models[self.model_name] - outs = self._forward(model, batch) + with self._autocast.autocast(): + outs = self._forward(model, batch) return outs diff --git a/tests/pytorch_pfn_extras_tests/training_tests/test_trainer.py b/tests/pytorch_pfn_extras_tests/training_tests/test_trainer.py index 4cfd57f87..5cfd13d43 100644 --- a/tests/pytorch_pfn_extras_tests/training_tests/test_trainer.py +++ b/tests/pytorch_pfn_extras_tests/training_tests/test_trainer.py @@ -763,20 +763,52 @@ def test_trainer_with_clousure_logic(device, progress_bar, path): @pytest.mark.gpu -def test_trainer_with_autocast(path): [email protected]("autocast_train", [True, False]) [email protected]("autocast_eval", [True, False]) +def test_trainer_with_autocast(path, autocast_train, autocast_eval): if not torch.cuda.is_available(): pytest.skip() - model = MyModel() + + class AutocastCheckModel(MyModel): + def __init__(self, autocast_train, autocast_eval): + super().__init__() + self.autocast_train = autocast_train + self.autocast_eval = autocast_eval + + def forward(self, x): + if self.training: + assert torch.is_autocast_enabled() == self.autocast_train + if not self.training: + assert torch.is_autocast_enabled() == self.autocast_eval + + return super().forward(x) + + model = AutocastCheckModel( + autocast_train=autocast_train, autocast_eval=autocast_eval + ) model_with_loss = MyModelWithLossFn(model) ppe.to(model_with_loss, "cuda") optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + data = torch.utils.data.DataLoader( + [ + { + "x": torch.rand( + 20, + ), + "t": torch.rand( + 10, + ), + } + for i in range(10) + ] + ) extensions = [] - autocast_options = {"autocast": True} + evaluator = engine.create_evaluator( - model_with_loss, device="cuda", options=autocast_options + model_with_loss, device="cuda", options={"autocast": autocast_eval} ) - engine.create_trainer( + trainer = engine.create_trainer( model_with_loss, optimizer, 20, @@ -784,5 +816,7 @@ def test_trainer_with_autocast(path): evaluator=evaluator, extensions=extensions, out_dir=path, - options=autocast_options, + options={"autocast": autocast_train}, ) + + trainer.run(data, data)
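For context, the eval-side change above matters because only work wrapped in `torch.autocast` actually runs in reduced precision. A minimal standalone sketch of that behavior (assuming a CUDA device is available; the model and shapes are illustrative, not the library's API):

```
# Minimal sketch: only the forward pass wrapped in torch.autocast runs in reduced precision.
# Assumes a CUDA device; the Linear model and input shapes are illustrative only.
import torch

model = torch.nn.Linear(20, 10).cuda().eval()
x = torch.rand(4, 20, device="cuda")

with torch.no_grad():
    with torch.autocast(device_type="cuda", enabled=True):
        out_amp = model(x)   # runs under autocast -> float16 on CUDA
    out_fp32 = model(x)      # outside autocast -> stays float32

print(out_amp.dtype, out_fp32.dtype)  # torch.float16 torch.float32
```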
plotly__plotly.py-2011
plotly.figure_factory._gantt.validate_gantt uses the deprecated .ix indexer
This gives the warning:
```
.ix is deprecated. Please use .loc for label based indexing or .iloc for positional indexing
```
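The fix amounts to replacing `.ix` with positional indexing, as the before/after sources below show. A minimal sketch of the same loop pattern used in `validate_gantt` (the DataFrame contents here are illustrative only):

```
# Minimal sketch: replace the deprecated .ix accessor with .iloc for positional row lookup.
# df and its column values are illustrative; only the indexing call changes.
import pandas as pd

df = pd.DataFrame(
    [["Job A", "2009-01-01", "2009-02-28"]],
    columns=["Task", "Start", "Finish"],
)

chart = []
for index in range(len(df.index)):
    task_dict = {}
    for key in df:  # iterates column names
        # Deprecated: task_dict[key] = df.ix[index][key]
        task_dict[key] = df.iloc[index][key]  # positional equivalent, same result here
    chart.append(task_dict)

print(chart)
```

`.iloc` is the positional replacement; `.loc` would be the label-based equivalent if the DataFrame's index were not a default RangeIndex.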
[ { "content": "from __future__ import absolute_import\n\nfrom numbers import Number\n\nimport copy\n\nfrom plotly import exceptions, optional_imports\nimport plotly.colors as clrs\nfrom plotly.figure_factory import utils\nimport plotly.graph_objects as go\n\npd = optional_imports.get_module(\"pandas\")\n\nREQUIRED_GANTT_KEYS = [\"Task\", \"Start\", \"Finish\"]\n\n\ndef _get_corner_points(x0, y0, x1, y1):\n \"\"\"\n Returns the corner points of a scatter rectangle\n\n :param x0: x-start\n :param y0: y-lower\n :param x1: x-end\n :param y1: y-upper\n :return: ([x], [y]), tuple of lists containing the x and y values\n \"\"\"\n\n return ([x0, x1, x1, x0], [y0, y0, y1, y1])\n\n\ndef validate_gantt(df):\n \"\"\"\n Validates the inputted dataframe or list\n \"\"\"\n if pd and isinstance(df, pd.core.frame.DataFrame):\n # validate that df has all the required keys\n for key in REQUIRED_GANTT_KEYS:\n if key not in df:\n raise exceptions.PlotlyError(\n \"The columns in your dataframe must include the \"\n \"following keys: {0}\".format(\", \".join(REQUIRED_GANTT_KEYS))\n )\n\n num_of_rows = len(df.index)\n chart = []\n for index in range(num_of_rows):\n task_dict = {}\n for key in df:\n task_dict[key] = df.ix[index][key]\n chart.append(task_dict)\n\n return chart\n\n # validate if df is a list\n if not isinstance(df, list):\n raise exceptions.PlotlyError(\n \"You must input either a dataframe \" \"or a list of dictionaries.\"\n )\n\n # validate if df is empty\n if len(df) <= 0:\n raise exceptions.PlotlyError(\n \"Your list is empty. It must contain \" \"at least one dictionary.\"\n )\n if not isinstance(df[0], dict):\n raise exceptions.PlotlyError(\"Your list must only \" \"include dictionaries.\")\n return df\n\n\ndef gantt(\n chart,\n colors,\n title,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=False,\n show_hover_fill=True,\n show_colorbar=True,\n):\n \"\"\"\n Refer to create_gantt() for docstring\n \"\"\"\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n\n for index in range(len(chart)):\n task = dict(\n x0=chart[index][\"Start\"],\n x1=chart[index][\"Finish\"],\n name=chart[index][\"Task\"],\n )\n if \"Description\" in chart[index]:\n task[\"description\"] = chart[index][\"Description\"]\n tasks.append(task)\n\n # create a scatter trace for every task group\n scatter_data_dict = dict()\n marker_data_dict = dict()\n\n if show_hover_fill:\n hoverinfo = \"name\"\n else:\n hoverinfo = \"skip\"\n\n scatter_data_template = {\n \"x\": [],\n \"y\": [],\n \"mode\": \"none\",\n \"fill\": \"toself\",\n \"hoverinfo\": hoverinfo,\n }\n\n marker_data_template = {\n \"x\": [],\n \"y\": [],\n \"mode\": \"markers\",\n \"text\": [],\n \"marker\": dict(color=\"\", size=1, opacity=0),\n \"name\": \"\",\n \"showlegend\": False,\n }\n\n # create the list of task names\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n # Is added to task_names if group_tasks is set to False,\n # or if the option is used (True) it only adds them if the\n # name is not already in the list\n if not group_tasks or tn not in task_names:\n task_names.append(tn)\n # Guarantees that for grouped tasks the tasks that are inserted first\n # are shown at the top\n if group_tasks:\n task_names.reverse()\n\n color_index = 0\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n del tasks[index][\"name\"]\n\n # If group_tasks is True, all tasks with the same name belong\n # to the same 
row.\n groupID = index\n if group_tasks:\n groupID = task_names.index(tn)\n tasks[index][\"y0\"] = groupID - bar_width\n tasks[index][\"y1\"] = groupID + bar_width\n\n # check if colors need to be looped\n if color_index >= len(colors):\n color_index = 0\n tasks[index][\"fillcolor\"] = colors[color_index]\n color_id = tasks[index][\"fillcolor\"]\n\n if color_id not in scatter_data_dict:\n scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)\n\n scatter_data_dict[color_id][\"fillcolor\"] = color_id\n scatter_data_dict[color_id][\"name\"] = str(tn)\n scatter_data_dict[color_id][\"legendgroup\"] = color_id\n\n # if there are already values append the gap\n if len(scatter_data_dict[color_id][\"x\"]) > 0:\n # a gap on the scatterplot separates the rectangles from each other\n scatter_data_dict[color_id][\"x\"].append(\n scatter_data_dict[color_id][\"x\"][-1]\n )\n scatter_data_dict[color_id][\"y\"].append(None)\n\n xs, ys = _get_corner_points(\n tasks[index][\"x0\"],\n tasks[index][\"y0\"],\n tasks[index][\"x1\"],\n tasks[index][\"y1\"],\n )\n\n scatter_data_dict[color_id][\"x\"] += xs\n scatter_data_dict[color_id][\"y\"] += ys\n\n # append dummy markers for showing start and end of interval\n if color_id not in marker_data_dict:\n marker_data_dict[color_id] = copy.deepcopy(marker_data_template)\n marker_data_dict[color_id][\"marker\"][\"color\"] = color_id\n marker_data_dict[color_id][\"legendgroup\"] = color_id\n\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x0\"])\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x1\"])\n marker_data_dict[color_id][\"y\"].append(groupID)\n marker_data_dict[color_id][\"y\"].append(groupID)\n\n if \"description\" in tasks[index]:\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n del tasks[index][\"description\"]\n else:\n marker_data_dict[color_id][\"text\"].append(None)\n marker_data_dict[color_id][\"text\"].append(None)\n\n color_index += 1\n\n showlegend = show_colorbar\n\n layout = dict(\n title=title,\n showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode=\"closest\",\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(task_names))),\n range=[-1, len(task_names) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list(\n [\n dict(count=7, label=\"1w\", step=\"day\", stepmode=\"backward\"),\n dict(count=1, label=\"1m\", step=\"month\", stepmode=\"backward\"),\n dict(count=6, label=\"6m\", step=\"month\", stepmode=\"backward\"),\n dict(count=1, label=\"YTD\", step=\"year\", stepmode=\"todate\"),\n dict(count=1, label=\"1y\", step=\"year\", stepmode=\"backward\"),\n dict(step=\"all\"),\n ]\n )\n ),\n type=\"date\",\n ),\n )\n\n data = [scatter_data_dict[k] for k in sorted(scatter_data_dict)]\n data += [marker_data_dict[k] for k in sorted(marker_data_dict)]\n\n # fig = dict(\n # data=data, layout=layout\n # )\n fig = go.Figure(data=data, layout=layout)\n return fig\n\n\ndef gantt_colorscale(\n chart,\n colors,\n title,\n index_col,\n show_colorbar,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=False,\n show_hover_fill=True,\n):\n \"\"\"\n Refer to FigureFactory.create_gantt() for docstring\n \"\"\"\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n 
showlegend = False\n\n for index in range(len(chart)):\n task = dict(\n x0=chart[index][\"Start\"],\n x1=chart[index][\"Finish\"],\n name=chart[index][\"Task\"],\n )\n if \"Description\" in chart[index]:\n task[\"description\"] = chart[index][\"Description\"]\n tasks.append(task)\n\n # create a scatter trace for every task group\n scatter_data_dict = dict()\n # create scatter traces for the start- and endpoints\n marker_data_dict = dict()\n\n if show_hover_fill:\n hoverinfo = \"name\"\n else:\n hoverinfo = \"skip\"\n\n scatter_data_template = {\n \"x\": [],\n \"y\": [],\n \"mode\": \"none\",\n \"fill\": \"toself\",\n \"showlegend\": False,\n \"hoverinfo\": hoverinfo,\n \"legendgroup\": \"\",\n }\n\n marker_data_template = {\n \"x\": [],\n \"y\": [],\n \"mode\": \"markers\",\n \"text\": [],\n \"marker\": dict(color=\"\", size=1, opacity=0),\n \"name\": \"\",\n \"showlegend\": False,\n \"legendgroup\": \"\",\n }\n\n index_vals = []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n # compute the color for task based on indexing column\n if isinstance(chart[0][index_col], Number):\n # check that colors has at least 2 colors\n if len(colors) < 2:\n raise exceptions.PlotlyError(\n \"You must use at least 2 colors in 'colors' if you \"\n \"are using a colorscale. However only the first two \"\n \"colors given will be used for the lower and upper \"\n \"bounds on the colormap.\"\n )\n\n # create the list of task names\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n # Is added to task_names if group_tasks is set to False,\n # or if the option is used (True) it only adds them if the\n # name is not already in the list\n if not group_tasks or tn not in task_names:\n task_names.append(tn)\n # Guarantees that for grouped tasks the tasks that are inserted\n # first are shown at the top\n if group_tasks:\n task_names.reverse()\n\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n del tasks[index][\"name\"]\n\n # If group_tasks is True, all tasks with the same name belong\n # to the same row.\n groupID = index\n if group_tasks:\n groupID = task_names.index(tn)\n tasks[index][\"y0\"] = groupID - bar_width\n tasks[index][\"y1\"] = groupID + bar_width\n\n # unlabel color\n colors = clrs.color_parser(colors, clrs.unlabel_rgb)\n lowcolor = colors[0]\n highcolor = colors[1]\n\n intermed = (chart[index][index_col]) / 100.0\n intermed_color = clrs.find_intermediate_color(lowcolor, highcolor, intermed)\n intermed_color = clrs.color_parser(intermed_color, clrs.label_rgb)\n tasks[index][\"fillcolor\"] = intermed_color\n color_id = tasks[index][\"fillcolor\"]\n\n if color_id not in scatter_data_dict:\n scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)\n\n scatter_data_dict[color_id][\"fillcolor\"] = color_id\n scatter_data_dict[color_id][\"name\"] = str(chart[index][index_col])\n scatter_data_dict[color_id][\"legendgroup\"] = color_id\n\n # relabel colors with 'rgb'\n colors = clrs.color_parser(colors, clrs.label_rgb)\n\n # if there are already values append the gap\n if len(scatter_data_dict[color_id][\"x\"]) > 0:\n # a gap on the scatterplot separates the rectangles from each other\n scatter_data_dict[color_id][\"x\"].append(\n scatter_data_dict[color_id][\"x\"][-1]\n )\n scatter_data_dict[color_id][\"y\"].append(None)\n\n xs, ys = _get_corner_points(\n tasks[index][\"x0\"],\n tasks[index][\"y0\"],\n tasks[index][\"x1\"],\n tasks[index][\"y1\"],\n )\n\n 
scatter_data_dict[color_id][\"x\"] += xs\n scatter_data_dict[color_id][\"y\"] += ys\n\n # append dummy markers for showing start and end of interval\n if color_id not in marker_data_dict:\n marker_data_dict[color_id] = copy.deepcopy(marker_data_template)\n marker_data_dict[color_id][\"marker\"][\"color\"] = color_id\n marker_data_dict[color_id][\"legendgroup\"] = color_id\n\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x0\"])\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x1\"])\n marker_data_dict[color_id][\"y\"].append(groupID)\n marker_data_dict[color_id][\"y\"].append(groupID)\n\n if \"description\" in tasks[index]:\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n del tasks[index][\"description\"]\n else:\n marker_data_dict[color_id][\"text\"].append(None)\n marker_data_dict[color_id][\"text\"].append(None)\n\n # add colorbar to one of the traces randomly just for display\n if show_colorbar is True:\n k = list(marker_data_dict.keys())[0]\n marker_data_dict[k][\"marker\"].update(\n dict(\n colorscale=[[0, colors[0]], [1, colors[1]]],\n showscale=True,\n cmax=100,\n cmin=0,\n )\n )\n\n if isinstance(chart[0][index_col], str):\n index_vals = []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n if len(colors) < len(index_vals):\n raise exceptions.PlotlyError(\n \"Error. The number of colors in 'colors' must be no less \"\n \"than the number of unique index values in your group \"\n \"column.\"\n )\n\n # make a dictionary assignment to each index value\n index_vals_dict = {}\n # define color index\n c_index = 0\n for key in index_vals:\n if c_index > len(colors) - 1:\n c_index = 0\n index_vals_dict[key] = colors[c_index]\n c_index += 1\n\n # create the list of task names\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n # Is added to task_names if group_tasks is set to False,\n # or if the option is used (True) it only adds them if the\n # name is not already in the list\n if not group_tasks or tn not in task_names:\n task_names.append(tn)\n # Guarantees that for grouped tasks the tasks that are inserted\n # first are shown at the top\n if group_tasks:\n task_names.reverse()\n\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n del tasks[index][\"name\"]\n\n # If group_tasks is True, all tasks with the same name belong\n # to the same row.\n groupID = index\n if group_tasks:\n groupID = task_names.index(tn)\n tasks[index][\"y0\"] = groupID - bar_width\n tasks[index][\"y1\"] = groupID + bar_width\n\n tasks[index][\"fillcolor\"] = index_vals_dict[chart[index][index_col]]\n color_id = tasks[index][\"fillcolor\"]\n\n if color_id not in scatter_data_dict:\n scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)\n\n scatter_data_dict[color_id][\"fillcolor\"] = color_id\n scatter_data_dict[color_id][\"legendgroup\"] = color_id\n scatter_data_dict[color_id][\"name\"] = str(chart[index][index_col])\n\n # relabel colors with 'rgb'\n colors = clrs.color_parser(colors, clrs.label_rgb)\n\n # if there are already values append the gap\n if len(scatter_data_dict[color_id][\"x\"]) > 0:\n # a gap on the scatterplot separates the rectangles from each other\n scatter_data_dict[color_id][\"x\"].append(\n scatter_data_dict[color_id][\"x\"][-1]\n )\n scatter_data_dict[color_id][\"y\"].append(None)\n\n xs, ys = _get_corner_points(\n 
tasks[index][\"x0\"],\n tasks[index][\"y0\"],\n tasks[index][\"x1\"],\n tasks[index][\"y1\"],\n )\n\n scatter_data_dict[color_id][\"x\"] += xs\n scatter_data_dict[color_id][\"y\"] += ys\n\n # append dummy markers for showing start and end of interval\n if color_id not in marker_data_dict:\n marker_data_dict[color_id] = copy.deepcopy(marker_data_template)\n marker_data_dict[color_id][\"marker\"][\"color\"] = color_id\n marker_data_dict[color_id][\"legendgroup\"] = color_id\n\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x0\"])\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x1\"])\n marker_data_dict[color_id][\"y\"].append(groupID)\n marker_data_dict[color_id][\"y\"].append(groupID)\n\n if \"description\" in tasks[index]:\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n del tasks[index][\"description\"]\n else:\n marker_data_dict[color_id][\"text\"].append(None)\n marker_data_dict[color_id][\"text\"].append(None)\n\n if show_colorbar is True:\n showlegend = True\n for k in scatter_data_dict:\n scatter_data_dict[k][\"showlegend\"] = showlegend\n # add colorbar to one of the traces randomly just for display\n # if show_colorbar is True:\n # k = list(marker_data_dict.keys())[0]\n # marker_data_dict[k][\"marker\"].update(\n # dict(\n # colorscale=[[0, colors[0]], [1, colors[1]]],\n # showscale=True,\n # cmax=100,\n # cmin=0,\n # )\n # )\n\n layout = dict(\n title=title,\n showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode=\"closest\",\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(task_names))),\n range=[-1, len(task_names) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list(\n [\n dict(count=7, label=\"1w\", step=\"day\", stepmode=\"backward\"),\n dict(count=1, label=\"1m\", step=\"month\", stepmode=\"backward\"),\n dict(count=6, label=\"6m\", step=\"month\", stepmode=\"backward\"),\n dict(count=1, label=\"YTD\", step=\"year\", stepmode=\"todate\"),\n dict(count=1, label=\"1y\", step=\"year\", stepmode=\"backward\"),\n dict(step=\"all\"),\n ]\n )\n ),\n type=\"date\",\n ),\n )\n\n data = [scatter_data_dict[k] for k in sorted(scatter_data_dict)]\n data += [marker_data_dict[k] for k in sorted(marker_data_dict)]\n\n # fig = dict(\n # data=data, layout=layout\n # )\n fig = go.Figure(data=data, layout=layout)\n return fig\n\n\ndef gantt_dict(\n chart,\n colors,\n title,\n index_col,\n show_colorbar,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=False,\n show_hover_fill=True,\n):\n \"\"\"\n Refer to FigureFactory.create_gantt() for docstring\n \"\"\"\n\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n showlegend = False\n\n for index in range(len(chart)):\n task = dict(\n x0=chart[index][\"Start\"],\n x1=chart[index][\"Finish\"],\n name=chart[index][\"Task\"],\n )\n if \"Description\" in chart[index]:\n task[\"description\"] = chart[index][\"Description\"]\n tasks.append(task)\n\n # create a scatter trace for every task group\n scatter_data_dict = dict()\n # create scatter traces for the start- and endpoints\n marker_data_dict = dict()\n\n if show_hover_fill:\n hoverinfo = \"name\"\n else:\n hoverinfo = \"skip\"\n\n scatter_data_template = {\n \"x\": [],\n \"y\": [],\n \"mode\": 
\"none\",\n \"fill\": \"toself\",\n \"hoverinfo\": hoverinfo,\n \"legendgroup\": \"\",\n }\n\n marker_data_template = {\n \"x\": [],\n \"y\": [],\n \"mode\": \"markers\",\n \"text\": [],\n \"marker\": dict(color=\"\", size=1, opacity=0),\n \"name\": \"\",\n \"showlegend\": False,\n }\n\n index_vals = []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n # verify each value in index column appears in colors dictionary\n for key in index_vals:\n if key not in colors:\n raise exceptions.PlotlyError(\n \"If you are using colors as a dictionary, all of its \"\n \"keys must be all the values in the index column.\"\n )\n\n # create the list of task names\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n # Is added to task_names if group_tasks is set to False,\n # or if the option is used (True) it only adds them if the\n # name is not already in the list\n if not group_tasks or tn not in task_names:\n task_names.append(tn)\n # Guarantees that for grouped tasks the tasks that are inserted first\n # are shown at the top\n if group_tasks:\n task_names.reverse()\n\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n del tasks[index][\"name\"]\n\n # If group_tasks is True, all tasks with the same name belong\n # to the same row.\n groupID = index\n if group_tasks:\n groupID = task_names.index(tn)\n tasks[index][\"y0\"] = groupID - bar_width\n tasks[index][\"y1\"] = groupID + bar_width\n\n tasks[index][\"fillcolor\"] = colors[chart[index][index_col]]\n color_id = tasks[index][\"fillcolor\"]\n\n if color_id not in scatter_data_dict:\n scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)\n\n scatter_data_dict[color_id][\"legendgroup\"] = color_id\n scatter_data_dict[color_id][\"fillcolor\"] = color_id\n\n # if there are already values append the gap\n if len(scatter_data_dict[color_id][\"x\"]) > 0:\n # a gap on the scatterplot separates the rectangles from each other\n scatter_data_dict[color_id][\"x\"].append(\n scatter_data_dict[color_id][\"x\"][-1]\n )\n scatter_data_dict[color_id][\"y\"].append(None)\n\n xs, ys = _get_corner_points(\n tasks[index][\"x0\"],\n tasks[index][\"y0\"],\n tasks[index][\"x1\"],\n tasks[index][\"y1\"],\n )\n\n scatter_data_dict[color_id][\"x\"] += xs\n scatter_data_dict[color_id][\"y\"] += ys\n\n # append dummy markers for showing start and end of interval\n if color_id not in marker_data_dict:\n marker_data_dict[color_id] = copy.deepcopy(marker_data_template)\n marker_data_dict[color_id][\"marker\"][\"color\"] = color_id\n marker_data_dict[color_id][\"legendgroup\"] = color_id\n\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x0\"])\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x1\"])\n marker_data_dict[color_id][\"y\"].append(groupID)\n marker_data_dict[color_id][\"y\"].append(groupID)\n\n if \"description\" in tasks[index]:\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n del tasks[index][\"description\"]\n else:\n marker_data_dict[color_id][\"text\"].append(None)\n marker_data_dict[color_id][\"text\"].append(None)\n\n if show_colorbar is True:\n showlegend = True\n\n for index_value in index_vals:\n scatter_data_dict[colors[index_value]][\"name\"] = str(index_value)\n\n layout = dict(\n title=title,\n showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode=\"closest\",\n yaxis=dict(\n 
showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(task_names))),\n range=[-1, len(task_names) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list(\n [\n dict(count=7, label=\"1w\", step=\"day\", stepmode=\"backward\"),\n dict(count=1, label=\"1m\", step=\"month\", stepmode=\"backward\"),\n dict(count=6, label=\"6m\", step=\"month\", stepmode=\"backward\"),\n dict(count=1, label=\"YTD\", step=\"year\", stepmode=\"todate\"),\n dict(count=1, label=\"1y\", step=\"year\", stepmode=\"backward\"),\n dict(step=\"all\"),\n ]\n )\n ),\n type=\"date\",\n ),\n )\n\n data = [scatter_data_dict[k] for k in sorted(scatter_data_dict)]\n data += [marker_data_dict[k] for k in sorted(marker_data_dict)]\n\n # fig = dict(\n # data=data, layout=layout\n # )\n fig = go.Figure(data=data, layout=layout)\n return fig\n\n\ndef create_gantt(\n df,\n colors=None,\n index_col=None,\n show_colorbar=False,\n reverse_colors=False,\n title=\"Gantt Chart\",\n bar_width=0.2,\n showgrid_x=False,\n showgrid_y=False,\n height=600,\n width=None,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=False,\n show_hover_fill=True,\n):\n \"\"\"\n Returns figure for a gantt chart\n\n :param (array|list) df: input data for gantt chart. Must be either a\n a dataframe or a list. If dataframe, the columns must include\n 'Task', 'Start' and 'Finish'. Other columns can be included and\n used for indexing. If a list, its elements must be dictionaries\n with the same required column headers: 'Task', 'Start' and\n 'Finish'.\n :param (str|list|dict|tuple) colors: either a plotly scale name, an\n rgb or hex color, a color tuple or a list of colors. An rgb color\n is of the form 'rgb(x, y, z)' where x, y, z belong to the interval\n [0, 255] and a color tuple is a tuple of the form (a, b, c) where\n a, b and c belong to [0, 1]. If colors is a list, it must\n contain the valid color types aforementioned as its members.\n If a dictionary, all values of the indexing column must be keys in\n colors.\n :param (str|float) index_col: the column header (if df is a data\n frame) that will function as the indexing column. If df is a list,\n index_col must be one of the keys in all the items of df.\n :param (bool) show_colorbar: determines if colorbar will be visible.\n Only applies if values in the index column are numeric.\n :param (bool) show_hover_fill: enables/disables the hovertext for the\n filled area of the chart.\n :param (bool) reverse_colors: reverses the order of selected colors\n :param (str) title: the title of the chart\n :param (float) bar_width: the width of the horizontal bars in the plot\n :param (bool) showgrid_x: show/hide the x-axis grid\n :param (bool) showgrid_y: show/hide the y-axis grid\n :param (float) height: the height of the chart\n :param (float) width: the width of the chart\n\n Example 1: Simple Gantt Chart\n\n >>> from plotly.figure_factory import create_gantt\n\n >>> # Make data for chart\n >>> df = [dict(Task=\"Job A\", Start='2009-01-01', Finish='2009-02-30'),\n ... dict(Task=\"Job B\", Start='2009-03-05', Finish='2009-04-15'),\n ... dict(Task=\"Job C\", Start='2009-02-20', Finish='2009-05-30')]\n\n >>> # Create a figure\n >>> fig = create_gantt(df)\n >>> fig.show()\n\n\n Example 2: Index by Column with Numerical Entries\n\n >>> from plotly.figure_factory import create_gantt\n\n >>> # Make data for chart\n >>> df = [dict(Task=\"Job A\", Start='2009-01-01',\n ... Finish='2009-02-30', Complete=10),\n ... 
dict(Task=\"Job B\", Start='2009-03-05',\n ... Finish='2009-04-15', Complete=60),\n ... dict(Task=\"Job C\", Start='2009-02-20',\n ... Finish='2009-05-30', Complete=95)]\n\n >>> # Create a figure with Plotly colorscale\n >>> fig = create_gantt(df, colors='Blues', index_col='Complete',\n ... show_colorbar=True, bar_width=0.5,\n ... showgrid_x=True, showgrid_y=True)\n >>> fig.show()\n\n\n Example 3: Index by Column with String Entries\n\n >>> from plotly.figure_factory import create_gantt\n\n >>> # Make data for chart\n >>> df = [dict(Task=\"Job A\", Start='2009-01-01',\n ... Finish='2009-02-30', Resource='Apple'),\n ... dict(Task=\"Job B\", Start='2009-03-05',\n ... Finish='2009-04-15', Resource='Grape'),\n ... dict(Task=\"Job C\", Start='2009-02-20',\n ... Finish='2009-05-30', Resource='Banana')]\n\n >>> # Create a figure with Plotly colorscale\n >>> fig = create_gantt(df, colors=['rgb(200, 50, 25)', (1, 0, 1), '#6c4774'],\n ... index_col='Resource', reverse_colors=True,\n ... show_colorbar=True)\n >>> fig.show()\n\n\n Example 4: Use a dictionary for colors\n\n >>> from plotly.figure_factory import create_gantt\n >>> # Make data for chart\n >>> df = [dict(Task=\"Job A\", Start='2009-01-01',\n ... Finish='2009-02-30', Resource='Apple'),\n ... dict(Task=\"Job B\", Start='2009-03-05',\n ... Finish='2009-04-15', Resource='Grape'),\n ... dict(Task=\"Job C\", Start='2009-02-20',\n ... Finish='2009-05-30', Resource='Banana')]\n\n >>> # Make a dictionary of colors\n >>> colors = {'Apple': 'rgb(255, 0, 0)',\n ... 'Grape': 'rgb(170, 14, 200)',\n ... 'Banana': (1, 1, 0.2)}\n\n >>> # Create a figure with Plotly colorscale\n >>> fig = create_gantt(df, colors=colors, index_col='Resource',\n ... show_colorbar=True)\n\n >>> fig.show()\n\n Example 5: Use a pandas dataframe\n\n >>> from plotly.figure_factory import create_gantt\n >>> import pandas as pd\n\n >>> # Make data as a dataframe\n >>> df = pd.DataFrame([['Run', '2010-01-01', '2011-02-02', 10],\n ... ['Fast', '2011-01-01', '2012-06-05', 55],\n ... ['Eat', '2012-01-05', '2013-07-05', 94]],\n ... columns=['Task', 'Start', 'Finish', 'Complete'])\n\n >>> # Create a figure with Plotly colorscale\n >>> fig = create_gantt(df, colors='Blues', index_col='Complete',\n ... show_colorbar=True, bar_width=0.5,\n ... showgrid_x=True, showgrid_y=True)\n >>> fig.show()\n \"\"\"\n # validate gantt input data\n chart = validate_gantt(df)\n\n if index_col:\n if index_col not in chart[0]:\n raise exceptions.PlotlyError(\n \"In order to use an indexing column and assign colors to \"\n \"the values of the index, you must choose an actual \"\n \"column name in the dataframe or key if a list of \"\n \"dictionaries is being used.\"\n )\n\n # validate gantt index column\n index_list = []\n for dictionary in chart:\n index_list.append(dictionary[index_col])\n utils.validate_index(index_list)\n\n # Validate colors\n if isinstance(colors, dict):\n colors = clrs.validate_colors_dict(colors, \"rgb\")\n else:\n colors = clrs.validate_colors(colors, \"rgb\")\n\n if reverse_colors is True:\n colors.reverse()\n\n if not index_col:\n if isinstance(colors, dict):\n raise exceptions.PlotlyError(\n \"Error. You have set colors to a dictionary but have not \"\n \"picked an index. 
An index is required if you are \"\n \"assigning colors to particular values in a dictioanry.\"\n )\n fig = gantt(\n chart,\n colors,\n title,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=group_tasks,\n show_hover_fill=show_hover_fill,\n show_colorbar=show_colorbar,\n )\n return fig\n else:\n if not isinstance(colors, dict):\n fig = gantt_colorscale(\n chart,\n colors,\n title,\n index_col,\n show_colorbar,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=group_tasks,\n show_hover_fill=show_hover_fill,\n )\n return fig\n else:\n fig = gantt_dict(\n chart,\n colors,\n title,\n index_col,\n show_colorbar,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=group_tasks,\n show_hover_fill=show_hover_fill,\n )\n return fig\n", "path": "packages/python/plotly/plotly/figure_factory/_gantt.py" } ]
[ { "content": "from __future__ import absolute_import\n\nfrom numbers import Number\n\nimport copy\n\nfrom plotly import exceptions, optional_imports\nimport plotly.colors as clrs\nfrom plotly.figure_factory import utils\nimport plotly.graph_objects as go\n\npd = optional_imports.get_module(\"pandas\")\n\nREQUIRED_GANTT_KEYS = [\"Task\", \"Start\", \"Finish\"]\n\n\ndef _get_corner_points(x0, y0, x1, y1):\n \"\"\"\n Returns the corner points of a scatter rectangle\n\n :param x0: x-start\n :param y0: y-lower\n :param x1: x-end\n :param y1: y-upper\n :return: ([x], [y]), tuple of lists containing the x and y values\n \"\"\"\n\n return ([x0, x1, x1, x0], [y0, y0, y1, y1])\n\n\ndef validate_gantt(df):\n \"\"\"\n Validates the inputted dataframe or list\n \"\"\"\n if pd and isinstance(df, pd.core.frame.DataFrame):\n # validate that df has all the required keys\n for key in REQUIRED_GANTT_KEYS:\n if key not in df:\n raise exceptions.PlotlyError(\n \"The columns in your dataframe must include the \"\n \"following keys: {0}\".format(\", \".join(REQUIRED_GANTT_KEYS))\n )\n\n num_of_rows = len(df.index)\n chart = []\n for index in range(num_of_rows):\n task_dict = {}\n for key in df:\n task_dict[key] = df.iloc[index][key]\n chart.append(task_dict)\n\n return chart\n\n # validate if df is a list\n if not isinstance(df, list):\n raise exceptions.PlotlyError(\n \"You must input either a dataframe \" \"or a list of dictionaries.\"\n )\n\n # validate if df is empty\n if len(df) <= 0:\n raise exceptions.PlotlyError(\n \"Your list is empty. It must contain \" \"at least one dictionary.\"\n )\n if not isinstance(df[0], dict):\n raise exceptions.PlotlyError(\"Your list must only \" \"include dictionaries.\")\n return df\n\n\ndef gantt(\n chart,\n colors,\n title,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=False,\n show_hover_fill=True,\n show_colorbar=True,\n):\n \"\"\"\n Refer to create_gantt() for docstring\n \"\"\"\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n\n for index in range(len(chart)):\n task = dict(\n x0=chart[index][\"Start\"],\n x1=chart[index][\"Finish\"],\n name=chart[index][\"Task\"],\n )\n if \"Description\" in chart[index]:\n task[\"description\"] = chart[index][\"Description\"]\n tasks.append(task)\n\n # create a scatter trace for every task group\n scatter_data_dict = dict()\n marker_data_dict = dict()\n\n if show_hover_fill:\n hoverinfo = \"name\"\n else:\n hoverinfo = \"skip\"\n\n scatter_data_template = {\n \"x\": [],\n \"y\": [],\n \"mode\": \"none\",\n \"fill\": \"toself\",\n \"hoverinfo\": hoverinfo,\n }\n\n marker_data_template = {\n \"x\": [],\n \"y\": [],\n \"mode\": \"markers\",\n \"text\": [],\n \"marker\": dict(color=\"\", size=1, opacity=0),\n \"name\": \"\",\n \"showlegend\": False,\n }\n\n # create the list of task names\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n # Is added to task_names if group_tasks is set to False,\n # or if the option is used (True) it only adds them if the\n # name is not already in the list\n if not group_tasks or tn not in task_names:\n task_names.append(tn)\n # Guarantees that for grouped tasks the tasks that are inserted first\n # are shown at the top\n if group_tasks:\n task_names.reverse()\n\n color_index = 0\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n del tasks[index][\"name\"]\n\n # If group_tasks is True, all tasks with the same name belong\n # to the same 
row.\n groupID = index\n if group_tasks:\n groupID = task_names.index(tn)\n tasks[index][\"y0\"] = groupID - bar_width\n tasks[index][\"y1\"] = groupID + bar_width\n\n # check if colors need to be looped\n if color_index >= len(colors):\n color_index = 0\n tasks[index][\"fillcolor\"] = colors[color_index]\n color_id = tasks[index][\"fillcolor\"]\n\n if color_id not in scatter_data_dict:\n scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)\n\n scatter_data_dict[color_id][\"fillcolor\"] = color_id\n scatter_data_dict[color_id][\"name\"] = str(tn)\n scatter_data_dict[color_id][\"legendgroup\"] = color_id\n\n # if there are already values append the gap\n if len(scatter_data_dict[color_id][\"x\"]) > 0:\n # a gap on the scatterplot separates the rectangles from each other\n scatter_data_dict[color_id][\"x\"].append(\n scatter_data_dict[color_id][\"x\"][-1]\n )\n scatter_data_dict[color_id][\"y\"].append(None)\n\n xs, ys = _get_corner_points(\n tasks[index][\"x0\"],\n tasks[index][\"y0\"],\n tasks[index][\"x1\"],\n tasks[index][\"y1\"],\n )\n\n scatter_data_dict[color_id][\"x\"] += xs\n scatter_data_dict[color_id][\"y\"] += ys\n\n # append dummy markers for showing start and end of interval\n if color_id not in marker_data_dict:\n marker_data_dict[color_id] = copy.deepcopy(marker_data_template)\n marker_data_dict[color_id][\"marker\"][\"color\"] = color_id\n marker_data_dict[color_id][\"legendgroup\"] = color_id\n\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x0\"])\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x1\"])\n marker_data_dict[color_id][\"y\"].append(groupID)\n marker_data_dict[color_id][\"y\"].append(groupID)\n\n if \"description\" in tasks[index]:\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n del tasks[index][\"description\"]\n else:\n marker_data_dict[color_id][\"text\"].append(None)\n marker_data_dict[color_id][\"text\"].append(None)\n\n color_index += 1\n\n showlegend = show_colorbar\n\n layout = dict(\n title=title,\n showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode=\"closest\",\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(task_names))),\n range=[-1, len(task_names) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list(\n [\n dict(count=7, label=\"1w\", step=\"day\", stepmode=\"backward\"),\n dict(count=1, label=\"1m\", step=\"month\", stepmode=\"backward\"),\n dict(count=6, label=\"6m\", step=\"month\", stepmode=\"backward\"),\n dict(count=1, label=\"YTD\", step=\"year\", stepmode=\"todate\"),\n dict(count=1, label=\"1y\", step=\"year\", stepmode=\"backward\"),\n dict(step=\"all\"),\n ]\n )\n ),\n type=\"date\",\n ),\n )\n\n data = [scatter_data_dict[k] for k in sorted(scatter_data_dict)]\n data += [marker_data_dict[k] for k in sorted(marker_data_dict)]\n\n # fig = dict(\n # data=data, layout=layout\n # )\n fig = go.Figure(data=data, layout=layout)\n return fig\n\n\ndef gantt_colorscale(\n chart,\n colors,\n title,\n index_col,\n show_colorbar,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=False,\n show_hover_fill=True,\n):\n \"\"\"\n Refer to FigureFactory.create_gantt() for docstring\n \"\"\"\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n 
showlegend = False\n\n for index in range(len(chart)):\n task = dict(\n x0=chart[index][\"Start\"],\n x1=chart[index][\"Finish\"],\n name=chart[index][\"Task\"],\n )\n if \"Description\" in chart[index]:\n task[\"description\"] = chart[index][\"Description\"]\n tasks.append(task)\n\n # create a scatter trace for every task group\n scatter_data_dict = dict()\n # create scatter traces for the start- and endpoints\n marker_data_dict = dict()\n\n if show_hover_fill:\n hoverinfo = \"name\"\n else:\n hoverinfo = \"skip\"\n\n scatter_data_template = {\n \"x\": [],\n \"y\": [],\n \"mode\": \"none\",\n \"fill\": \"toself\",\n \"showlegend\": False,\n \"hoverinfo\": hoverinfo,\n \"legendgroup\": \"\",\n }\n\n marker_data_template = {\n \"x\": [],\n \"y\": [],\n \"mode\": \"markers\",\n \"text\": [],\n \"marker\": dict(color=\"\", size=1, opacity=0),\n \"name\": \"\",\n \"showlegend\": False,\n \"legendgroup\": \"\",\n }\n\n index_vals = []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n # compute the color for task based on indexing column\n if isinstance(chart[0][index_col], Number):\n # check that colors has at least 2 colors\n if len(colors) < 2:\n raise exceptions.PlotlyError(\n \"You must use at least 2 colors in 'colors' if you \"\n \"are using a colorscale. However only the first two \"\n \"colors given will be used for the lower and upper \"\n \"bounds on the colormap.\"\n )\n\n # create the list of task names\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n # Is added to task_names if group_tasks is set to False,\n # or if the option is used (True) it only adds them if the\n # name is not already in the list\n if not group_tasks or tn not in task_names:\n task_names.append(tn)\n # Guarantees that for grouped tasks the tasks that are inserted\n # first are shown at the top\n if group_tasks:\n task_names.reverse()\n\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n del tasks[index][\"name\"]\n\n # If group_tasks is True, all tasks with the same name belong\n # to the same row.\n groupID = index\n if group_tasks:\n groupID = task_names.index(tn)\n tasks[index][\"y0\"] = groupID - bar_width\n tasks[index][\"y1\"] = groupID + bar_width\n\n # unlabel color\n colors = clrs.color_parser(colors, clrs.unlabel_rgb)\n lowcolor = colors[0]\n highcolor = colors[1]\n\n intermed = (chart[index][index_col]) / 100.0\n intermed_color = clrs.find_intermediate_color(lowcolor, highcolor, intermed)\n intermed_color = clrs.color_parser(intermed_color, clrs.label_rgb)\n tasks[index][\"fillcolor\"] = intermed_color\n color_id = tasks[index][\"fillcolor\"]\n\n if color_id not in scatter_data_dict:\n scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)\n\n scatter_data_dict[color_id][\"fillcolor\"] = color_id\n scatter_data_dict[color_id][\"name\"] = str(chart[index][index_col])\n scatter_data_dict[color_id][\"legendgroup\"] = color_id\n\n # relabel colors with 'rgb'\n colors = clrs.color_parser(colors, clrs.label_rgb)\n\n # if there are already values append the gap\n if len(scatter_data_dict[color_id][\"x\"]) > 0:\n # a gap on the scatterplot separates the rectangles from each other\n scatter_data_dict[color_id][\"x\"].append(\n scatter_data_dict[color_id][\"x\"][-1]\n )\n scatter_data_dict[color_id][\"y\"].append(None)\n\n xs, ys = _get_corner_points(\n tasks[index][\"x0\"],\n tasks[index][\"y0\"],\n tasks[index][\"x1\"],\n tasks[index][\"y1\"],\n )\n\n 
scatter_data_dict[color_id][\"x\"] += xs\n scatter_data_dict[color_id][\"y\"] += ys\n\n # append dummy markers for showing start and end of interval\n if color_id not in marker_data_dict:\n marker_data_dict[color_id] = copy.deepcopy(marker_data_template)\n marker_data_dict[color_id][\"marker\"][\"color\"] = color_id\n marker_data_dict[color_id][\"legendgroup\"] = color_id\n\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x0\"])\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x1\"])\n marker_data_dict[color_id][\"y\"].append(groupID)\n marker_data_dict[color_id][\"y\"].append(groupID)\n\n if \"description\" in tasks[index]:\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n del tasks[index][\"description\"]\n else:\n marker_data_dict[color_id][\"text\"].append(None)\n marker_data_dict[color_id][\"text\"].append(None)\n\n # add colorbar to one of the traces randomly just for display\n if show_colorbar is True:\n k = list(marker_data_dict.keys())[0]\n marker_data_dict[k][\"marker\"].update(\n dict(\n colorscale=[[0, colors[0]], [1, colors[1]]],\n showscale=True,\n cmax=100,\n cmin=0,\n )\n )\n\n if isinstance(chart[0][index_col], str):\n index_vals = []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n if len(colors) < len(index_vals):\n raise exceptions.PlotlyError(\n \"Error. The number of colors in 'colors' must be no less \"\n \"than the number of unique index values in your group \"\n \"column.\"\n )\n\n # make a dictionary assignment to each index value\n index_vals_dict = {}\n # define color index\n c_index = 0\n for key in index_vals:\n if c_index > len(colors) - 1:\n c_index = 0\n index_vals_dict[key] = colors[c_index]\n c_index += 1\n\n # create the list of task names\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n # Is added to task_names if group_tasks is set to False,\n # or if the option is used (True) it only adds them if the\n # name is not already in the list\n if not group_tasks or tn not in task_names:\n task_names.append(tn)\n # Guarantees that for grouped tasks the tasks that are inserted\n # first are shown at the top\n if group_tasks:\n task_names.reverse()\n\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n del tasks[index][\"name\"]\n\n # If group_tasks is True, all tasks with the same name belong\n # to the same row.\n groupID = index\n if group_tasks:\n groupID = task_names.index(tn)\n tasks[index][\"y0\"] = groupID - bar_width\n tasks[index][\"y1\"] = groupID + bar_width\n\n tasks[index][\"fillcolor\"] = index_vals_dict[chart[index][index_col]]\n color_id = tasks[index][\"fillcolor\"]\n\n if color_id not in scatter_data_dict:\n scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)\n\n scatter_data_dict[color_id][\"fillcolor\"] = color_id\n scatter_data_dict[color_id][\"legendgroup\"] = color_id\n scatter_data_dict[color_id][\"name\"] = str(chart[index][index_col])\n\n # relabel colors with 'rgb'\n colors = clrs.color_parser(colors, clrs.label_rgb)\n\n # if there are already values append the gap\n if len(scatter_data_dict[color_id][\"x\"]) > 0:\n # a gap on the scatterplot separates the rectangles from each other\n scatter_data_dict[color_id][\"x\"].append(\n scatter_data_dict[color_id][\"x\"][-1]\n )\n scatter_data_dict[color_id][\"y\"].append(None)\n\n xs, ys = _get_corner_points(\n 
tasks[index][\"x0\"],\n tasks[index][\"y0\"],\n tasks[index][\"x1\"],\n tasks[index][\"y1\"],\n )\n\n scatter_data_dict[color_id][\"x\"] += xs\n scatter_data_dict[color_id][\"y\"] += ys\n\n # append dummy markers for showing start and end of interval\n if color_id not in marker_data_dict:\n marker_data_dict[color_id] = copy.deepcopy(marker_data_template)\n marker_data_dict[color_id][\"marker\"][\"color\"] = color_id\n marker_data_dict[color_id][\"legendgroup\"] = color_id\n\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x0\"])\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x1\"])\n marker_data_dict[color_id][\"y\"].append(groupID)\n marker_data_dict[color_id][\"y\"].append(groupID)\n\n if \"description\" in tasks[index]:\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n del tasks[index][\"description\"]\n else:\n marker_data_dict[color_id][\"text\"].append(None)\n marker_data_dict[color_id][\"text\"].append(None)\n\n if show_colorbar is True:\n showlegend = True\n for k in scatter_data_dict:\n scatter_data_dict[k][\"showlegend\"] = showlegend\n # add colorbar to one of the traces randomly just for display\n # if show_colorbar is True:\n # k = list(marker_data_dict.keys())[0]\n # marker_data_dict[k][\"marker\"].update(\n # dict(\n # colorscale=[[0, colors[0]], [1, colors[1]]],\n # showscale=True,\n # cmax=100,\n # cmin=0,\n # )\n # )\n\n layout = dict(\n title=title,\n showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode=\"closest\",\n yaxis=dict(\n showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(task_names))),\n range=[-1, len(task_names) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list(\n [\n dict(count=7, label=\"1w\", step=\"day\", stepmode=\"backward\"),\n dict(count=1, label=\"1m\", step=\"month\", stepmode=\"backward\"),\n dict(count=6, label=\"6m\", step=\"month\", stepmode=\"backward\"),\n dict(count=1, label=\"YTD\", step=\"year\", stepmode=\"todate\"),\n dict(count=1, label=\"1y\", step=\"year\", stepmode=\"backward\"),\n dict(step=\"all\"),\n ]\n )\n ),\n type=\"date\",\n ),\n )\n\n data = [scatter_data_dict[k] for k in sorted(scatter_data_dict)]\n data += [marker_data_dict[k] for k in sorted(marker_data_dict)]\n\n # fig = dict(\n # data=data, layout=layout\n # )\n fig = go.Figure(data=data, layout=layout)\n return fig\n\n\ndef gantt_dict(\n chart,\n colors,\n title,\n index_col,\n show_colorbar,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=False,\n show_hover_fill=True,\n):\n \"\"\"\n Refer to FigureFactory.create_gantt() for docstring\n \"\"\"\n\n if tasks is None:\n tasks = []\n if task_names is None:\n task_names = []\n if data is None:\n data = []\n showlegend = False\n\n for index in range(len(chart)):\n task = dict(\n x0=chart[index][\"Start\"],\n x1=chart[index][\"Finish\"],\n name=chart[index][\"Task\"],\n )\n if \"Description\" in chart[index]:\n task[\"description\"] = chart[index][\"Description\"]\n tasks.append(task)\n\n # create a scatter trace for every task group\n scatter_data_dict = dict()\n # create scatter traces for the start- and endpoints\n marker_data_dict = dict()\n\n if show_hover_fill:\n hoverinfo = \"name\"\n else:\n hoverinfo = \"skip\"\n\n scatter_data_template = {\n \"x\": [],\n \"y\": [],\n \"mode\": 
\"none\",\n \"fill\": \"toself\",\n \"hoverinfo\": hoverinfo,\n \"legendgroup\": \"\",\n }\n\n marker_data_template = {\n \"x\": [],\n \"y\": [],\n \"mode\": \"markers\",\n \"text\": [],\n \"marker\": dict(color=\"\", size=1, opacity=0),\n \"name\": \"\",\n \"showlegend\": False,\n }\n\n index_vals = []\n for row in range(len(tasks)):\n if chart[row][index_col] not in index_vals:\n index_vals.append(chart[row][index_col])\n\n index_vals.sort()\n\n # verify each value in index column appears in colors dictionary\n for key in index_vals:\n if key not in colors:\n raise exceptions.PlotlyError(\n \"If you are using colors as a dictionary, all of its \"\n \"keys must be all the values in the index column.\"\n )\n\n # create the list of task names\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n # Is added to task_names if group_tasks is set to False,\n # or if the option is used (True) it only adds them if the\n # name is not already in the list\n if not group_tasks or tn not in task_names:\n task_names.append(tn)\n # Guarantees that for grouped tasks the tasks that are inserted first\n # are shown at the top\n if group_tasks:\n task_names.reverse()\n\n for index in range(len(tasks)):\n tn = tasks[index][\"name\"]\n del tasks[index][\"name\"]\n\n # If group_tasks is True, all tasks with the same name belong\n # to the same row.\n groupID = index\n if group_tasks:\n groupID = task_names.index(tn)\n tasks[index][\"y0\"] = groupID - bar_width\n tasks[index][\"y1\"] = groupID + bar_width\n\n tasks[index][\"fillcolor\"] = colors[chart[index][index_col]]\n color_id = tasks[index][\"fillcolor\"]\n\n if color_id not in scatter_data_dict:\n scatter_data_dict[color_id] = copy.deepcopy(scatter_data_template)\n\n scatter_data_dict[color_id][\"legendgroup\"] = color_id\n scatter_data_dict[color_id][\"fillcolor\"] = color_id\n\n # if there are already values append the gap\n if len(scatter_data_dict[color_id][\"x\"]) > 0:\n # a gap on the scatterplot separates the rectangles from each other\n scatter_data_dict[color_id][\"x\"].append(\n scatter_data_dict[color_id][\"x\"][-1]\n )\n scatter_data_dict[color_id][\"y\"].append(None)\n\n xs, ys = _get_corner_points(\n tasks[index][\"x0\"],\n tasks[index][\"y0\"],\n tasks[index][\"x1\"],\n tasks[index][\"y1\"],\n )\n\n scatter_data_dict[color_id][\"x\"] += xs\n scatter_data_dict[color_id][\"y\"] += ys\n\n # append dummy markers for showing start and end of interval\n if color_id not in marker_data_dict:\n marker_data_dict[color_id] = copy.deepcopy(marker_data_template)\n marker_data_dict[color_id][\"marker\"][\"color\"] = color_id\n marker_data_dict[color_id][\"legendgroup\"] = color_id\n\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x0\"])\n marker_data_dict[color_id][\"x\"].append(tasks[index][\"x1\"])\n marker_data_dict[color_id][\"y\"].append(groupID)\n marker_data_dict[color_id][\"y\"].append(groupID)\n\n if \"description\" in tasks[index]:\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n marker_data_dict[color_id][\"text\"].append(tasks[index][\"description\"])\n del tasks[index][\"description\"]\n else:\n marker_data_dict[color_id][\"text\"].append(None)\n marker_data_dict[color_id][\"text\"].append(None)\n\n if show_colorbar is True:\n showlegend = True\n\n for index_value in index_vals:\n scatter_data_dict[colors[index_value]][\"name\"] = str(index_value)\n\n layout = dict(\n title=title,\n showlegend=showlegend,\n height=height,\n width=width,\n shapes=[],\n hovermode=\"closest\",\n yaxis=dict(\n 
showgrid=showgrid_y,\n ticktext=task_names,\n tickvals=list(range(len(task_names))),\n range=[-1, len(task_names) + 1],\n autorange=False,\n zeroline=False,\n ),\n xaxis=dict(\n showgrid=showgrid_x,\n zeroline=False,\n rangeselector=dict(\n buttons=list(\n [\n dict(count=7, label=\"1w\", step=\"day\", stepmode=\"backward\"),\n dict(count=1, label=\"1m\", step=\"month\", stepmode=\"backward\"),\n dict(count=6, label=\"6m\", step=\"month\", stepmode=\"backward\"),\n dict(count=1, label=\"YTD\", step=\"year\", stepmode=\"todate\"),\n dict(count=1, label=\"1y\", step=\"year\", stepmode=\"backward\"),\n dict(step=\"all\"),\n ]\n )\n ),\n type=\"date\",\n ),\n )\n\n data = [scatter_data_dict[k] for k in sorted(scatter_data_dict)]\n data += [marker_data_dict[k] for k in sorted(marker_data_dict)]\n\n # fig = dict(\n # data=data, layout=layout\n # )\n fig = go.Figure(data=data, layout=layout)\n return fig\n\n\ndef create_gantt(\n df,\n colors=None,\n index_col=None,\n show_colorbar=False,\n reverse_colors=False,\n title=\"Gantt Chart\",\n bar_width=0.2,\n showgrid_x=False,\n showgrid_y=False,\n height=600,\n width=None,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=False,\n show_hover_fill=True,\n):\n \"\"\"\n Returns figure for a gantt chart\n\n :param (array|list) df: input data for gantt chart. Must be either a\n a dataframe or a list. If dataframe, the columns must include\n 'Task', 'Start' and 'Finish'. Other columns can be included and\n used for indexing. If a list, its elements must be dictionaries\n with the same required column headers: 'Task', 'Start' and\n 'Finish'.\n :param (str|list|dict|tuple) colors: either a plotly scale name, an\n rgb or hex color, a color tuple or a list of colors. An rgb color\n is of the form 'rgb(x, y, z)' where x, y, z belong to the interval\n [0, 255] and a color tuple is a tuple of the form (a, b, c) where\n a, b and c belong to [0, 1]. If colors is a list, it must\n contain the valid color types aforementioned as its members.\n If a dictionary, all values of the indexing column must be keys in\n colors.\n :param (str|float) index_col: the column header (if df is a data\n frame) that will function as the indexing column. If df is a list,\n index_col must be one of the keys in all the items of df.\n :param (bool) show_colorbar: determines if colorbar will be visible.\n Only applies if values in the index column are numeric.\n :param (bool) show_hover_fill: enables/disables the hovertext for the\n filled area of the chart.\n :param (bool) reverse_colors: reverses the order of selected colors\n :param (str) title: the title of the chart\n :param (float) bar_width: the width of the horizontal bars in the plot\n :param (bool) showgrid_x: show/hide the x-axis grid\n :param (bool) showgrid_y: show/hide the y-axis grid\n :param (float) height: the height of the chart\n :param (float) width: the width of the chart\n\n Example 1: Simple Gantt Chart\n\n >>> from plotly.figure_factory import create_gantt\n\n >>> # Make data for chart\n >>> df = [dict(Task=\"Job A\", Start='2009-01-01', Finish='2009-02-30'),\n ... dict(Task=\"Job B\", Start='2009-03-05', Finish='2009-04-15'),\n ... dict(Task=\"Job C\", Start='2009-02-20', Finish='2009-05-30')]\n\n >>> # Create a figure\n >>> fig = create_gantt(df)\n >>> fig.show()\n\n\n Example 2: Index by Column with Numerical Entries\n\n >>> from plotly.figure_factory import create_gantt\n\n >>> # Make data for chart\n >>> df = [dict(Task=\"Job A\", Start='2009-01-01',\n ... Finish='2009-02-30', Complete=10),\n ... 
dict(Task=\"Job B\", Start='2009-03-05',\n ... Finish='2009-04-15', Complete=60),\n ... dict(Task=\"Job C\", Start='2009-02-20',\n ... Finish='2009-05-30', Complete=95)]\n\n >>> # Create a figure with Plotly colorscale\n >>> fig = create_gantt(df, colors='Blues', index_col='Complete',\n ... show_colorbar=True, bar_width=0.5,\n ... showgrid_x=True, showgrid_y=True)\n >>> fig.show()\n\n\n Example 3: Index by Column with String Entries\n\n >>> from plotly.figure_factory import create_gantt\n\n >>> # Make data for chart\n >>> df = [dict(Task=\"Job A\", Start='2009-01-01',\n ... Finish='2009-02-30', Resource='Apple'),\n ... dict(Task=\"Job B\", Start='2009-03-05',\n ... Finish='2009-04-15', Resource='Grape'),\n ... dict(Task=\"Job C\", Start='2009-02-20',\n ... Finish='2009-05-30', Resource='Banana')]\n\n >>> # Create a figure with Plotly colorscale\n >>> fig = create_gantt(df, colors=['rgb(200, 50, 25)', (1, 0, 1), '#6c4774'],\n ... index_col='Resource', reverse_colors=True,\n ... show_colorbar=True)\n >>> fig.show()\n\n\n Example 4: Use a dictionary for colors\n\n >>> from plotly.figure_factory import create_gantt\n >>> # Make data for chart\n >>> df = [dict(Task=\"Job A\", Start='2009-01-01',\n ... Finish='2009-02-30', Resource='Apple'),\n ... dict(Task=\"Job B\", Start='2009-03-05',\n ... Finish='2009-04-15', Resource='Grape'),\n ... dict(Task=\"Job C\", Start='2009-02-20',\n ... Finish='2009-05-30', Resource='Banana')]\n\n >>> # Make a dictionary of colors\n >>> colors = {'Apple': 'rgb(255, 0, 0)',\n ... 'Grape': 'rgb(170, 14, 200)',\n ... 'Banana': (1, 1, 0.2)}\n\n >>> # Create a figure with Plotly colorscale\n >>> fig = create_gantt(df, colors=colors, index_col='Resource',\n ... show_colorbar=True)\n\n >>> fig.show()\n\n Example 5: Use a pandas dataframe\n\n >>> from plotly.figure_factory import create_gantt\n >>> import pandas as pd\n\n >>> # Make data as a dataframe\n >>> df = pd.DataFrame([['Run', '2010-01-01', '2011-02-02', 10],\n ... ['Fast', '2011-01-01', '2012-06-05', 55],\n ... ['Eat', '2012-01-05', '2013-07-05', 94]],\n ... columns=['Task', 'Start', 'Finish', 'Complete'])\n\n >>> # Create a figure with Plotly colorscale\n >>> fig = create_gantt(df, colors='Blues', index_col='Complete',\n ... show_colorbar=True, bar_width=0.5,\n ... showgrid_x=True, showgrid_y=True)\n >>> fig.show()\n \"\"\"\n # validate gantt input data\n chart = validate_gantt(df)\n\n if index_col:\n if index_col not in chart[0]:\n raise exceptions.PlotlyError(\n \"In order to use an indexing column and assign colors to \"\n \"the values of the index, you must choose an actual \"\n \"column name in the dataframe or key if a list of \"\n \"dictionaries is being used.\"\n )\n\n # validate gantt index column\n index_list = []\n for dictionary in chart:\n index_list.append(dictionary[index_col])\n utils.validate_index(index_list)\n\n # Validate colors\n if isinstance(colors, dict):\n colors = clrs.validate_colors_dict(colors, \"rgb\")\n else:\n colors = clrs.validate_colors(colors, \"rgb\")\n\n if reverse_colors is True:\n colors.reverse()\n\n if not index_col:\n if isinstance(colors, dict):\n raise exceptions.PlotlyError(\n \"Error. You have set colors to a dictionary but have not \"\n \"picked an index. 
An index is required if you are \"\n \"assigning colors to particular values in a dictioanry.\"\n )\n fig = gantt(\n chart,\n colors,\n title,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=group_tasks,\n show_hover_fill=show_hover_fill,\n show_colorbar=show_colorbar,\n )\n return fig\n else:\n if not isinstance(colors, dict):\n fig = gantt_colorscale(\n chart,\n colors,\n title,\n index_col,\n show_colorbar,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=group_tasks,\n show_hover_fill=show_hover_fill,\n )\n return fig\n else:\n fig = gantt_dict(\n chart,\n colors,\n title,\n index_col,\n show_colorbar,\n bar_width,\n showgrid_x,\n showgrid_y,\n height,\n width,\n tasks=None,\n task_names=None,\n data=None,\n group_tasks=group_tasks,\n show_hover_fill=show_hover_fill,\n )\n return fig\n", "path": "packages/python/plotly/plotly/figure_factory/_gantt.py" } ]
diff --git a/packages/python/plotly/plotly/figure_factory/_gantt.py b/packages/python/plotly/plotly/figure_factory/_gantt.py index 3d3f8f5cdd5..dfc5e4a2a32 100644 --- a/packages/python/plotly/plotly/figure_factory/_gantt.py +++ b/packages/python/plotly/plotly/figure_factory/_gantt.py @@ -46,7 +46,7 @@ def validate_gantt(df): for index in range(num_of_rows): task_dict = {} for key in df: - task_dict[key] = df.ix[index][key] + task_dict[key] = df.iloc[index][key] chart.append(task_dict) return chart
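The one-line change above swaps the long-deprecated `DataFrame.ix` indexer for `iloc` inside `validate_gantt`; `.ix` was removed in pandas 1.0, while `.iloc` selects rows purely by integer position. A minimal sketch of the patched row-to-dict loop, run against a made-up DataFrame (the data below is illustrative and not taken from the plotly sources):

```python
# Minimal sketch of the patched validate_gantt loop: convert each DataFrame row
# into a task dict using positional indexing. pandas removed DataFrame.ix in 1.0,
# so df.iloc (integer-position based) is used instead.
import pandas as pd

df = pd.DataFrame(
    [["Job A", "2009-01-01", "2009-02-28"],
     ["Job B", "2009-03-05", "2009-04-15"]],
    columns=["Task", "Start", "Finish"],
)

chart = []
for index in range(len(df)):
    task_dict = {}
    for key in df:                                # iterates column names
        task_dict[key] = df.iloc[index][key]      # was df.ix[index][key]
    chart.append(task_dict)

print(chart[0])  # {'Task': 'Job A', 'Start': '2009-01-01', 'Finish': '2009-02-28'}
```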
ivy-llc__ivy-13166
fmin
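The issue title is a single word; read together with the before/after files below, it asks for an `fmin` method on the torch frontend `Tensor` class. For orientation, `torch.fmin` is PyTorch's element-wise, NaN-ignoring minimum: where exactly one of the two operands is NaN, the non-NaN value is returned. A rough scalar sketch of that behaviour (an illustration of the documented semantics, not code from this record):

```python
# Rough sketch of fmin semantics on plain Python floats; torch.fmin applies
# the same rule element-wise over tensors.
import math

def fmin_scalar(a, b):
    # NaN-ignoring minimum: if exactly one operand is NaN, return the other;
    # only when both are NaN is the result NaN.
    if math.isnan(a):
        return b
    if math.isnan(b):
        return a
    return min(a, b)

print(fmin_scalar(2.0, float("nan")))  # 2.0
print(fmin_scalar(1.0, 3.0))           # 1.0
```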
[ { "content": "# global\n\n# local\nimport ivy\nimport ivy.functional.frontends.torch as torch_frontend\nimport ivy.functional.frontends.torch.nn.functional as torch_frontend_nn\nfrom ivy.functional.frontends.numpy.creation_routines.from_existing_data import (\n array as np_frontend_array,\n)\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.func_wrapper import with_supported_dtypes\n\n\nclass Tensor:\n def __init__(self, array, device=None, _init_overload=False):\n\n if _init_overload:\n self._ivy_array = (\n ivy.array(array) if not isinstance(array, ivy.Array) else array\n )\n\n else:\n self._ivy_array = ivy.array(\n array, dtype=torch_frontend.float32, device=device\n )\n\n def __repr__(self):\n return str(self._ivy_array.__repr__()).replace(\n \"ivy.array\", \"ivy.frontends.torch.Tensor\"\n )\n\n # Properties #\n # ---------- #\n\n @property\n def ivy_array(self):\n return self._ivy_array\n\n @property\n def device(self):\n return ivy.dev(self._ivy_array)\n\n @property\n def dtype(self):\n return self._ivy_array.dtype\n\n @property\n def shape(self):\n return self._ivy_array.shape\n\n # Setters #\n # --------#\n\n @ivy_array.setter\n def ivy_array(self, array):\n self._ivy_array = (\n ivy.array(array) if not isinstance(array, ivy.Array) else array\n )\n\n # Instance Methods #\n # ---------------- #\n def reshape(self, *args, shape=None):\n if args and shape:\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\n if shape is not None:\n return torch_frontend.reshape(self._ivy_array, shape)\n if args:\n if isinstance(args[0], (tuple, list)):\n shape = args[0]\n return torch_frontend.reshape(self._ivy_array, shape)\n else:\n return torch_frontend.reshape(self._ivy_array, args)\n return torch_frontend.reshape(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def reshape_as(self, other):\n return torch_frontend.reshape(self, other.shape)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def add(self, other, *, alpha=1):\n return torch_frontend.add(self._ivy_array, other, alpha=alpha)\n\n def chunk(self, chunks, dim=0):\n return torch_frontend.chunk(self._ivy_array, chunks, dim=dim)\n\n def any(self, dim=None, keepdim=False, *, out=None):\n return torch_frontend.any(self._ivy_array, dim=dim, keepdim=keepdim, out=out)\n\n def all(self, dim=None, keepdim=False):\n return torch_frontend.all(self._ivy_array, dim=dim, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def add_(self, other, *, alpha=1):\n self._ivy_array = self.add(other, alpha=alpha).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asin(self):\n return torch_frontend.asin(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asin_(self):\n self._ivy_array = self.asin().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def sum(self):\n return torch_frontend.sum(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sin(self):\n return torch_frontend.sin(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sin_(self):\n self._ivy_array = self.sin().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sinh(self):\n return 
torch_frontend.sinh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sinh_(self):\n self._ivy_array = self.sinh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cos(self):\n return torch_frontend.cos(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cos_(self):\n self._ivy_array = self.cos().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cosh(self):\n return torch_frontend.cosh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cosh_(self):\n self._ivy_array = self.cosh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arcsin(self):\n return torch_frontend.arcsin(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arcsin_(self):\n self._ivy_array = self.arcsin().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def atan(self):\n return torch_frontend.atan(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def atan_(self):\n self._ivy_array = self.atan().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def atan2(self, other):\n return torch_frontend.atan2(self._ivy_array, other)\n\n def view(self, *args, shape=None):\n \"\"\"\n Reshape Tensor.\n\n possible arguments are either:\n - size\n - tuple of ints\n - list of ints\n - torch.Size object\n - ints\n Parameters\n ----------\n args:int arguments\n shape: optional shape\n\n Returns reshaped tensor\n -------\n \"\"\"\n if shape and not args:\n shape_tup = shape\n elif args and not shape:\n if (\n isinstance(args[0], tuple)\n or isinstance(args[0], list)\n or type(args[0]).__name__ == \"Size\"\n ) and len(args) == 1:\n shape_tup = args[0]\n else:\n shape_tup = args\n else:\n raise ValueError(\n \"View only accepts as argument ints, tuple or list of ints or \"\n \"the keyword argument size.\"\n )\n return torch_frontend.reshape(self._ivy_array, shape_tup)\n\n def float(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.float32, copy=False)\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asinh(self):\n return torch_frontend.asinh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asinh_(self):\n self._ivy_array = self.asinh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tan(self):\n return torch_frontend.tan(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tan_(self):\n self._ivy_array = self.tan().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tanh(self):\n return torch_frontend.tanh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tanh_(self):\n self._ivy_array = self.tanh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def atanh(self):\n return torch_frontend.atanh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, 
\"torch\")\n def atanh_(self):\n self._ivy_array = self.atanh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctanh(self):\n return torch_frontend.arctanh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctanh_(self):\n self._ivy_array = self.arctanh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def log(self):\n return torch_frontend.log(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def log_(self):\n self._ivy_array = self.log().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def log2(self):\n return torch_frontend.log2(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def relu(self):\n return torch_frontend_nn.relu(self._ivy_array)\n\n def amax(self, dim=None, keepdim=False):\n return torch_frontend.amax(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def amin(self, dim=None, keepdim=False):\n return torch_frontend.amin(self._ivy_array, dim=dim, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def aminmax(self, dim=None, keepdim=False):\n return torch_frontend.aminmax(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def abs(self):\n return torch_frontend.abs(self._ivy_array)\n\n def abs_(self):\n self._ivy_array = self.abs().ivy_array\n return self\n\n def logical_and(self, other):\n return torch_frontend.logical_and(self._ivy_array, other)\n\n def bitwise_not(self, *, out=None):\n return torch_frontend.bitwise_not(self._ivy_array)\n\n def bitwise_and(self, other):\n return torch_frontend.bitwise_and(self._ivy_array, other)\n\n def bitwise_or(self, other, *, out=None):\n return torch_frontend.bitwise_or(self._ivy_array, other)\n\n @with_supported_dtypes({\"1.11.0 and below\": (\"integer\",)}, \"torch\")\n def bitwise_or_(self, other, *, out=None):\n self._ivy_array = self.bitwise_or(other, out=out).ivy_array\n return self\n\n def contiguous(self, memory_format=None):\n return torch_frontend.tensor(self.ivy_array)\n\n def new_ones(self, size, *, dtype=None, device=None, requires_grad=False):\n return torch_frontend.ones(\n size, dtype=dtype, device=device, requires_grad=requires_grad\n )\n\n def new_zeros(self, size, *, dtype=None, device=None, requires_grad=False):\n return torch_frontend.zeros(\n size, dtype=dtype, device=device, requires_grad=requires_grad\n )\n\n def to(self, *args, **kwargs):\n if len(args) > 0:\n if isinstance(args[0], (ivy.Dtype, ivy.NativeDtype)):\n if self.dtype == args[0]:\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(self._ivy_array, dtype=args[0])\n return cast_tensor\n if isinstance(args[0], (ivy.Device, ivy.NativeDevice, str)):\n if isinstance(args[0], str):\n ivy.utils.assertions.check_elem_in_list(\n args[0],\n [\n \"cpu\",\n \"cuda\",\n \"xpu\",\n \"mkldnn\",\n \"opengl\",\n \"opencl\",\n \"ideep\",\n \"hip\",\n \"ve\",\n \"ort\",\n \"mlc\",\n \"xla\",\n \"lazy\",\n \"vulkan\",\n \"meta\",\n \"hpu\",\n ],\n )\n if self.device == args[0]:\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(self._ivy_array, device=args[0])\n return cast_tensor\n else:\n if self.dtype == args[0].dtype and self.device == ivy.dev(args[0]):\n return self\n else:\n cast_tensor = self.clone()\n 
cast_tensor.ivy_array = ivy.asarray(\n self._ivy_array,\n dtype=args[0].dtype,\n device=args[0].device,\n )\n return cast_tensor\n else:\n if (\n \"dtype\" in kwargs\n and \"device\" in kwargs\n and self.dtype == kwargs[\"dtype\"]\n and self.device == kwargs[\"device\"]\n ):\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(\n self._ivy_array,\n device=kwargs[\"device\"] if \"device\" in kwargs else self.device,\n dtype=kwargs[\"dtype\"] if \"dtype\" in kwargs else self.dtype,\n )\n return cast_tensor\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctan(self):\n return torch_frontend.atan(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctan_(self):\n self._ivy_array = self.arctan().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def arctan2(self, other):\n return torch_frontend.arctan2(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def arctan2_(self, other):\n self._ivy_array = self.arctan2(other).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acos(self):\n return torch_frontend.acos(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acos_(self):\n self._ivy_array = self.acos().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arccos(self):\n return torch_frontend.arccos(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arccos_(self):\n self._ivy_array = self.arccos().ivy_array\n return self\n\n def new_tensor(\n self,\n data,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n layout=None,\n pin_memory=False,\n ):\n dtype = ivy.dtype(self._ivy_array) if dtype is None else dtype\n device = ivy.dev(self._ivy_array) if device is None else device\n _data = ivy.asarray(data, copy=True, dtype=dtype, device=device)\n return torch_frontend.tensor(_data)\n\n def view_as(self, other):\n return self.view(other.shape)\n\n def expand(self, *args, size=None):\n if args and size:\n raise TypeError(\"expand() got multiple values for argument 'size'\")\n if args:\n if isinstance(args[0], (tuple, list)):\n size = args[0]\n else:\n size = args\n\n return torch_frontend.tensor(ivy.expand(self._ivy_array, tuple(size)))\n\n def expand_as(self, other):\n return self.expand(\n ivy.shape(other.ivy_array if isinstance(other, Tensor) else other)\n )\n\n def detach(self):\n return torch_frontend.tensor(\n ivy.stop_gradient(self._ivy_array, preserve_type=False)\n )\n\n def unsqueeze(self, dim):\n return torch_frontend.unsqueeze(self, dim)\n\n def unsqueeze_(self, dim):\n self._ivy_array = self.unsqueeze(dim).ivy_array\n return self\n\n def split(self, split_size, dim=0):\n return torch_frontend.split(self, split_size, dim)\n\n def tensor_split(self, indices_or_sections, dim=0):\n return torch_frontend.tensor_split(self.ivy_array, indices_or_sections, dim)\n\n def vsplit(self, indices_or_sections=None, /, *, indices=None, sections=None):\n return torch_frontend.vsplit(\n self.ivy_array, indices_or_sections, indices=indices, sections=sections\n )\n\n def hsplit(self, indices_or_sections=None, /, *, indices=None, sections=None):\n return torch_frontend.hsplit(\n self.ivy_array, indices_or_sections, indices=indices, 
sections=sections\n )\n\n def dsplit(self, indices_or_sections=None, /, *, indices=None, sections=None):\n return torch_frontend.dsplit(\n self.ivy_array, indices_or_sections, indices=indices, sections=sections\n )\n\n def dim(self):\n return self._ivy_array.ndim\n\n def new_full(\n self,\n size,\n fill_value,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n layout=None,\n pin_memory=False,\n ):\n dtype = ivy.dtype(self._ivy_array) if dtype is None else dtype\n device = ivy.dev(self._ivy_array) if device is None else device\n _data = ivy.full(size, fill_value, dtype=dtype, device=device)\n return torch_frontend.tensor(_data)\n\n def new_empty(\n self,\n size,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n layout=None,\n pin_memory=False,\n ):\n dtype = ivy.dtype(self._ivy_array) if dtype is None else dtype\n device = ivy.dev(self._ivy_array) if device is None else device\n _data = ivy.empty(size, dtype=dtype, device=device)\n return torch_frontend.tensor(_data)\n\n def unfold(self, dimension, size, step):\n slices = []\n for i in range(0, self._ivy_array.shape[dimension] - size + 1, step):\n slices.append(self._ivy_array[i : i + size])\n return torch_frontend.stack(slices)\n\n def long(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.int64, copy=False)\n return self\n\n def max(self, dim=None, keepdim=False):\n return torch_frontend.max(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def is_cuda(self):\n return \"gpu\" in ivy.dev(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def pow(self, exponent):\n return torch_frontend.pow(self._ivy_array, exponent)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def pow_(self, exponent):\n self._ivy_array = self.pow(exponent).ivy_array\n return self\n\n def size(self, dim=None):\n shape = ivy.shape(self._ivy_array)\n if dim is None:\n return shape\n else:\n try:\n return shape[dim]\n except IndexError:\n raise IndexError(\n \"Dimension out of range (expected to be in range of [{}, {}], \"\n \"but got {}\".format(len(shape), len(shape) - 1, dim)\n )\n\n def matmul(self, other):\n return torch_frontend.matmul(self._ivy_array, other)\n\n def argwhere(self):\n return torch_frontend.argwhere(self._ivy_array)\n\n def argmax(self, dim=None, keepdim=False):\n return torch_frontend.argmax(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def argmin(self, dim=None, keepdim=False):\n return torch_frontend.argmin(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def argsort(self, dim=-1, descending=False):\n return torch_frontend.argsort(self._ivy_array, dim=dim, descending=descending)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def ceil(self):\n return torch_frontend.ceil(self._ivy_array)\n\n def min(self, dim=None, keepdim=False):\n return torch_frontend.min(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def permute(self, *args, dims=None):\n if args and dims:\n raise TypeError(\"permute() got multiple values for argument 'dims'\")\n if dims is not None:\n return torch_frontend.permute(self._ivy_array, dims)\n if args:\n if isinstance(args[0], (tuple, list)):\n dims = args[0]\n return torch_frontend.permute(self._ivy_array, dims)\n else:\n return torch_frontend.permute(self._ivy_array, args)\n return torch_frontend.permute(self._ivy_array)\n\n def mean(self, dim=None, keepdim=False):\n return torch_frontend.mean(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def transpose(self, dim0, dim1):\n 
return torch_frontend.transpose(self._ivy_array, dim0=dim0, dim1=dim1)\n\n def transpose_(self, dim0, dim1):\n self._ivy_array = self.transpose(dim0, dim1).ivy_array\n return self\n\n def flatten(self, start_dim=0, end_dim=-1):\n return torch_frontend.flatten(self._ivy_array, start_dim, end_dim)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cumsum(self, dim, dtype):\n return torch_frontend.cumsum(self._ivy_array, dim, dtype=dtype)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cumsum_(self, dim, *, dtype=None):\n self._ivy_array = self.cumsum(dim, dtype).ivy_array\n return self\n\n def inverse(self):\n return torch_frontend.inverse(self._ivy_array)\n\n def neg(self):\n return torch_frontend.negative(self._ivy_array)\n\n def int(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.int32, copy=False)\n return self\n\n def bool(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.bool, copy=False)\n return self\n\n def type(self, dtype=None, non_blocking=False, **kwargs):\n if ivy.exists(dtype):\n self._ivy_array = ivy.astype(self._ivy_array, dtype)\n return self\n else:\n return str(self._ivy_array.dtype)\n\n def type_as(self, other):\n if self.dtype != other.dtype:\n self._ivy_array = ivy.astype(self._ivy_array, other.dtype)\n return self\n else:\n pass\n\n def byte(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.uint8, copy=False)\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def ne(self, other):\n return torch_frontend.ne(self._ivy_array, other)\n\n def squeeze(self, dim):\n return torch_frontend.squeeze(self._ivy_array, dim)\n\n def flip(self, dims):\n return torch_frontend.flip(self._ivy_array, dims)\n\n def fliplr(self):\n return torch_frontend.fliplr(self._ivy_array)\n\n def sort(self, dim=-1, descending=False):\n return torch_frontend.sort(self._ivy_array, dim=dim, descending=descending)\n\n def tril(self, diagonal=0):\n return torch_frontend.tril(self._ivy_array, diagonal=diagonal)\n\n def index_select(self, dim, index):\n return torch_frontend.index_select(self._ivy_array, dim, index)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\", \"float16\")}, \"torch\")\n def clamp(self, min=None, max=None, *, out=None):\n if min is not None and max is not None and ivy.all(min > max):\n return torch_frontend.tensor(ivy.array(self._ivy_array).full_like(max))\n return torch_frontend.clamp(self._ivy_array, min=min, max=max, out=out)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\", \"float16\")}, \"torch\")\n def clamp_(self, min=None, max=None, *, out=None):\n self._ivy_array = self.clamp(min=min, max=max, out=out).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def sqrt(self):\n return torch_frontend.sqrt(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def sqrt_(self):\n self._ivy_array = self.sqrt().ivy_array\n return self\n\n def where(self, condition, other):\n # TODO: replace with torch_frontend.where when it's added\n return torch_frontend.tensor(ivy.where(condition, self._ivy_array, other))\n\n def clone(self, memory_format=None):\n return torch_frontend.tensor(ivy.array(self._ivy_array, copy=True))\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acosh(self):\n return 
torch_frontend.acosh(self._ivy_array)\n\n def real(self):\n return torch_frontend.real(self._ivy_array)\n\n def masked_fill(self, mask, value):\n # TODO: replace with torch_frontend.where when it's added\n return torch_frontend.tensor(ivy.where(mask, value, self._ivy_array))\n\n def masked_fill_(self, mask, value):\n self._ivy_array = self.masked_fill(mask, value).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acosh_(self):\n self._ivy_array = self.acosh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def numpy(self):\n return np_frontend_array(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sigmoid(self):\n return torch_frontend.sigmoid(self.ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def softmax(self, dim=None, dtype=None):\n return torch_frontend.nn.functional.softmax(\n self._ivy_array, dim=dim, dtype=dtype\n )\n\n def repeat(self, *args, repeats=None):\n if args and repeats:\n raise ivy.utils.exceptions.IvyException(\n \"repeat() got multiple values for argument 'repeats'\"\n )\n if args:\n if isinstance(args[0], (tuple, list)):\n repeats = args[0]\n else:\n repeats = args\n elif not isinstance(repeats, (tuple, list)):\n raise ivy.utils.exceptions.IvyException(\n \"repeat(): argument 'repeats' must be tuple of ints\"\n )\n\n return torch_frontend.tile(self._ivy_array, repeats)\n\n def unbind(self, dim=0):\n return torch_frontend.unbind(self._ivy_array, dim=dim)\n\n def bitwise_and_(self, other):\n self.ivy_array = self.bitwise_and(other).ivy_array\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def atan2_(self, other):\n self._ivy_array = self.atan2(other).ivy_array\n return self\n\n # Special Methods #\n # -------------------#\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __add__(self, other):\n return self.add(other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __mod__(self, other):\n return torch_frontend.remainder(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __pow__(self, exponent):\n return self.pow(exponent)\n\n def __long__(self, memory_format=None):\n return self.long()\n\n def __getitem__(self, query, /):\n ret = ivy.get_item(self._ivy_array, query)\n return torch_frontend.Tensor(ret, _init_overload=True)\n\n def __setitem__(self, key, value):\n if hasattr(value, \"ivy_array\"):\n value = (\n ivy.to_scalar(value.ivy_array)\n if value.shape == ()\n else ivy.to_list(value)\n )\n self._ivy_array[key] = value\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __radd__(self, other):\n return torch_frontend.add(other, self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __mul__(self, other):\n return torch_frontend.mul(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __rmul__(self, other):\n return torch_frontend.mul(other, self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __sub__(self, other):\n return torch_frontend.subtract(self._ivy_array, other)\n\n def __truediv__(self, other):\n return torch_frontend.div(self._ivy_array, other)\n\n def __iadd__(self, other):\n 
ret = torch_frontend.add(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __imod__(self, other):\n ret = torch_frontend.remainder(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __imul__(self, other):\n ret = torch_frontend.mul(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __isub__(self, other):\n ret = torch_frontend.subtract(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __itruediv__(self, other):\n ret = torch_frontend.div(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __eq__(self, other):\n return torch_frontend.equal(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __gt__(self, other):\n return torch_frontend.greater(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __ne__(self, other):\n return self.ne(other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __rsub__(self, other):\n return torch_frontend.subtract(other, self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __lt__(self, other):\n return torch_frontend.less(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __or__(self, other):\n return torch_frontend.bitwise_or(self._ivy_array, other)\n\n def __invert__(self):\n return torch_frontend.bitwise_not(self._ivy_array)\n\n def __and__(self, other):\n return torch_frontend.bitwise_and(self, other)\n\n # Method aliases\n absolute, absolute_ = abs, abs_\n ndimension = dim\n\n def bitwise_xor(self, other, *, out=None):\n return torch_frontend.bitwise_xor(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cumprod(self, dim, dtype):\n return torch_frontend.cumprod(self._ivy_array, dim, dtype=dtype)\n", "path": "ivy/functional/frontends/torch/tensor.py" } ]
[ { "content": "# global\n\n# local\nimport ivy\nimport ivy.functional.frontends.torch as torch_frontend\nimport ivy.functional.frontends.torch.nn.functional as torch_frontend_nn\nfrom ivy.functional.frontends.numpy.creation_routines.from_existing_data import (\n array as np_frontend_array,\n)\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.func_wrapper import with_supported_dtypes\n\n\nclass Tensor:\n def __init__(self, array, device=None, _init_overload=False):\n\n if _init_overload:\n self._ivy_array = (\n ivy.array(array) if not isinstance(array, ivy.Array) else array\n )\n\n else:\n self._ivy_array = ivy.array(\n array, dtype=torch_frontend.float32, device=device\n )\n\n def __repr__(self):\n return str(self._ivy_array.__repr__()).replace(\n \"ivy.array\", \"ivy.frontends.torch.Tensor\"\n )\n\n # Properties #\n # ---------- #\n\n @property\n def ivy_array(self):\n return self._ivy_array\n\n @property\n def device(self):\n return ivy.dev(self._ivy_array)\n\n @property\n def dtype(self):\n return self._ivy_array.dtype\n\n @property\n def shape(self):\n return self._ivy_array.shape\n\n # Setters #\n # --------#\n\n @ivy_array.setter\n def ivy_array(self, array):\n self._ivy_array = (\n ivy.array(array) if not isinstance(array, ivy.Array) else array\n )\n\n # Instance Methods #\n # ---------------- #\n def reshape(self, *args, shape=None):\n if args and shape:\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\n if shape is not None:\n return torch_frontend.reshape(self._ivy_array, shape)\n if args:\n if isinstance(args[0], (tuple, list)):\n shape = args[0]\n return torch_frontend.reshape(self._ivy_array, shape)\n else:\n return torch_frontend.reshape(self._ivy_array, args)\n return torch_frontend.reshape(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def reshape_as(self, other):\n return torch_frontend.reshape(self, other.shape)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def add(self, other, *, alpha=1):\n return torch_frontend.add(self._ivy_array, other, alpha=alpha)\n\n def chunk(self, chunks, dim=0):\n return torch_frontend.chunk(self._ivy_array, chunks, dim=dim)\n\n def any(self, dim=None, keepdim=False, *, out=None):\n return torch_frontend.any(self._ivy_array, dim=dim, keepdim=keepdim, out=out)\n\n def all(self, dim=None, keepdim=False):\n return torch_frontend.all(self._ivy_array, dim=dim, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def add_(self, other, *, alpha=1):\n self._ivy_array = self.add(other, alpha=alpha).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asin(self):\n return torch_frontend.asin(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asin_(self):\n self._ivy_array = self.asin().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def sum(self):\n return torch_frontend.sum(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sin(self):\n return torch_frontend.sin(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sin_(self):\n self._ivy_array = self.sin().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sinh(self):\n return 
torch_frontend.sinh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sinh_(self):\n self._ivy_array = self.sinh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cos(self):\n return torch_frontend.cos(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cos_(self):\n self._ivy_array = self.cos().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cosh(self):\n return torch_frontend.cosh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cosh_(self):\n self._ivy_array = self.cosh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arcsin(self):\n return torch_frontend.arcsin(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arcsin_(self):\n self._ivy_array = self.arcsin().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def atan(self):\n return torch_frontend.atan(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def atan_(self):\n self._ivy_array = self.atan().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def atan2(self, other):\n return torch_frontend.atan2(self._ivy_array, other)\n\n def view(self, *args, shape=None):\n \"\"\"\n Reshape Tensor.\n\n possible arguments are either:\n - size\n - tuple of ints\n - list of ints\n - torch.Size object\n - ints\n Parameters\n ----------\n args:int arguments\n shape: optional shape\n\n Returns reshaped tensor\n -------\n \"\"\"\n if shape and not args:\n shape_tup = shape\n elif args and not shape:\n if (\n isinstance(args[0], tuple)\n or isinstance(args[0], list)\n or type(args[0]).__name__ == \"Size\"\n ) and len(args) == 1:\n shape_tup = args[0]\n else:\n shape_tup = args\n else:\n raise ValueError(\n \"View only accepts as argument ints, tuple or list of ints or \"\n \"the keyword argument size.\"\n )\n return torch_frontend.reshape(self._ivy_array, shape_tup)\n\n def float(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.float32, copy=False)\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asinh(self):\n return torch_frontend.asinh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def asinh_(self):\n self._ivy_array = self.asinh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tan(self):\n return torch_frontend.tan(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tan_(self):\n self._ivy_array = self.tan().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tanh(self):\n return torch_frontend.tanh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def tanh_(self):\n self._ivy_array = self.tanh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def atanh(self):\n return torch_frontend.atanh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, 
\"torch\")\n def atanh_(self):\n self._ivy_array = self.atanh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctanh(self):\n return torch_frontend.arctanh(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctanh_(self):\n self._ivy_array = self.arctanh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def log(self):\n return torch_frontend.log(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def log_(self):\n self._ivy_array = self.log().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def log2(self):\n return torch_frontend.log2(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def relu(self):\n return torch_frontend_nn.relu(self._ivy_array)\n\n def amax(self, dim=None, keepdim=False):\n return torch_frontend.amax(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def amin(self, dim=None, keepdim=False):\n return torch_frontend.amin(self._ivy_array, dim=dim, keepdim=keepdim)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def aminmax(self, dim=None, keepdim=False):\n return torch_frontend.aminmax(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def abs(self):\n return torch_frontend.abs(self._ivy_array)\n\n def abs_(self):\n self._ivy_array = self.abs().ivy_array\n return self\n\n def logical_and(self, other):\n return torch_frontend.logical_and(self._ivy_array, other)\n\n def bitwise_not(self, *, out=None):\n return torch_frontend.bitwise_not(self._ivy_array)\n\n def bitwise_and(self, other):\n return torch_frontend.bitwise_and(self._ivy_array, other)\n\n def bitwise_or(self, other, *, out=None):\n return torch_frontend.bitwise_or(self._ivy_array, other)\n\n @with_supported_dtypes({\"1.11.0 and below\": (\"integer\",)}, \"torch\")\n def bitwise_or_(self, other, *, out=None):\n self._ivy_array = self.bitwise_or(other, out=out).ivy_array\n return self\n\n def contiguous(self, memory_format=None):\n return torch_frontend.tensor(self.ivy_array)\n\n def new_ones(self, size, *, dtype=None, device=None, requires_grad=False):\n return torch_frontend.ones(\n size, dtype=dtype, device=device, requires_grad=requires_grad\n )\n\n def new_zeros(self, size, *, dtype=None, device=None, requires_grad=False):\n return torch_frontend.zeros(\n size, dtype=dtype, device=device, requires_grad=requires_grad\n )\n\n def to(self, *args, **kwargs):\n if len(args) > 0:\n if isinstance(args[0], (ivy.Dtype, ivy.NativeDtype)):\n if self.dtype == args[0]:\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(self._ivy_array, dtype=args[0])\n return cast_tensor\n if isinstance(args[0], (ivy.Device, ivy.NativeDevice, str)):\n if isinstance(args[0], str):\n ivy.utils.assertions.check_elem_in_list(\n args[0],\n [\n \"cpu\",\n \"cuda\",\n \"xpu\",\n \"mkldnn\",\n \"opengl\",\n \"opencl\",\n \"ideep\",\n \"hip\",\n \"ve\",\n \"ort\",\n \"mlc\",\n \"xla\",\n \"lazy\",\n \"vulkan\",\n \"meta\",\n \"hpu\",\n ],\n )\n if self.device == args[0]:\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(self._ivy_array, device=args[0])\n return cast_tensor\n else:\n if self.dtype == args[0].dtype and self.device == ivy.dev(args[0]):\n return self\n else:\n cast_tensor = self.clone()\n 
cast_tensor.ivy_array = ivy.asarray(\n self._ivy_array,\n dtype=args[0].dtype,\n device=args[0].device,\n )\n return cast_tensor\n else:\n if (\n \"dtype\" in kwargs\n and \"device\" in kwargs\n and self.dtype == kwargs[\"dtype\"]\n and self.device == kwargs[\"device\"]\n ):\n return self\n else:\n cast_tensor = self.clone()\n cast_tensor.ivy_array = ivy.asarray(\n self._ivy_array,\n device=kwargs[\"device\"] if \"device\" in kwargs else self.device,\n dtype=kwargs[\"dtype\"] if \"dtype\" in kwargs else self.dtype,\n )\n return cast_tensor\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctan(self):\n return torch_frontend.atan(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arctan_(self):\n self._ivy_array = self.arctan().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def arctan2(self, other):\n return torch_frontend.arctan2(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def arctan2_(self, other):\n self._ivy_array = self.arctan2(other).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acos(self):\n return torch_frontend.acos(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acos_(self):\n self._ivy_array = self.acos().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arccos(self):\n return torch_frontend.arccos(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def arccos_(self):\n self._ivy_array = self.arccos().ivy_array\n return self\n\n def new_tensor(\n self,\n data,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n layout=None,\n pin_memory=False,\n ):\n dtype = ivy.dtype(self._ivy_array) if dtype is None else dtype\n device = ivy.dev(self._ivy_array) if device is None else device\n _data = ivy.asarray(data, copy=True, dtype=dtype, device=device)\n return torch_frontend.tensor(_data)\n\n def view_as(self, other):\n return self.view(other.shape)\n\n def expand(self, *args, size=None):\n if args and size:\n raise TypeError(\"expand() got multiple values for argument 'size'\")\n if args:\n if isinstance(args[0], (tuple, list)):\n size = args[0]\n else:\n size = args\n\n return torch_frontend.tensor(ivy.expand(self._ivy_array, tuple(size)))\n\n def expand_as(self, other):\n return self.expand(\n ivy.shape(other.ivy_array if isinstance(other, Tensor) else other)\n )\n\n def detach(self):\n return torch_frontend.tensor(\n ivy.stop_gradient(self._ivy_array, preserve_type=False)\n )\n\n def unsqueeze(self, dim):\n return torch_frontend.unsqueeze(self, dim)\n\n def unsqueeze_(self, dim):\n self._ivy_array = self.unsqueeze(dim).ivy_array\n return self\n\n def split(self, split_size, dim=0):\n return torch_frontend.split(self, split_size, dim)\n\n def tensor_split(self, indices_or_sections, dim=0):\n return torch_frontend.tensor_split(self.ivy_array, indices_or_sections, dim)\n\n def vsplit(self, indices_or_sections=None, /, *, indices=None, sections=None):\n return torch_frontend.vsplit(\n self.ivy_array, indices_or_sections, indices=indices, sections=sections\n )\n\n def hsplit(self, indices_or_sections=None, /, *, indices=None, sections=None):\n return torch_frontend.hsplit(\n self.ivy_array, indices_or_sections, indices=indices, 
sections=sections\n )\n\n def dsplit(self, indices_or_sections=None, /, *, indices=None, sections=None):\n return torch_frontend.dsplit(\n self.ivy_array, indices_or_sections, indices=indices, sections=sections\n )\n\n def dim(self):\n return self._ivy_array.ndim\n\n def new_full(\n self,\n size,\n fill_value,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n layout=None,\n pin_memory=False,\n ):\n dtype = ivy.dtype(self._ivy_array) if dtype is None else dtype\n device = ivy.dev(self._ivy_array) if device is None else device\n _data = ivy.full(size, fill_value, dtype=dtype, device=device)\n return torch_frontend.tensor(_data)\n\n def new_empty(\n self,\n size,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n layout=None,\n pin_memory=False,\n ):\n dtype = ivy.dtype(self._ivy_array) if dtype is None else dtype\n device = ivy.dev(self._ivy_array) if device is None else device\n _data = ivy.empty(size, dtype=dtype, device=device)\n return torch_frontend.tensor(_data)\n\n def unfold(self, dimension, size, step):\n slices = []\n for i in range(0, self._ivy_array.shape[dimension] - size + 1, step):\n slices.append(self._ivy_array[i : i + size])\n return torch_frontend.stack(slices)\n\n def long(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.int64, copy=False)\n return self\n\n def max(self, dim=None, keepdim=False):\n return torch_frontend.max(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def is_cuda(self):\n return \"gpu\" in ivy.dev(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def pow(self, exponent):\n return torch_frontend.pow(self._ivy_array, exponent)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def pow_(self, exponent):\n self._ivy_array = self.pow(exponent).ivy_array\n return self\n\n def size(self, dim=None):\n shape = ivy.shape(self._ivy_array)\n if dim is None:\n return shape\n else:\n try:\n return shape[dim]\n except IndexError:\n raise IndexError(\n \"Dimension out of range (expected to be in range of [{}, {}], \"\n \"but got {}\".format(len(shape), len(shape) - 1, dim)\n )\n\n def matmul(self, other):\n return torch_frontend.matmul(self._ivy_array, other)\n\n def argwhere(self):\n return torch_frontend.argwhere(self._ivy_array)\n\n def argmax(self, dim=None, keepdim=False):\n return torch_frontend.argmax(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def argmin(self, dim=None, keepdim=False):\n return torch_frontend.argmin(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def argsort(self, dim=-1, descending=False):\n return torch_frontend.argsort(self._ivy_array, dim=dim, descending=descending)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def ceil(self):\n return torch_frontend.ceil(self._ivy_array)\n\n def min(self, dim=None, keepdim=False):\n return torch_frontend.min(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def permute(self, *args, dims=None):\n if args and dims:\n raise TypeError(\"permute() got multiple values for argument 'dims'\")\n if dims is not None:\n return torch_frontend.permute(self._ivy_array, dims)\n if args:\n if isinstance(args[0], (tuple, list)):\n dims = args[0]\n return torch_frontend.permute(self._ivy_array, dims)\n else:\n return torch_frontend.permute(self._ivy_array, args)\n return torch_frontend.permute(self._ivy_array)\n\n def mean(self, dim=None, keepdim=False):\n return torch_frontend.mean(self._ivy_array, dim=dim, keepdim=keepdim)\n\n def transpose(self, dim0, dim1):\n 
return torch_frontend.transpose(self._ivy_array, dim0=dim0, dim1=dim1)\n\n def transpose_(self, dim0, dim1):\n self._ivy_array = self.transpose(dim0, dim1).ivy_array\n return self\n\n def flatten(self, start_dim=0, end_dim=-1):\n return torch_frontend.flatten(self._ivy_array, start_dim, end_dim)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cumsum(self, dim, dtype):\n return torch_frontend.cumsum(self._ivy_array, dim, dtype=dtype)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cumsum_(self, dim, *, dtype=None):\n self._ivy_array = self.cumsum(dim, dtype).ivy_array\n return self\n\n def inverse(self):\n return torch_frontend.inverse(self._ivy_array)\n\n def neg(self):\n return torch_frontend.negative(self._ivy_array)\n\n def int(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.int32, copy=False)\n return self\n\n def bool(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.bool, copy=False)\n return self\n\n def type(self, dtype=None, non_blocking=False, **kwargs):\n if ivy.exists(dtype):\n self._ivy_array = ivy.astype(self._ivy_array, dtype)\n return self\n else:\n return str(self._ivy_array.dtype)\n\n def type_as(self, other):\n if self.dtype != other.dtype:\n self._ivy_array = ivy.astype(self._ivy_array, other.dtype)\n return self\n else:\n pass\n\n def byte(self, memory_format=None):\n self._ivy_array = ivy.astype(self.ivy_array, ivy.uint8, copy=False)\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def ne(self, other):\n return torch_frontend.ne(self._ivy_array, other)\n\n def squeeze(self, dim):\n return torch_frontend.squeeze(self._ivy_array, dim)\n\n def flip(self, dims):\n return torch_frontend.flip(self._ivy_array, dims)\n\n def fliplr(self):\n return torch_frontend.fliplr(self._ivy_array)\n\n def sort(self, dim=-1, descending=False):\n return torch_frontend.sort(self._ivy_array, dim=dim, descending=descending)\n\n def tril(self, diagonal=0):\n return torch_frontend.tril(self._ivy_array, diagonal=diagonal)\n\n def index_select(self, dim, index):\n return torch_frontend.index_select(self._ivy_array, dim, index)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\", \"float16\")}, \"torch\")\n def clamp(self, min=None, max=None, *, out=None):\n if min is not None and max is not None and ivy.all(min > max):\n return torch_frontend.tensor(ivy.array(self._ivy_array).full_like(max))\n return torch_frontend.clamp(self._ivy_array, min=min, max=max, out=out)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\", \"float16\")}, \"torch\")\n def clamp_(self, min=None, max=None, *, out=None):\n self._ivy_array = self.clamp(min=min, max=max, out=out).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def sqrt(self):\n return torch_frontend.sqrt(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def sqrt_(self):\n self._ivy_array = self.sqrt().ivy_array\n return self\n\n def where(self, condition, other):\n # TODO: replace with torch_frontend.where when it's added\n return torch_frontend.tensor(ivy.where(condition, self._ivy_array, other))\n\n def clone(self, memory_format=None):\n return torch_frontend.tensor(ivy.array(self._ivy_array, copy=True))\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acosh(self):\n return 
torch_frontend.acosh(self._ivy_array)\n\n def real(self):\n return torch_frontend.real(self._ivy_array)\n\n def masked_fill(self, mask, value):\n # TODO: replace with torch_frontend.where when it's added\n return torch_frontend.tensor(ivy.where(mask, value, self._ivy_array))\n\n def masked_fill_(self, mask, value):\n self._ivy_array = self.masked_fill(mask, value).ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def acosh_(self):\n self._ivy_array = self.acosh().ivy_array\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def numpy(self):\n return np_frontend_array(self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def sigmoid(self):\n return torch_frontend.sigmoid(self.ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def softmax(self, dim=None, dtype=None):\n return torch_frontend.nn.functional.softmax(\n self._ivy_array, dim=dim, dtype=dtype\n )\n\n def repeat(self, *args, repeats=None):\n if args and repeats:\n raise ivy.utils.exceptions.IvyException(\n \"repeat() got multiple values for argument 'repeats'\"\n )\n if args:\n if isinstance(args[0], (tuple, list)):\n repeats = args[0]\n else:\n repeats = args\n elif not isinstance(repeats, (tuple, list)):\n raise ivy.utils.exceptions.IvyException(\n \"repeat(): argument 'repeats' must be tuple of ints\"\n )\n\n return torch_frontend.tile(self._ivy_array, repeats)\n\n def unbind(self, dim=0):\n return torch_frontend.unbind(self._ivy_array, dim=dim)\n\n def bitwise_and_(self, other):\n self.ivy_array = self.bitwise_and(other).ivy_array\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n def atan2_(self, other):\n self._ivy_array = self.atan2(other).ivy_array\n return self\n \n def fmin(self, other, out=None):\n return torch_frontend.fmin(self._ivy_array, other, out=out)\n\n # Special Methods #\n # -------------------#\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __add__(self, other):\n return self.add(other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __mod__(self, other):\n return torch_frontend.remainder(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __pow__(self, exponent):\n return self.pow(exponent)\n\n def __long__(self, memory_format=None):\n return self.long()\n\n def __getitem__(self, query, /):\n ret = ivy.get_item(self._ivy_array, query)\n return torch_frontend.Tensor(ret, _init_overload=True)\n\n def __setitem__(self, key, value):\n if hasattr(value, \"ivy_array\"):\n value = (\n ivy.to_scalar(value.ivy_array)\n if value.shape == ()\n else ivy.to_list(value)\n )\n self._ivy_array[key] = value\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __radd__(self, other):\n return torch_frontend.add(other, self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __mul__(self, other):\n return torch_frontend.mul(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __rmul__(self, other):\n return torch_frontend.mul(other, self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __sub__(self, other):\n return torch_frontend.subtract(self._ivy_array, other)\n\n def 
__truediv__(self, other):\n return torch_frontend.div(self._ivy_array, other)\n\n def __iadd__(self, other):\n ret = torch_frontend.add(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __imod__(self, other):\n ret = torch_frontend.remainder(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __imul__(self, other):\n ret = torch_frontend.mul(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __isub__(self, other):\n ret = torch_frontend.subtract(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n def __itruediv__(self, other):\n ret = torch_frontend.div(self._ivy_array, other)\n self.ivy_array = ivy.inplace_update(\n self._ivy_array, ivy.astype(ret.ivy_array, self.dtype)\n )\n return self\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __eq__(self, other):\n return torch_frontend.equal(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __gt__(self, other):\n return torch_frontend.greater(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __ne__(self, other):\n return self.ne(other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __rsub__(self, other):\n return torch_frontend.subtract(other, self._ivy_array)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __lt__(self, other):\n return torch_frontend.less(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"bfloat16\",)}, \"torch\")\n def __or__(self, other):\n return torch_frontend.bitwise_or(self._ivy_array, other)\n\n def __invert__(self):\n return torch_frontend.bitwise_not(self._ivy_array)\n\n def __and__(self, other):\n return torch_frontend.bitwise_and(self, other)\n\n # Method aliases\n absolute, absolute_ = abs, abs_\n ndimension = dim\n\n def bitwise_xor(self, other, *, out=None):\n return torch_frontend.bitwise_xor(self._ivy_array, other)\n\n @with_unsupported_dtypes({\"1.11.0 and below\": (\"float16\",)}, \"torch\")\n def cumprod(self, dim, dtype):\n return torch_frontend.cumprod(self._ivy_array, dim, dtype=dtype)\n", "path": "ivy/functional/frontends/torch/tensor.py" } ]
diff --git a/ivy/functional/frontends/torch/tensor.py b/ivy/functional/frontends/torch/tensor.py index 082462226bcd9..3b0e3799e8c08 100644 --- a/ivy/functional/frontends/torch/tensor.py +++ b/ivy/functional/frontends/torch/tensor.py @@ -754,6 +754,9 @@ def bitwise_and_(self, other): def atan2_(self, other): self._ivy_array = self.atan2(other).ivy_array return self + + def fmin(self, other, out=None): + return torch_frontend.fmin(self._ivy_array, other, out=out) # Special Methods # # -------------------# diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py index a334442677337..aedf73b5b1745 100644 --- a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py @@ -5843,7 +5843,6 @@ def test_torch_instance_bitwise_xor( on_device=on_device, ) - # cumprod @handle_frontend_method( class_tree=CLASS_TREE, @@ -5921,3 +5920,39 @@ def test_torch_instance_relu( frontend=frontend, on_device=on_device, ) + + +# fmin +@handle_frontend_method( + class_tree=CLASS_TREE, + init_tree="torch.tensor", + method_name="fmin", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + num_arrays=2, + ), +) +def test_torch_instance_fmin( + dtype_and_x, + frontend_method_data, + init_flags, + method_flags, + frontend, + on_device, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_method( + init_input_dtypes=input_dtype, + init_all_as_kwargs_np={ + "data": x[0], + }, + method_input_dtypes=input_dtype, + method_all_as_kwargs_np={ + "other": x[1], + }, + frontend_method_data=frontend_method_data, + init_flags=init_flags, + method_flags=method_flags, + frontend=frontend, + on_device=on_device, + )
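The diff above adds a `Tensor.fmin` instance method to the ivy torch frontend plus a matching frontend-method test. For context, a small usage sketch follows; it is illustrative only and assumes ivy is installed with a working backend and that the functional `torch_frontend.fmin` (which the new method delegates to) is already available.

```python
# Illustrative only: exercising the Tensor.fmin method added in the diff above.
# Assumes ivy is installed with any working backend and that the functional
# frontend torch_frontend.fmin (wrapped by the method) exists.
import ivy
import ivy.functional.frontends.torch as torch_frontend

ivy.set_backend("numpy")  # any installed backend works here

a = torch_frontend.tensor([1.0, float("nan"), 3.0])
b = torch_frontend.tensor([2.0, 2.0, 1.0])

# Like torch.fmin: element-wise minimum that prefers the non-NaN operand.
result = a.fmin(b)
print(result)  # expected values: [1.0, 2.0, 1.0]
```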
Chia-Network__chia-blockchain-13374
Remove or Set to 0 DataLayer default fee
[ { "content": "from __future__ import annotations\n\nimport dataclasses\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, cast\n\nfrom chia.data_layer.data_layer_errors import OfferIntegrityError\nfrom chia.data_layer.data_layer_util import (\n CancelOfferRequest,\n CancelOfferResponse,\n ClearPendingRootsRequest,\n ClearPendingRootsResponse,\n DLProof,\n GetProofRequest,\n GetProofResponse,\n HashOnlyProof,\n MakeOfferRequest,\n MakeOfferResponse,\n ProofLayer,\n Side,\n StoreProofsHashes,\n Subscription,\n TakeOfferRequest,\n TakeOfferResponse,\n VerifyOfferResponse,\n VerifyProofResponse,\n)\nfrom chia.data_layer.data_layer_wallet import DataLayerWallet, Mirror, verify_offer\nfrom chia.rpc.data_layer_rpc_util import marshal\nfrom chia.rpc.rpc_server import Endpoint, EndpointResult\nfrom chia.rpc.util import marshal as streamable_marshal\nfrom chia.types.blockchain_format.sized_bytes import bytes32\nfrom chia.util.byte_types import hexstr_to_bytes\n\n# todo input assertions for all rpc's\nfrom chia.util.ints import uint8, uint64\nfrom chia.util.streamable import recurse_jsonify\nfrom chia.util.ws_message import WsRpcMessage\nfrom chia.wallet.trading.offer import Offer as TradingOffer\n\nif TYPE_CHECKING:\n from chia.data_layer.data_layer import DataLayer\n\n\ndef process_change(change: Dict[str, Any]) -> Dict[str, Any]:\n # TODO: A full class would likely be nice for this so downstream doesn't\n # have to deal with maybe-present attributes or Dict[str, Any] hints.\n reference_node_hash = change.get(\"reference_node_hash\")\n if reference_node_hash is not None:\n reference_node_hash = bytes32(hexstr_to_bytes(reference_node_hash))\n\n side = change.get(\"side\")\n if side is not None:\n side = Side(side)\n\n value = change.get(\"value\")\n if value is not None:\n value = hexstr_to_bytes(value)\n\n return {\n **change,\n \"key\": hexstr_to_bytes(change[\"key\"]),\n \"value\": value,\n \"reference_node_hash\": reference_node_hash,\n \"side\": side,\n }\n\n\ndef get_fee(config: Dict[str, Any], request: Dict[str, Any]) -> uint64:\n fee = request.get(\"fee\")\n if fee is None:\n config_fee = config.get(\"fee\", 0)\n return uint64(config_fee)\n return uint64(fee)\n\n\nclass DataLayerRpcApi:\n # TODO: other RPC APIs do not accept a wallet and the service start does not expect to provide one\n def __init__(self, data_layer: DataLayer): # , wallet: DataLayerWallet):\n self.service: DataLayer = data_layer\n self.service_name = \"chia_data_layer\"\n\n def get_routes(self) -> Dict[str, Endpoint]:\n return {\n \"/wallet_log_in\": self.wallet_log_in,\n \"/create_data_store\": self.create_data_store,\n \"/get_owned_stores\": self.get_owned_stores,\n \"/batch_update\": self.batch_update,\n \"/submit_pending_root\": self.submit_pending_root,\n \"/get_value\": self.get_value,\n \"/get_keys\": self.get_keys,\n \"/get_keys_values\": self.get_keys_values,\n \"/get_ancestors\": self.get_ancestors,\n \"/get_root\": self.get_root,\n \"/get_local_root\": self.get_local_root,\n \"/get_roots\": self.get_roots,\n \"/delete_key\": self.delete_key,\n \"/insert\": self.insert,\n \"/subscribe\": self.subscribe,\n \"/unsubscribe\": self.unsubscribe,\n \"/add_mirror\": self.add_mirror,\n \"/delete_mirror\": self.delete_mirror,\n \"/get_mirrors\": self.get_mirrors,\n \"/remove_subscriptions\": self.remove_subscriptions,\n \"/subscriptions\": self.subscriptions,\n \"/get_kv_diff\": self.get_kv_diff,\n \"/get_root_history\": self.get_root_history,\n \"/add_missing_files\": 
self.add_missing_files,\n \"/make_offer\": self.make_offer,\n \"/take_offer\": self.take_offer,\n \"/verify_offer\": self.verify_offer,\n \"/cancel_offer\": self.cancel_offer,\n \"/get_sync_status\": self.get_sync_status,\n \"/check_plugins\": self.check_plugins,\n \"/clear_pending_roots\": self.clear_pending_roots,\n \"/get_proof\": self.get_proof,\n \"/verify_proof\": self.verify_proof,\n }\n\n async def _state_changed(self, change: str, change_data: Optional[Dict[str, Any]]) -> List[WsRpcMessage]:\n return []\n\n async def wallet_log_in(self, request: Dict[str, Any]) -> EndpointResult:\n if self.service is None:\n raise Exception(\"Data layer not created\")\n fingerprint = cast(int, request[\"fingerprint\"])\n await self.service.wallet_log_in(fingerprint=fingerprint)\n return {}\n\n async def create_data_store(self, request: Dict[str, Any]) -> EndpointResult:\n if self.service is None:\n raise Exception(\"Data layer not created\")\n fee = get_fee(self.service.config, request)\n verbose = request.get(\"verbose\", False)\n txs, value = await self.service.create_store(uint64(fee))\n if verbose:\n return {\"txs\": txs, \"id\": value.hex()}\n else:\n return {\"id\": value.hex()}\n\n async def get_owned_stores(self, request: Dict[str, Any]) -> EndpointResult:\n if self.service is None:\n raise Exception(\"Data layer not created\")\n singleton_records = await self.service.get_owned_stores()\n return {\"store_ids\": [singleton.launcher_id.hex() for singleton in singleton_records]}\n\n async def get_value(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = bytes32.from_hexstr(request[\"id\"])\n key = hexstr_to_bytes(request[\"key\"])\n root_hash = request.get(\"root_hash\")\n if root_hash is not None:\n root_hash = bytes32.from_hexstr(root_hash)\n if self.service is None:\n raise Exception(\"Data layer not created\")\n value = await self.service.get_value(store_id=store_id, key=key, root_hash=root_hash)\n hex = None\n if value is not None:\n hex = value.hex()\n return {\"value\": hex}\n\n async def get_keys(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = bytes32.from_hexstr(request[\"id\"])\n root_hash = request.get(\"root_hash\")\n page = request.get(\"page\", None)\n max_page_size = request.get(\"max_page_size\", None)\n if root_hash is not None:\n root_hash = bytes32.from_hexstr(root_hash)\n if self.service is None:\n raise Exception(\"Data layer not created\")\n\n if page is None:\n keys = await self.service.get_keys(store_id, root_hash)\n else:\n keys_paginated = await self.service.get_keys_paginated(store_id, root_hash, page, max_page_size)\n keys = keys_paginated.keys\n\n if keys == [] and root_hash is not None and root_hash != bytes32([0] * 32):\n raise Exception(f\"Can't find keys for {root_hash}\")\n\n response: EndpointResult = {\"keys\": [f\"0x{key.hex()}\" for key in keys]}\n\n if page is not None:\n response.update(\n {\n \"total_pages\": keys_paginated.total_pages,\n \"total_bytes\": keys_paginated.total_bytes,\n \"root_hash\": keys_paginated.root_hash,\n },\n )\n\n return response\n\n async def get_keys_values(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n root_hash = request.get(\"root_hash\")\n page = request.get(\"page\", None)\n max_page_size = request.get(\"max_page_size\", None)\n if root_hash is not None:\n root_hash = bytes32.from_hexstr(root_hash)\n if self.service is None:\n raise Exception(\"Data layer not created\")\n\n if page is None:\n keys_values = await 
self.service.get_keys_values(store_id, root_hash)\n else:\n keys_values_paginated = await self.service.get_keys_values_paginated(\n store_id, root_hash, page, max_page_size\n )\n keys_values = keys_values_paginated.keys_values\n\n json_nodes = [recurse_jsonify(dataclasses.asdict(node)) for node in keys_values]\n if not json_nodes and root_hash is not None and root_hash != bytes32([0] * 32):\n raise Exception(f\"Can't find keys and values for {root_hash}\")\n\n response: EndpointResult = {\"keys_values\": json_nodes}\n\n if page is not None:\n response.update(\n {\n \"total_pages\": keys_values_paginated.total_pages,\n \"total_bytes\": keys_values_paginated.total_bytes,\n \"root_hash\": keys_values_paginated.root_hash,\n },\n )\n\n return response\n\n async def get_ancestors(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n node_hash = bytes32.from_hexstr(request[\"hash\"])\n if self.service is None:\n raise Exception(\"Data layer not created\")\n value = await self.service.get_ancestors(node_hash, store_id)\n return {\"ancestors\": value}\n\n async def batch_update(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n id - the id of the store we are operating on\n changelist - a list of changes to apply on store\n \"\"\"\n fee = get_fee(self.service.config, request)\n changelist = [process_change(change) for change in request[\"changelist\"]]\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n submit_on_chain = request.get(\"submit_on_chain\", True)\n # todo input checks\n if self.service is None:\n raise Exception(\"Data layer not created\")\n transaction_record = await self.service.batch_update(store_id, changelist, uint64(fee), submit_on_chain)\n if submit_on_chain:\n if transaction_record is None:\n raise Exception(f\"Batch update failed for: {store_id}\")\n return {\"tx_id\": transaction_record.name}\n else:\n if transaction_record is not None:\n raise Exception(\"Transaction submitted on chain, but submit_on_chain set to False\")\n return {}\n\n async def submit_pending_root(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n fee = get_fee(self.service.config, request)\n transaction_record = await self.service.submit_pending_root(store_id, uint64(fee))\n return {\"tx_id\": transaction_record.name}\n\n async def insert(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n rows_to_add a list of clvm objects as bytes to add to table\n rows_to_remove a list of row hashes to remove\n \"\"\"\n fee = get_fee(self.service.config, request)\n key = hexstr_to_bytes(request[\"key\"])\n value = hexstr_to_bytes(request[\"value\"])\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n # todo input checks\n if self.service is None:\n raise Exception(\"Data layer not created\")\n changelist = [{\"action\": \"insert\", \"key\": key, \"value\": value}]\n transaction_record = await self.service.batch_update(store_id, changelist, uint64(fee))\n assert transaction_record is not None\n return {\"tx_id\": transaction_record.name}\n\n async def delete_key(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n rows_to_add a list of clvm objects as bytes to add to table\n rows_to_remove a list of row hashes to remove\n \"\"\"\n fee = get_fee(self.service.config, request)\n key = hexstr_to_bytes(request[\"key\"])\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n # todo input checks\n if self.service is None:\n raise Exception(\"Data layer not created\")\n changelist 
= [{\"action\": \"delete\", \"key\": key}]\n transaction_record = await self.service.batch_update(store_id, changelist, uint64(fee))\n assert transaction_record is not None\n return {\"tx_id\": transaction_record.name}\n\n async def get_root(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"get hash of latest tree root\"\"\"\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n # todo input checks\n if self.service is None:\n raise Exception(\"Data layer not created\")\n rec = await self.service.get_root(store_id)\n if rec is None:\n raise Exception(f\"Failed to get root for {store_id.hex()}\")\n return {\"hash\": rec.root, \"confirmed\": rec.confirmed, \"timestamp\": rec.timestamp}\n\n async def get_local_root(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"get hash of latest tree root saved in our local datastore\"\"\"\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n # todo input checks\n if self.service is None:\n raise Exception(\"Data layer not created\")\n res = await self.service.get_local_root(store_id)\n return {\"hash\": res}\n\n async def get_roots(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n get state hashes for a list of roots\n \"\"\"\n store_ids = request[\"ids\"]\n # todo input checks\n if self.service is None:\n raise Exception(\"Data layer not created\")\n roots = []\n for id in store_ids:\n id_bytes = bytes32.from_hexstr(id)\n rec = await self.service.get_root(id_bytes)\n if rec is not None:\n roots.append({\"id\": id_bytes, \"hash\": rec.root, \"confirmed\": rec.confirmed, \"timestamp\": rec.timestamp})\n return {\"root_hashes\": roots}\n\n async def subscribe(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n subscribe to singleton\n \"\"\"\n store_id = request.get(\"id\")\n if store_id is None:\n raise Exception(\"missing store id in request\")\n\n if self.service is None:\n raise Exception(\"Data layer not created\")\n store_id_bytes = bytes32.from_hexstr(store_id)\n urls = request.get(\"urls\", [])\n await self.service.subscribe(store_id=store_id_bytes, urls=urls)\n return {}\n\n async def unsubscribe(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n unsubscribe from singleton\n \"\"\"\n store_id = request.get(\"id\")\n retain_data = request.get(\"retain\", False)\n if store_id is None:\n raise Exception(\"missing store id in request\")\n if self.service is None:\n raise Exception(\"Data layer not created\")\n store_id_bytes = bytes32.from_hexstr(store_id)\n await self.service.unsubscribe(store_id_bytes, retain_data)\n return {}\n\n async def subscriptions(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n List current subscriptions\n \"\"\"\n if self.service is None:\n raise Exception(\"Data layer not created\")\n subscriptions: List[Subscription] = await self.service.get_subscriptions()\n return {\"store_ids\": [sub.tree_id.hex() for sub in subscriptions]}\n\n async def remove_subscriptions(self, request: Dict[str, Any]) -> EndpointResult:\n if self.service is None:\n raise Exception(\"Data layer not created\")\n store_id = request.get(\"id\")\n if store_id is None:\n raise Exception(\"missing store id in request\")\n store_id_bytes = bytes32.from_hexstr(store_id)\n urls = request[\"urls\"]\n await self.service.remove_subscriptions(store_id=store_id_bytes, urls=urls)\n return {}\n\n async def add_missing_files(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n complete the data server files.\n \"\"\"\n if \"ids\" in request:\n store_ids = request[\"ids\"]\n ids_bytes = 
[bytes32.from_hexstr(id) for id in store_ids]\n else:\n subscriptions: List[Subscription] = await self.service.get_subscriptions()\n ids_bytes = [subscription.tree_id for subscription in subscriptions]\n overwrite = request.get(\"overwrite\", False)\n foldername: Optional[Path] = None\n if \"foldername\" in request:\n foldername = Path(request[\"foldername\"])\n for tree_id in ids_bytes:\n await self.service.add_missing_files(tree_id, overwrite, foldername)\n return {}\n\n async def get_root_history(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n get history of state hashes for a store\n \"\"\"\n if self.service is None:\n raise Exception(\"Data layer not created\")\n store_id = request[\"id\"]\n id_bytes = bytes32.from_hexstr(store_id)\n records = await self.service.get_root_history(id_bytes)\n res: List[Dict[str, Any]] = []\n for rec in records:\n res.insert(0, {\"root_hash\": rec.root, \"confirmed\": rec.confirmed, \"timestamp\": rec.timestamp})\n return {\"root_history\": res}\n\n async def get_kv_diff(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n get kv diff between two root hashes\n \"\"\"\n if self.service is None:\n raise Exception(\"Data layer not created\")\n store_id = request[\"id\"]\n id_bytes = bytes32.from_hexstr(store_id)\n hash_1 = request[\"hash_1\"]\n hash_1_bytes = bytes32.from_hexstr(hash_1)\n hash_2 = request[\"hash_2\"]\n hash_2_bytes = bytes32.from_hexstr(hash_2)\n page = request.get(\"page\", None)\n max_page_size = request.get(\"max_page_size\", None)\n res: List[Dict[str, Any]] = []\n\n if page is None:\n records_dict = await self.service.get_kv_diff(id_bytes, hash_1_bytes, hash_2_bytes)\n records = list(records_dict)\n else:\n kv_diff_paginated = await self.service.get_kv_diff_paginated(\n id_bytes, hash_1_bytes, hash_2_bytes, page, max_page_size\n )\n records = kv_diff_paginated.kv_diff\n\n for rec in records:\n res.append({\"type\": rec.type.name, \"key\": rec.key.hex(), \"value\": rec.value.hex()})\n\n response: EndpointResult = {\"diff\": res}\n if page is not None:\n response.update(\n {\n \"total_pages\": kv_diff_paginated.total_pages,\n \"total_bytes\": kv_diff_paginated.total_bytes,\n },\n )\n\n return response\n\n async def add_mirror(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = request[\"id\"]\n id_bytes = bytes32.from_hexstr(store_id)\n urls = request[\"urls\"]\n amount = request[\"amount\"]\n fee = get_fee(self.service.config, request)\n await self.service.add_mirror(id_bytes, urls, amount, fee)\n return {}\n\n async def delete_mirror(self, request: Dict[str, Any]) -> EndpointResult:\n coin_id = request[\"coin_id\"]\n coin_id_bytes = bytes32.from_hexstr(coin_id)\n fee = get_fee(self.service.config, request)\n await self.service.delete_mirror(coin_id_bytes, fee)\n return {}\n\n async def get_mirrors(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = request[\"id\"]\n id_bytes = bytes32.from_hexstr(store_id)\n mirrors: List[Mirror] = await self.service.get_mirrors(id_bytes)\n return {\"mirrors\": [mirror.to_json_dict() for mirror in mirrors]}\n\n @marshal() # type: ignore[arg-type]\n async def make_offer(self, request: MakeOfferRequest) -> MakeOfferResponse:\n fee = get_fee(self.service.config, {\"fee\": request.fee})\n offer = await self.service.make_offer(maker=request.maker, taker=request.taker, fee=fee)\n return MakeOfferResponse(success=True, offer=offer)\n\n @marshal() # type: ignore[arg-type]\n async def take_offer(self, request: TakeOfferRequest) -> TakeOfferResponse:\n fee = 
get_fee(self.service.config, {\"fee\": request.fee})\n trade_record = await self.service.take_offer(\n offer_bytes=request.offer.offer,\n maker=request.offer.maker,\n taker=request.offer.taker,\n fee=fee,\n )\n return TakeOfferResponse(success=True, trade_id=trade_record.trade_id)\n\n @marshal() # type: ignore[arg-type]\n async def verify_offer(self, request: TakeOfferRequest) -> VerifyOfferResponse:\n fee = get_fee(self.service.config, {\"fee\": request.fee})\n\n offer = TradingOffer.from_bytes(request.offer.offer)\n summary = await DataLayerWallet.get_offer_summary(offer=offer)\n\n try:\n verify_offer(maker=request.offer.maker, taker=request.offer.taker, summary=summary)\n except OfferIntegrityError as e:\n return VerifyOfferResponse(success=True, valid=False, error=str(e))\n\n return VerifyOfferResponse(success=True, valid=True, fee=fee)\n\n @marshal() # type: ignore[arg-type]\n async def cancel_offer(self, request: CancelOfferRequest) -> CancelOfferResponse:\n fee = get_fee(self.service.config, {\"fee\": request.fee})\n\n await self.service.cancel_offer(\n trade_id=request.trade_id,\n secure=request.secure,\n fee=fee,\n )\n\n return CancelOfferResponse(success=True)\n\n async def get_sync_status(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = request[\"id\"]\n id_bytes = bytes32.from_hexstr(store_id)\n if self.service is None:\n raise Exception(\"Data layer not created\")\n sync_status = await self.service.get_sync_status(id_bytes)\n\n return {\n \"sync_status\": {\n \"root_hash\": sync_status.root_hash.hex(),\n \"generation\": sync_status.generation,\n \"target_root_hash\": sync_status.target_root_hash.hex(),\n \"target_generation\": sync_status.target_generation,\n }\n }\n\n async def check_plugins(self, request: Dict[str, Any]) -> EndpointResult:\n if self.service is None:\n raise Exception(\"Data layer not created\")\n plugin_status = await self.service.check_plugins()\n\n return plugin_status.marshal()\n\n @marshal() # type: ignore[arg-type]\n async def clear_pending_roots(self, request: ClearPendingRootsRequest) -> ClearPendingRootsResponse:\n root = await self.service.data_store.clear_pending_roots(tree_id=request.store_id)\n\n return ClearPendingRootsResponse(success=root is not None, root=root)\n\n @streamable_marshal\n async def get_proof(self, request: GetProofRequest) -> GetProofResponse:\n root = await self.service.get_root(store_id=request.store_id)\n if root is None:\n raise ValueError(\"no root\")\n\n all_proofs: List[HashOnlyProof] = []\n for key in request.keys:\n node = await self.service.data_store.get_node_by_key(tree_id=request.store_id, key=key)\n pi = await self.service.data_store.get_proof_of_inclusion_by_hash(\n tree_id=request.store_id, node_hash=node.hash, use_optimized=True\n )\n\n proof = HashOnlyProof.from_key_value(\n key=key,\n value=node.value,\n node_hash=pi.node_hash,\n layers=[\n ProofLayer(\n other_hash_side=uint8(layer.other_hash_side),\n other_hash=layer.other_hash,\n combined_hash=layer.combined_hash,\n )\n for layer in pi.layers\n ],\n )\n all_proofs.append(proof)\n\n store_proof = StoreProofsHashes(store_id=request.store_id, proofs=all_proofs)\n return GetProofResponse(\n proof=DLProof(\n store_proofs=store_proof,\n coin_id=root.coin_id,\n inner_puzzle_hash=root.inner_puzzle_hash,\n ),\n success=True,\n )\n\n @streamable_marshal\n async def verify_proof(self, request: DLProof) -> VerifyProofResponse:\n response = await self.service.wallet_rpc.dl_verify_proof(request)\n return response\n", "path": "chia/rpc/data_layer_rpc_api.py" 
} ]
[ { "content": "from __future__ import annotations\n\nimport dataclasses\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, cast\n\nfrom chia.data_layer.data_layer_errors import OfferIntegrityError\nfrom chia.data_layer.data_layer_util import (\n CancelOfferRequest,\n CancelOfferResponse,\n ClearPendingRootsRequest,\n ClearPendingRootsResponse,\n DLProof,\n GetProofRequest,\n GetProofResponse,\n HashOnlyProof,\n MakeOfferRequest,\n MakeOfferResponse,\n ProofLayer,\n Side,\n StoreProofsHashes,\n Subscription,\n TakeOfferRequest,\n TakeOfferResponse,\n VerifyOfferResponse,\n VerifyProofResponse,\n)\nfrom chia.data_layer.data_layer_wallet import DataLayerWallet, Mirror, verify_offer\nfrom chia.rpc.data_layer_rpc_util import marshal\nfrom chia.rpc.rpc_server import Endpoint, EndpointResult\nfrom chia.rpc.util import marshal as streamable_marshal\nfrom chia.types.blockchain_format.sized_bytes import bytes32\nfrom chia.util.byte_types import hexstr_to_bytes\n\n# todo input assertions for all rpc's\nfrom chia.util.ints import uint8, uint64\nfrom chia.util.streamable import recurse_jsonify\nfrom chia.util.ws_message import WsRpcMessage\nfrom chia.wallet.trading.offer import Offer as TradingOffer\n\nif TYPE_CHECKING:\n from chia.data_layer.data_layer import DataLayer\n\n\ndef process_change(change: Dict[str, Any]) -> Dict[str, Any]:\n # TODO: A full class would likely be nice for this so downstream doesn't\n # have to deal with maybe-present attributes or Dict[str, Any] hints.\n reference_node_hash = change.get(\"reference_node_hash\")\n if reference_node_hash is not None:\n reference_node_hash = bytes32(hexstr_to_bytes(reference_node_hash))\n\n side = change.get(\"side\")\n if side is not None:\n side = Side(side)\n\n value = change.get(\"value\")\n if value is not None:\n value = hexstr_to_bytes(value)\n\n return {\n **change,\n \"key\": hexstr_to_bytes(change[\"key\"]),\n \"value\": value,\n \"reference_node_hash\": reference_node_hash,\n \"side\": side,\n }\n\n\ndef get_fee(config: Dict[str, Any], request: Dict[str, Any]) -> uint64:\n fee = request.get(\"fee\")\n if fee is None:\n fee = 0 # DL no longer reads the fee from the config\n return uint64(fee)\n\n\nclass DataLayerRpcApi:\n # TODO: other RPC APIs do not accept a wallet and the service start does not expect to provide one\n def __init__(self, data_layer: DataLayer): # , wallet: DataLayerWallet):\n self.service: DataLayer = data_layer\n self.service_name = \"chia_data_layer\"\n\n def get_routes(self) -> Dict[str, Endpoint]:\n return {\n \"/wallet_log_in\": self.wallet_log_in,\n \"/create_data_store\": self.create_data_store,\n \"/get_owned_stores\": self.get_owned_stores,\n \"/batch_update\": self.batch_update,\n \"/submit_pending_root\": self.submit_pending_root,\n \"/get_value\": self.get_value,\n \"/get_keys\": self.get_keys,\n \"/get_keys_values\": self.get_keys_values,\n \"/get_ancestors\": self.get_ancestors,\n \"/get_root\": self.get_root,\n \"/get_local_root\": self.get_local_root,\n \"/get_roots\": self.get_roots,\n \"/delete_key\": self.delete_key,\n \"/insert\": self.insert,\n \"/subscribe\": self.subscribe,\n \"/unsubscribe\": self.unsubscribe,\n \"/add_mirror\": self.add_mirror,\n \"/delete_mirror\": self.delete_mirror,\n \"/get_mirrors\": self.get_mirrors,\n \"/remove_subscriptions\": self.remove_subscriptions,\n \"/subscriptions\": self.subscriptions,\n \"/get_kv_diff\": self.get_kv_diff,\n \"/get_root_history\": self.get_root_history,\n \"/add_missing_files\": self.add_missing_files,\n 
\"/make_offer\": self.make_offer,\n \"/take_offer\": self.take_offer,\n \"/verify_offer\": self.verify_offer,\n \"/cancel_offer\": self.cancel_offer,\n \"/get_sync_status\": self.get_sync_status,\n \"/check_plugins\": self.check_plugins,\n \"/clear_pending_roots\": self.clear_pending_roots,\n \"/get_proof\": self.get_proof,\n \"/verify_proof\": self.verify_proof,\n }\n\n async def _state_changed(self, change: str, change_data: Optional[Dict[str, Any]]) -> List[WsRpcMessage]:\n return []\n\n async def wallet_log_in(self, request: Dict[str, Any]) -> EndpointResult:\n if self.service is None:\n raise Exception(\"Data layer not created\")\n fingerprint = cast(int, request[\"fingerprint\"])\n await self.service.wallet_log_in(fingerprint=fingerprint)\n return {}\n\n async def create_data_store(self, request: Dict[str, Any]) -> EndpointResult:\n if self.service is None:\n raise Exception(\"Data layer not created\")\n fee = get_fee(self.service.config, request)\n verbose = request.get(\"verbose\", False)\n txs, value = await self.service.create_store(uint64(fee))\n if verbose:\n return {\"txs\": txs, \"id\": value.hex()}\n else:\n return {\"id\": value.hex()}\n\n async def get_owned_stores(self, request: Dict[str, Any]) -> EndpointResult:\n if self.service is None:\n raise Exception(\"Data layer not created\")\n singleton_records = await self.service.get_owned_stores()\n return {\"store_ids\": [singleton.launcher_id.hex() for singleton in singleton_records]}\n\n async def get_value(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = bytes32.from_hexstr(request[\"id\"])\n key = hexstr_to_bytes(request[\"key\"])\n root_hash = request.get(\"root_hash\")\n if root_hash is not None:\n root_hash = bytes32.from_hexstr(root_hash)\n if self.service is None:\n raise Exception(\"Data layer not created\")\n value = await self.service.get_value(store_id=store_id, key=key, root_hash=root_hash)\n hex = None\n if value is not None:\n hex = value.hex()\n return {\"value\": hex}\n\n async def get_keys(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = bytes32.from_hexstr(request[\"id\"])\n root_hash = request.get(\"root_hash\")\n page = request.get(\"page\", None)\n max_page_size = request.get(\"max_page_size\", None)\n if root_hash is not None:\n root_hash = bytes32.from_hexstr(root_hash)\n if self.service is None:\n raise Exception(\"Data layer not created\")\n\n if page is None:\n keys = await self.service.get_keys(store_id, root_hash)\n else:\n keys_paginated = await self.service.get_keys_paginated(store_id, root_hash, page, max_page_size)\n keys = keys_paginated.keys\n\n if keys == [] and root_hash is not None and root_hash != bytes32([0] * 32):\n raise Exception(f\"Can't find keys for {root_hash}\")\n\n response: EndpointResult = {\"keys\": [f\"0x{key.hex()}\" for key in keys]}\n\n if page is not None:\n response.update(\n {\n \"total_pages\": keys_paginated.total_pages,\n \"total_bytes\": keys_paginated.total_bytes,\n \"root_hash\": keys_paginated.root_hash,\n },\n )\n\n return response\n\n async def get_keys_values(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n root_hash = request.get(\"root_hash\")\n page = request.get(\"page\", None)\n max_page_size = request.get(\"max_page_size\", None)\n if root_hash is not None:\n root_hash = bytes32.from_hexstr(root_hash)\n if self.service is None:\n raise Exception(\"Data layer not created\")\n\n if page is None:\n keys_values = await self.service.get_keys_values(store_id, root_hash)\n 
else:\n keys_values_paginated = await self.service.get_keys_values_paginated(\n store_id, root_hash, page, max_page_size\n )\n keys_values = keys_values_paginated.keys_values\n\n json_nodes = [recurse_jsonify(dataclasses.asdict(node)) for node in keys_values]\n if not json_nodes and root_hash is not None and root_hash != bytes32([0] * 32):\n raise Exception(f\"Can't find keys and values for {root_hash}\")\n\n response: EndpointResult = {\"keys_values\": json_nodes}\n\n if page is not None:\n response.update(\n {\n \"total_pages\": keys_values_paginated.total_pages,\n \"total_bytes\": keys_values_paginated.total_bytes,\n \"root_hash\": keys_values_paginated.root_hash,\n },\n )\n\n return response\n\n async def get_ancestors(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n node_hash = bytes32.from_hexstr(request[\"hash\"])\n if self.service is None:\n raise Exception(\"Data layer not created\")\n value = await self.service.get_ancestors(node_hash, store_id)\n return {\"ancestors\": value}\n\n async def batch_update(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n id - the id of the store we are operating on\n changelist - a list of changes to apply on store\n \"\"\"\n fee = get_fee(self.service.config, request)\n changelist = [process_change(change) for change in request[\"changelist\"]]\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n submit_on_chain = request.get(\"submit_on_chain\", True)\n # todo input checks\n if self.service is None:\n raise Exception(\"Data layer not created\")\n transaction_record = await self.service.batch_update(store_id, changelist, uint64(fee), submit_on_chain)\n if submit_on_chain:\n if transaction_record is None:\n raise Exception(f\"Batch update failed for: {store_id}\")\n return {\"tx_id\": transaction_record.name}\n else:\n if transaction_record is not None:\n raise Exception(\"Transaction submitted on chain, but submit_on_chain set to False\")\n return {}\n\n async def submit_pending_root(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n fee = get_fee(self.service.config, request)\n transaction_record = await self.service.submit_pending_root(store_id, uint64(fee))\n return {\"tx_id\": transaction_record.name}\n\n async def insert(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n rows_to_add a list of clvm objects as bytes to add to table\n rows_to_remove a list of row hashes to remove\n \"\"\"\n fee = get_fee(self.service.config, request)\n key = hexstr_to_bytes(request[\"key\"])\n value = hexstr_to_bytes(request[\"value\"])\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n # todo input checks\n if self.service is None:\n raise Exception(\"Data layer not created\")\n changelist = [{\"action\": \"insert\", \"key\": key, \"value\": value}]\n transaction_record = await self.service.batch_update(store_id, changelist, uint64(fee))\n assert transaction_record is not None\n return {\"tx_id\": transaction_record.name}\n\n async def delete_key(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n rows_to_add a list of clvm objects as bytes to add to table\n rows_to_remove a list of row hashes to remove\n \"\"\"\n fee = get_fee(self.service.config, request)\n key = hexstr_to_bytes(request[\"key\"])\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n # todo input checks\n if self.service is None:\n raise Exception(\"Data layer not created\")\n changelist = [{\"action\": \"delete\", \"key\": key}]\n 
transaction_record = await self.service.batch_update(store_id, changelist, uint64(fee))\n assert transaction_record is not None\n return {\"tx_id\": transaction_record.name}\n\n async def get_root(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"get hash of latest tree root\"\"\"\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n # todo input checks\n if self.service is None:\n raise Exception(\"Data layer not created\")\n rec = await self.service.get_root(store_id)\n if rec is None:\n raise Exception(f\"Failed to get root for {store_id.hex()}\")\n return {\"hash\": rec.root, \"confirmed\": rec.confirmed, \"timestamp\": rec.timestamp}\n\n async def get_local_root(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"get hash of latest tree root saved in our local datastore\"\"\"\n store_id = bytes32(hexstr_to_bytes(request[\"id\"]))\n # todo input checks\n if self.service is None:\n raise Exception(\"Data layer not created\")\n res = await self.service.get_local_root(store_id)\n return {\"hash\": res}\n\n async def get_roots(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n get state hashes for a list of roots\n \"\"\"\n store_ids = request[\"ids\"]\n # todo input checks\n if self.service is None:\n raise Exception(\"Data layer not created\")\n roots = []\n for id in store_ids:\n id_bytes = bytes32.from_hexstr(id)\n rec = await self.service.get_root(id_bytes)\n if rec is not None:\n roots.append({\"id\": id_bytes, \"hash\": rec.root, \"confirmed\": rec.confirmed, \"timestamp\": rec.timestamp})\n return {\"root_hashes\": roots}\n\n async def subscribe(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n subscribe to singleton\n \"\"\"\n store_id = request.get(\"id\")\n if store_id is None:\n raise Exception(\"missing store id in request\")\n\n if self.service is None:\n raise Exception(\"Data layer not created\")\n store_id_bytes = bytes32.from_hexstr(store_id)\n urls = request.get(\"urls\", [])\n await self.service.subscribe(store_id=store_id_bytes, urls=urls)\n return {}\n\n async def unsubscribe(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n unsubscribe from singleton\n \"\"\"\n store_id = request.get(\"id\")\n retain_data = request.get(\"retain\", False)\n if store_id is None:\n raise Exception(\"missing store id in request\")\n if self.service is None:\n raise Exception(\"Data layer not created\")\n store_id_bytes = bytes32.from_hexstr(store_id)\n await self.service.unsubscribe(store_id_bytes, retain_data)\n return {}\n\n async def subscriptions(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n List current subscriptions\n \"\"\"\n if self.service is None:\n raise Exception(\"Data layer not created\")\n subscriptions: List[Subscription] = await self.service.get_subscriptions()\n return {\"store_ids\": [sub.tree_id.hex() for sub in subscriptions]}\n\n async def remove_subscriptions(self, request: Dict[str, Any]) -> EndpointResult:\n if self.service is None:\n raise Exception(\"Data layer not created\")\n store_id = request.get(\"id\")\n if store_id is None:\n raise Exception(\"missing store id in request\")\n store_id_bytes = bytes32.from_hexstr(store_id)\n urls = request[\"urls\"]\n await self.service.remove_subscriptions(store_id=store_id_bytes, urls=urls)\n return {}\n\n async def add_missing_files(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n complete the data server files.\n \"\"\"\n if \"ids\" in request:\n store_ids = request[\"ids\"]\n ids_bytes = [bytes32.from_hexstr(id) for id in store_ids]\n else:\n 
subscriptions: List[Subscription] = await self.service.get_subscriptions()\n ids_bytes = [subscription.tree_id for subscription in subscriptions]\n overwrite = request.get(\"overwrite\", False)\n foldername: Optional[Path] = None\n if \"foldername\" in request:\n foldername = Path(request[\"foldername\"])\n for tree_id in ids_bytes:\n await self.service.add_missing_files(tree_id, overwrite, foldername)\n return {}\n\n async def get_root_history(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n get history of state hashes for a store\n \"\"\"\n if self.service is None:\n raise Exception(\"Data layer not created\")\n store_id = request[\"id\"]\n id_bytes = bytes32.from_hexstr(store_id)\n records = await self.service.get_root_history(id_bytes)\n res: List[Dict[str, Any]] = []\n for rec in records:\n res.insert(0, {\"root_hash\": rec.root, \"confirmed\": rec.confirmed, \"timestamp\": rec.timestamp})\n return {\"root_history\": res}\n\n async def get_kv_diff(self, request: Dict[str, Any]) -> EndpointResult:\n \"\"\"\n get kv diff between two root hashes\n \"\"\"\n if self.service is None:\n raise Exception(\"Data layer not created\")\n store_id = request[\"id\"]\n id_bytes = bytes32.from_hexstr(store_id)\n hash_1 = request[\"hash_1\"]\n hash_1_bytes = bytes32.from_hexstr(hash_1)\n hash_2 = request[\"hash_2\"]\n hash_2_bytes = bytes32.from_hexstr(hash_2)\n page = request.get(\"page\", None)\n max_page_size = request.get(\"max_page_size\", None)\n res: List[Dict[str, Any]] = []\n\n if page is None:\n records_dict = await self.service.get_kv_diff(id_bytes, hash_1_bytes, hash_2_bytes)\n records = list(records_dict)\n else:\n kv_diff_paginated = await self.service.get_kv_diff_paginated(\n id_bytes, hash_1_bytes, hash_2_bytes, page, max_page_size\n )\n records = kv_diff_paginated.kv_diff\n\n for rec in records:\n res.append({\"type\": rec.type.name, \"key\": rec.key.hex(), \"value\": rec.value.hex()})\n\n response: EndpointResult = {\"diff\": res}\n if page is not None:\n response.update(\n {\n \"total_pages\": kv_diff_paginated.total_pages,\n \"total_bytes\": kv_diff_paginated.total_bytes,\n },\n )\n\n return response\n\n async def add_mirror(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = request[\"id\"]\n id_bytes = bytes32.from_hexstr(store_id)\n urls = request[\"urls\"]\n amount = request[\"amount\"]\n fee = get_fee(self.service.config, request)\n await self.service.add_mirror(id_bytes, urls, amount, fee)\n return {}\n\n async def delete_mirror(self, request: Dict[str, Any]) -> EndpointResult:\n coin_id = request[\"coin_id\"]\n coin_id_bytes = bytes32.from_hexstr(coin_id)\n fee = get_fee(self.service.config, request)\n await self.service.delete_mirror(coin_id_bytes, fee)\n return {}\n\n async def get_mirrors(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = request[\"id\"]\n id_bytes = bytes32.from_hexstr(store_id)\n mirrors: List[Mirror] = await self.service.get_mirrors(id_bytes)\n return {\"mirrors\": [mirror.to_json_dict() for mirror in mirrors]}\n\n @marshal() # type: ignore[arg-type]\n async def make_offer(self, request: MakeOfferRequest) -> MakeOfferResponse:\n fee = get_fee(self.service.config, {\"fee\": request.fee})\n offer = await self.service.make_offer(maker=request.maker, taker=request.taker, fee=fee)\n return MakeOfferResponse(success=True, offer=offer)\n\n @marshal() # type: ignore[arg-type]\n async def take_offer(self, request: TakeOfferRequest) -> TakeOfferResponse:\n fee = get_fee(self.service.config, {\"fee\": request.fee})\n trade_record 
= await self.service.take_offer(\n offer_bytes=request.offer.offer,\n maker=request.offer.maker,\n taker=request.offer.taker,\n fee=fee,\n )\n return TakeOfferResponse(success=True, trade_id=trade_record.trade_id)\n\n @marshal() # type: ignore[arg-type]\n async def verify_offer(self, request: TakeOfferRequest) -> VerifyOfferResponse:\n fee = get_fee(self.service.config, {\"fee\": request.fee})\n\n offer = TradingOffer.from_bytes(request.offer.offer)\n summary = await DataLayerWallet.get_offer_summary(offer=offer)\n\n try:\n verify_offer(maker=request.offer.maker, taker=request.offer.taker, summary=summary)\n except OfferIntegrityError as e:\n return VerifyOfferResponse(success=True, valid=False, error=str(e))\n\n return VerifyOfferResponse(success=True, valid=True, fee=fee)\n\n @marshal() # type: ignore[arg-type]\n async def cancel_offer(self, request: CancelOfferRequest) -> CancelOfferResponse:\n fee = get_fee(self.service.config, {\"fee\": request.fee})\n\n await self.service.cancel_offer(\n trade_id=request.trade_id,\n secure=request.secure,\n fee=fee,\n )\n\n return CancelOfferResponse(success=True)\n\n async def get_sync_status(self, request: Dict[str, Any]) -> EndpointResult:\n store_id = request[\"id\"]\n id_bytes = bytes32.from_hexstr(store_id)\n if self.service is None:\n raise Exception(\"Data layer not created\")\n sync_status = await self.service.get_sync_status(id_bytes)\n\n return {\n \"sync_status\": {\n \"root_hash\": sync_status.root_hash.hex(),\n \"generation\": sync_status.generation,\n \"target_root_hash\": sync_status.target_root_hash.hex(),\n \"target_generation\": sync_status.target_generation,\n }\n }\n\n async def check_plugins(self, request: Dict[str, Any]) -> EndpointResult:\n if self.service is None:\n raise Exception(\"Data layer not created\")\n plugin_status = await self.service.check_plugins()\n\n return plugin_status.marshal()\n\n @marshal() # type: ignore[arg-type]\n async def clear_pending_roots(self, request: ClearPendingRootsRequest) -> ClearPendingRootsResponse:\n root = await self.service.data_store.clear_pending_roots(tree_id=request.store_id)\n\n return ClearPendingRootsResponse(success=root is not None, root=root)\n\n @streamable_marshal\n async def get_proof(self, request: GetProofRequest) -> GetProofResponse:\n root = await self.service.get_root(store_id=request.store_id)\n if root is None:\n raise ValueError(\"no root\")\n\n all_proofs: List[HashOnlyProof] = []\n for key in request.keys:\n node = await self.service.data_store.get_node_by_key(tree_id=request.store_id, key=key)\n pi = await self.service.data_store.get_proof_of_inclusion_by_hash(\n tree_id=request.store_id, node_hash=node.hash, use_optimized=True\n )\n\n proof = HashOnlyProof.from_key_value(\n key=key,\n value=node.value,\n node_hash=pi.node_hash,\n layers=[\n ProofLayer(\n other_hash_side=uint8(layer.other_hash_side),\n other_hash=layer.other_hash,\n combined_hash=layer.combined_hash,\n )\n for layer in pi.layers\n ],\n )\n all_proofs.append(proof)\n\n store_proof = StoreProofsHashes(store_id=request.store_id, proofs=all_proofs)\n return GetProofResponse(\n proof=DLProof(\n store_proofs=store_proof,\n coin_id=root.coin_id,\n inner_puzzle_hash=root.inner_puzzle_hash,\n ),\n success=True,\n )\n\n @streamable_marshal\n async def verify_proof(self, request: DLProof) -> VerifyProofResponse:\n response = await self.service.wallet_rpc.dl_verify_proof(request)\n return response\n", "path": "chia/rpc/data_layer_rpc_api.py" } ]
diff --git a/chia/rpc/data_layer_rpc_api.py b/chia/rpc/data_layer_rpc_api.py index 2af5b6ccbeb4..702a77a2be88 100644 --- a/chia/rpc/data_layer_rpc_api.py +++ b/chia/rpc/data_layer_rpc_api.py @@ -69,8 +69,7 @@ def process_change(change: Dict[str, Any]) -> Dict[str, Any]: def get_fee(config: Dict[str, Any], request: Dict[str, Any]) -> uint64: fee = request.get("fee") if fee is None: - config_fee = config.get("fee", 0) - return uint64(config_fee) + fee = 0 # DL no longer reads the fee from the config return uint64(fee) diff --git a/chia/util/initial-config.yaml b/chia/util/initial-config.yaml index a4f5c02325ed..2b5530fda6fb 100644 --- a/chia/util/initial-config.yaml +++ b/chia/util/initial-config.yaml @@ -680,7 +680,6 @@ data_layer: # TODO: what considerations are there in choosing this? rpc_port: 8562 rpc_server_max_request_body_size: 26214400 - fee: 1000000000 # this is a debug and profiling facility that logs all SQLite commands to a # separate log file (under logging/data_sql.log).
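To make the behavioural change in the diff concrete, here is a minimal standalone re-statement of the post-change `get_fee` logic (mirroring the shape of the code in `chia/rpc/data_layer_rpc_api.py`, not imported from chia): the fee is taken from the request if present, otherwise it is 0, and the config value is no longer consulted.

```python
# Standalone sketch of the post-change get_fee behaviour; uses plain int
# instead of chia's uint64 so it runs without chia installed.
from typing import Any, Dict


def get_fee(config: Dict[str, Any], request: Dict[str, Any]) -> int:
    fee = request.get("fee")
    if fee is None:
        fee = 0  # DL no longer reads the fee from the config
    return int(fee)


# The config-level default (previously `fee: 1000000000` in initial-config.yaml,
# removed by the diff above) is now ignored; only an explicit request fee counts.
assert get_fee({"fee": 1_000_000_000}, {}) == 0
assert get_fee({}, {"fee": 500}) == 500
```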
frappe__frappe-15581
imaplib error when fetching emails

When someone tried to pull emails from their account (using an IMAP server), they encountered this error:

<img width="633" alt="Screenshot 2021-12-21 at 10 44 51 AM" src="https://user-images.githubusercontent.com/58825865/146875998-ade6c090-2a5c-4000-ab94-64a24d0cf88f.png">

The same issue was raised when we had the limit at 100k (which is the default) and it was overridden in this PR #6751:

<img width="674" alt="Screenshot 2021-12-21 at 11 04 00 AM" src="https://user-images.githubusercontent.com/58825865/146876789-8f320da0-00d4-452a-9253-6ea9e834c147.png">

Do we have to keep increasing the limit every time we face the issue? Or is there another possible solution? [Refer to this](https://stackoverflow.com/questions/25457441/reading-emails-with-imaplib-got-more-than-10000-bytes-error) @surajshetty3416 @gavindsouza
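The Stack Overflow answer linked above boils down to raising `imaplib`'s hard-coded line-length cap before fetching; frappe's `receive.py` (included below) currently pins it at 20480. A hedged sketch of that workaround follows; the cap value chosen here is only for illustration and is not necessarily the fix that was merged for this issue.

```python
# Hedged sketch of the workaround referenced in the linked Stack Overflow answer:
# raise imaplib's line-length cap so long FETCH/SEARCH responses don't abort.
# The value below is an assumption for illustration, not frappe's eventual fix.
import imaplib

imaplib._MAXLINE = 10_000_000  # receive.py below currently sets 20480


def fetch_unseen_uids(host: str, user: str, password: str) -> list:
    """Connect over IMAP-SSL and return the UIDs of unseen messages."""
    conn = imaplib.IMAP4_SSL(host)
    try:
        conn.login(user, password)
        conn.select("INBOX", readonly=True)
        status, data = conn.uid("search", None, "UNSEEN")
        return data[0].split() if status == "OK" and data[0] else []
    finally:
        conn.logout()
```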
[ { "content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\nimport datetime\nimport email\nimport email.utils\nimport imaplib\nimport poplib\nimport re\nimport time\nimport json\nfrom email.header import decode_header\n\nimport _socket\nimport chardet\nfrom email_reply_parser import EmailReplyParser\n\nimport frappe\nfrom frappe import _, safe_decode, safe_encode\nfrom frappe.core.doctype.file.file import (MaxFileSizeReachedError,\n\tget_random_filename)\nfrom frappe.utils import (cint, convert_utc_to_user_timezone, cstr,\n\textract_email_id, markdown, now, parse_addr, strip, get_datetime,\n\tadd_days, sanitize_html)\nfrom frappe.utils.user import is_system_user\nfrom frappe.utils.html_utils import clean_email_html\n\n# fix due to a python bug in poplib that limits it to 2048\npoplib._MAXLINE = 20480\nimaplib._MAXLINE = 20480\n\n# fix due to a python bug in poplib that limits it to 2048\npoplib._MAXLINE = 20480\nimaplib._MAXLINE = 20480\n\n\nclass EmailSizeExceededError(frappe.ValidationError): pass\nclass EmailTimeoutError(frappe.ValidationError): pass\nclass TotalSizeExceededError(frappe.ValidationError): pass\nclass LoginLimitExceeded(frappe.ValidationError): pass\nclass SentEmailInInboxError(Exception):\n\tpass\n\nclass EmailServer:\n\t\"\"\"Wrapper for POP server to pull emails.\"\"\"\n\tdef __init__(self, args=None):\n\t\tself.setup(args)\n\n\tdef setup(self, args=None):\n\t\t# overrride\n\t\tself.settings = args or frappe._dict()\n\n\tdef check_mails(self):\n\t\t# overrride\n\t\treturn True\n\n\tdef process_message(self, mail):\n\t\t# overrride\n\t\tpass\n\n\tdef connect(self):\n\t\t\"\"\"Connect to **Email Account**.\"\"\"\n\t\tif cint(self.settings.use_imap):\n\t\t\treturn self.connect_imap()\n\t\telse:\n\t\t\treturn self.connect_pop()\n\n\tdef connect_imap(self):\n\t\t\"\"\"Connect to IMAP\"\"\"\n\t\ttry:\n\t\t\tif cint(self.settings.use_ssl):\n\t\t\t\tself.imap = Timed_IMAP4_SSL(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get(\"pop_timeout\"))\n\t\t\telse:\n\t\t\t\tself.imap = Timed_IMAP4(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get(\"pop_timeout\"))\n\t\t\tself.imap.login(self.settings.username, self.settings.password)\n\t\t\t# connection established!\n\t\t\treturn True\n\n\t\texcept _socket.error:\n\t\t\t# Invalid mail server -- due to refusing connection\n\t\t\tfrappe.msgprint(_('Invalid Mail Server. Please rectify and try again.'))\n\t\t\traise\n\n\tdef connect_pop(self):\n\t\t#this method return pop connection\n\t\ttry:\n\t\t\tif cint(self.settings.use_ssl):\n\t\t\t\tself.pop = Timed_POP3_SSL(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get(\"pop_timeout\"))\n\t\t\telse:\n\t\t\t\tself.pop = Timed_POP3(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get(\"pop_timeout\"))\n\n\t\t\tself.pop.user(self.settings.username)\n\t\t\tself.pop.pass_(self.settings.password)\n\n\t\t\t# connection established!\n\t\t\treturn True\n\n\t\texcept _socket.error:\n\t\t\t# log performs rollback and logs error in Error Log\n\t\t\tfrappe.log_error(\"receive.connect_pop\")\n\n\t\t\t# Invalid mail server -- due to refusing connection\n\t\t\tfrappe.msgprint(_('Invalid Mail Server. Please rectify and try again.'))\n\t\t\traise\n\n\t\texcept poplib.error_proto as e:\n\t\t\tif self.is_temporary_system_problem(e):\n\t\t\t\treturn False\n\n\t\t\telse:\n\t\t\t\tfrappe.msgprint(_('Invalid User Name or Support Password. 
Please rectify and try again.'))\n\t\t\t\traise\n\n\tdef select_imap_folder(self, folder):\n\t\tself.imap.select(folder)\n\n\tdef logout(self):\n\t\tif cint(self.settings.use_imap):\n\t\t\tself.imap.logout()\n\t\telse:\n\t\t\tself.pop.quit()\n\t\treturn\n\n\tdef get_messages(self, folder=\"INBOX\"):\n\t\t\"\"\"Returns new email messages in a list.\"\"\"\n\t\tif not (self.check_mails() or self.connect()):\n\t\t\treturn []\n\n\t\tfrappe.db.commit()\n\n\t\tuid_list = []\n\n\t\ttry:\n\t\t\t# track if errors arised\n\t\t\tself.errors = False\n\t\t\tself.latest_messages = []\n\t\t\tself.seen_status = {}\n\t\t\tself.uid_reindexed = False\n\n\t\t\tuid_list = email_list = self.get_new_mails(folder)\n\n\t\t\tif not email_list:\n\t\t\t\treturn\n\n\t\t\tnum = num_copy = len(email_list)\n\n\t\t\t# WARNING: Hard coded max no. of messages to be popped\n\t\t\tif num > 50: num = 50\n\n\t\t\t# size limits\n\t\t\tself.total_size = 0\n\t\t\tself.max_email_size = cint(frappe.local.conf.get(\"max_email_size\"))\n\t\t\tself.max_total_size = 5 * self.max_email_size\n\n\t\t\tfor i, message_meta in enumerate(email_list[:num]):\n\t\t\t\ttry:\n\t\t\t\t\tself.retrieve_message(message_meta, i+1)\n\t\t\t\texcept (TotalSizeExceededError, EmailTimeoutError, LoginLimitExceeded):\n\t\t\t\t\tbreak\n\t\t\t# WARNING: Mark as read - message number 101 onwards from the pop list\n\t\t\t# This is to avoid having too many messages entering the system\n\t\t\tnum = num_copy\n\t\t\tif not cint(self.settings.use_imap):\n\t\t\t\tif num > 100 and not self.errors:\n\t\t\t\t\tfor m in range(101, num+1):\n\t\t\t\t\t\tself.pop.dele(m)\n\n\t\texcept Exception as e:\n\t\t\tif self.has_login_limit_exceeded(e):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\traise\n\n\t\tout = { \"latest_messages\": self.latest_messages }\n\t\tif self.settings.use_imap:\n\t\t\tout.update({\n\t\t\t\t\"uid_list\": uid_list,\n\t\t\t\t\"seen_status\": self.seen_status,\n\t\t\t\t\"uid_reindexed\": self.uid_reindexed\n\t\t\t})\n\n\t\treturn out\n\n\tdef get_new_mails(self, folder):\n\t\t\"\"\"Return list of new mails\"\"\"\n\t\tif cint(self.settings.use_imap):\n\t\t\temail_list = []\n\t\t\tself.check_imap_uidvalidity(folder)\n\n\t\t\treadonly = False if self.settings.email_sync_rule == \"UNSEEN\" else True\n\n\t\t\tself.imap.select(folder, readonly=readonly)\n\t\t\tresponse, message = self.imap.uid('search', None, self.settings.email_sync_rule)\n\t\t\tif message[0]:\n\t\t\t\temail_list = message[0].split()\n\t\telse:\n\t\t\temail_list = self.pop.list()[1]\n\n\t\treturn email_list\n\n\tdef check_imap_uidvalidity(self, folder):\n\t\t# compare the UIDVALIDITY of email account and imap server\n\t\tuid_validity = self.settings.uid_validity\n\n\t\tresponse, message = self.imap.status(folder, \"(UIDVALIDITY UIDNEXT)\")\n\t\tcurrent_uid_validity = self.parse_imap_response(\"UIDVALIDITY\", message[0]) or 0\n\n\t\tuidnext = int(self.parse_imap_response(\"UIDNEXT\", message[0]) or \"1\")\n\t\tfrappe.db.set_value(\"Email Account\", self.settings.email_account, \"uidnext\", uidnext)\n\n\t\tif not uid_validity or uid_validity != current_uid_validity:\n\t\t\t# uidvalidity changed & all email uids are reindexed by server\n\t\t\tCommunication = frappe.qb.DocType(\"Communication\")\n\t\t\tfrappe.qb.update(Communication) \\\n\t\t\t\t.set(Communication.uid, -1) \\\n\t\t\t\t.where(Communication.communication_medium == \"Email\") \\\n\t\t\t\t.where(Communication.email_account == self.settings.email_account).run()\n\n\t\t\tif self.settings.use_imap:\n\t\t\t\t# new update for the IMAP Folder 
DocType\n\t\t\t\tIMAPFolder = frappe.qb.DocType(\"IMAP Folder\")\n\t\t\t\tfrappe.qb.update(IMAPFolder) \\\n\t\t\t\t\t.set(IMAPFolder.uidvalidity, current_uid_validity) \\\n\t\t\t\t\t.set(IMAPFolder.uidnext, uidnext) \\\n\t\t\t\t\t.where(IMAPFolder.parent == self.settings.email_account_name) \\\n\t\t\t\t\t.where(IMAPFolder.folder_name == folder).run()\n\t\t\telse:\n\t\t\t\tEmailAccount = frappe.qb.DocType(\"Email Account\")\n\t\t\t\tfrappe.qb.update(EmailAccount) \\\n\t\t\t\t\t.set(EmailAccount.uidvalidity, current_uid_validity) \\\n\t\t\t\t\t.set(EmailAccount.uidnext, uidnext) \\\n\t\t\t\t\t.where(EmailAccount.name == self.settings.email_account_name).run()\n\n\t\t\t# uid validity not found pulling emails for first time\n\t\t\tif not uid_validity:\n\t\t\t\tself.settings.email_sync_rule = \"UNSEEN\"\n\t\t\t\treturn\n\n\t\t\tsync_count = 100 if uid_validity else int(self.settings.initial_sync_count)\n\t\t\tfrom_uid = 1 if uidnext < (sync_count + 1) or (uidnext - sync_count) < 1 else uidnext - sync_count\n\t\t\t# sync last 100 email\n\t\t\tself.settings.email_sync_rule = \"UID {}:{}\".format(from_uid, uidnext)\n\t\t\tself.uid_reindexed = True\n\n\t\telif uid_validity == current_uid_validity:\n\t\t\treturn\n\n\tdef parse_imap_response(self, cmd, response):\n\t\tpattern = r\"(?<={cmd} )[0-9]*\".format(cmd=cmd)\n\t\tmatch = re.search(pattern, response.decode('utf-8'), re.U | re.I)\n\n\t\tif match:\n\t\t\treturn match.group(0)\n\t\telse:\n\t\t\treturn None\n\n\tdef retrieve_message(self, message_meta, msg_num=None):\n\t\tincoming_mail = None\n\t\ttry:\n\t\t\tself.validate_message_limits(message_meta)\n\n\t\t\tif cint(self.settings.use_imap):\n\t\t\t\tstatus, message = self.imap.uid('fetch', message_meta, '(BODY.PEEK[] BODY.PEEK[HEADER] FLAGS)')\n\t\t\t\traw = message[0]\n\n\t\t\t\tself.get_email_seen_status(message_meta, raw[0])\n\t\t\t\tself.latest_messages.append(raw[1])\n\t\t\telse:\n\t\t\t\tmsg = self.pop.retr(msg_num)\n\t\t\t\tself.latest_messages.append(b'\\n'.join(msg[1]))\n\t\texcept (TotalSizeExceededError, EmailTimeoutError):\n\t\t\t# propagate this error to break the loop\n\t\t\tself.errors = True\n\t\t\traise\n\n\t\texcept Exception as e:\n\t\t\tif self.has_login_limit_exceeded(e):\n\t\t\t\tself.errors = True\n\t\t\t\traise LoginLimitExceeded(e)\n\n\t\t\telse:\n\t\t\t\t# log performs rollback and logs error in Error Log\n\t\t\t\tfrappe.log_error(\"receive.get_messages\", self.make_error_msg(msg_num, incoming_mail))\n\t\t\t\tself.errors = True\n\t\t\t\tfrappe.db.rollback()\n\n\t\t\t\tif not cint(self.settings.use_imap):\n\t\t\t\t\tself.pop.dele(msg_num)\n\t\t\t\telse:\n\t\t\t\t\t# mark as seen if email sync rule is UNSEEN (syncing only unseen mails)\n\t\t\t\t\tif self.settings.email_sync_rule == \"UNSEEN\":\n\t\t\t\t\t\tself.imap.uid('STORE', message_meta, '+FLAGS', '(\\\\SEEN)')\n\t\telse:\n\t\t\tif not cint(self.settings.use_imap):\n\t\t\t\tself.pop.dele(msg_num)\n\t\t\telse:\n\t\t\t\t# mark as seen if email sync rule is UNSEEN (syncing only unseen mails)\n\t\t\t\tif self.settings.email_sync_rule == \"UNSEEN\":\n\t\t\t\t\tself.imap.uid('STORE', message_meta, '+FLAGS', '(\\\\SEEN)')\n\n\tdef get_email_seen_status(self, uid, flag_string):\n\t\t\"\"\" parse the email FLAGS response \"\"\"\n\t\tif not flag_string:\n\t\t\treturn None\n\n\t\tflags = []\n\t\tfor flag in imaplib.ParseFlags(flag_string) or []:\n\t\t\tpattern = re.compile(r\"\\w+\")\n\t\t\tmatch = re.search(pattern, frappe.as_unicode(flag))\n\t\t\tflags.append(match.group(0))\n\n\t\tif \"Seen\" in 
flags:\n\t\t\tself.seen_status.update({ uid: \"SEEN\" })\n\t\telse:\n\t\t\tself.seen_status.update({ uid: \"UNSEEN\" })\n\n\tdef has_login_limit_exceeded(self, e):\n\t\treturn \"-ERR Exceeded the login limit\" in strip(cstr(e.message))\n\n\tdef is_temporary_system_problem(self, e):\n\t\tmessages = (\n\t\t\t\"-ERR [SYS/TEMP] Temporary system problem. Please try again later.\",\n\t\t\t\"Connection timed out\",\n\t\t)\n\t\tfor message in messages:\n\t\t\tif message in strip(cstr(e)) or message in strip(cstr(getattr(e, 'strerror', ''))):\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef validate_message_limits(self, message_meta):\n\t\t# throttle based on email size\n\t\tif not self.max_email_size:\n\t\t\treturn\n\n\t\tm, size = message_meta.split()\n\t\tsize = cint(size)\n\n\t\tif size < self.max_email_size:\n\t\t\tself.total_size += size\n\t\t\tif self.total_size > self.max_total_size:\n\t\t\t\traise TotalSizeExceededError\n\t\telse:\n\t\t\traise EmailSizeExceededError\n\n\tdef make_error_msg(self, msg_num, incoming_mail):\n\t\terror_msg = \"Error in retrieving email.\"\n\t\tif not incoming_mail:\n\t\t\ttry:\n\t\t\t\t# retrieve headers\n\t\t\t\tincoming_mail = Email(b'\\n'.join(self.pop.top(msg_num, 5)[1]))\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\tif incoming_mail:\n\t\t\terror_msg += \"\\nDate: {date}\\nFrom: {from_email}\\nSubject: {subject}\\n\".format(\n\t\t\t\tdate=incoming_mail.date, from_email=incoming_mail.from_email, subject=incoming_mail.subject)\n\n\t\treturn error_msg\n\n\tdef update_flag(self, folder, uid_list=None):\n\t\t\"\"\" set all uids mails the flag as seen \"\"\"\n\t\tif not uid_list:\n\t\t\treturn\n\n\t\tif not self.connect():\n\t\t\treturn\n\n\t\tself.imap.select(folder)\n\t\tfor uid, operation in uid_list.items():\n\t\t\tif not uid: continue\n\n\t\t\top = \"+FLAGS\" if operation == \"Read\" else \"-FLAGS\"\n\t\t\ttry:\n\t\t\t\tself.imap.uid('STORE', uid, op, '(\\\\SEEN)')\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\nclass Email:\n\t\"\"\"Wrapper for an email.\"\"\"\n\tdef __init__(self, content):\n\t\t\"\"\"Parses headers, content, attachments from given raw message.\n\n\t\t:param content: Raw message.\"\"\"\n\t\tif isinstance(content, bytes):\n\t\t\tself.mail = email.message_from_bytes(content)\n\t\telse:\n\t\t\tself.mail = email.message_from_string(content)\n\n\t\tself.raw_message = content\n\t\tself.text_content = ''\n\t\tself.html_content = ''\n\t\tself.attachments = []\n\t\tself.cid_map = {}\n\t\tself.parse()\n\t\tself.set_content_and_type()\n\t\tself.set_subject()\n\t\tself.set_from()\n\t\tself.message_id = (self.mail.get('Message-ID') or \"\").strip(\" <>\")\n\n\t\tif self.mail[\"Date\"]:\n\t\t\ttry:\n\t\t\t\tutc = email.utils.mktime_tz(email.utils.parsedate_tz(self.mail[\"Date\"]))\n\t\t\t\tutc_dt = datetime.datetime.utcfromtimestamp(utc)\n\t\t\t\tself.date = convert_utc_to_user_timezone(utc_dt).strftime('%Y-%m-%d %H:%M:%S')\n\t\t\texcept:\n\t\t\t\tself.date = now()\n\t\telse:\n\t\t\tself.date = now()\n\t\tif self.date > now():\n\t\t\tself.date = now()\n\n\t@property\n\tdef in_reply_to(self):\n\t\treturn (self.mail.get(\"In-Reply-To\") or \"\").strip(\" <>\")\n\n\tdef parse(self):\n\t\t\"\"\"Walk and process multi-part email.\"\"\"\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)\n\n\tdef set_subject(self):\n\t\t\"\"\"Parse and decode `Subject` header.\"\"\"\n\t\t_subject = decode_header(self.mail.get(\"Subject\", \"No Subject\"))\n\t\tself.subject = _subject[0][0] or \"\"\n\t\tif _subject[0][1]:\n\t\t\tself.subject = safe_decode(self.subject, 
_subject[0][1])\n\t\telse:\n\t\t\t# assume that the encoding is utf-8\n\t\t\tself.subject = safe_decode(self.subject)[:140]\n\n\t\tif not self.subject:\n\t\t\tself.subject = \"No Subject\"\n\n\tdef set_from(self):\n\t\t# gmail mailing-list compatibility\n\t\t# use X-Original-Sender if available, as gmail sometimes modifies the 'From'\n\t\t_from_email = self.decode_email(self.mail.get(\"X-Original-From\") or self.mail[\"From\"])\n\t\t_reply_to = self.decode_email(self.mail.get(\"Reply-To\"))\n\n\t\tif _reply_to and not frappe.db.get_value('Email Account', {\"email_id\":_reply_to}, 'email_id'):\n\t\t\tself.from_email = extract_email_id(_reply_to)\n\t\telse:\n\t\t\tself.from_email = extract_email_id(_from_email)\n\n\t\tif self.from_email:\n\t\t\tself.from_email = self.from_email.lower()\n\n\t\tself.from_real_name = parse_addr(_from_email)[0] if \"@\" in _from_email else _from_email\n\n\tdef decode_email(self, email):\n\t\tif not email: return\n\t\tdecoded = \"\"\n\t\tfor part, encoding in decode_header(frappe.as_unicode(email).replace(\"\\\"\",\" \").replace(\"\\'\",\" \")):\n\t\t\tif encoding:\n\t\t\t\tdecoded += part.decode(encoding)\n\t\t\telse:\n\t\t\t\tdecoded += safe_decode(part)\n\t\treturn decoded\n\n\tdef set_content_and_type(self):\n\t\tself.content, self.content_type = '[Blank Email]', 'text/plain'\n\t\tif self.html_content:\n\t\t\tself.content, self.content_type = self.html_content, 'text/html'\n\t\telse:\n\t\t\tself.content, self.content_type = EmailReplyParser.read(self.text_content).text.replace(\"\\n\",\"\\n\\n\"), 'text/plain'\n\n\tdef process_part(self, part):\n\t\t\"\"\"Parse email `part` and set it to `text_content`, `html_content` or `attachments`.\"\"\"\n\t\tcontent_type = part.get_content_type()\n\t\tif content_type == 'text/plain':\n\t\t\tself.text_content += self.get_payload(part)\n\n\t\telif content_type == 'text/html':\n\t\t\tself.html_content += self.get_payload(part)\n\n\t\telif content_type == 'message/rfc822':\n\t\t\t# sent by outlook when another email is sent as an attachment to this email\n\t\t\tself.show_attached_email_headers_in_content(part)\n\n\t\telif part.get_filename() or 'image' in content_type:\n\t\t\tself.get_attachment(part)\n\n\tdef show_attached_email_headers_in_content(self, part):\n\t\t# get the multipart/alternative message\n\t\ttry:\n\t\t\tfrom html import escape # python 3.x\n\t\texcept ImportError:\n\t\t\tfrom cgi import escape # python 2.x\n\n\t\tmessage = list(part.walk())[1]\n\t\theaders = []\n\t\tfor key in ('From', 'To', 'Subject', 'Date'):\n\t\t\tvalue = cstr(message.get(key))\n\t\t\tif value:\n\t\t\t\theaders.append('{label}: {value}'.format(label=_(key), value=escape(value)))\n\n\t\tself.text_content += '\\n'.join(headers)\n\t\tself.html_content += '<hr>' + '\\n'.join('<p>{0}</p>'.format(h) for h in headers)\n\n\t\tif not message.is_multipart() and message.get_content_type()=='text/plain':\n\t\t\t# email.parser didn't parse it!\n\t\t\ttext_content = self.get_payload(message)\n\t\t\tself.text_content += text_content\n\t\t\tself.html_content += markdown(text_content)\n\n\tdef get_charset(self, part):\n\t\t\"\"\"Detect charset.\"\"\"\n\t\tcharset = part.get_content_charset()\n\t\tif not charset:\n\t\t\tcharset = chardet.detect(safe_encode(cstr(part)))['encoding']\n\n\t\treturn charset\n\n\tdef get_payload(self, part):\n\t\tcharset = self.get_charset(part)\n\n\t\ttry:\n\t\t\treturn str(part.get_payload(decode=True), str(charset), \"ignore\")\n\t\texcept LookupError:\n\t\t\treturn part.get_payload()\n\n\tdef get_attachment(self, 
part):\n\t\t#charset = self.get_charset(part)\n\t\tfcontent = part.get_payload(decode=True)\n\n\t\tif fcontent:\n\t\t\tcontent_type = part.get_content_type()\n\t\t\tfname = part.get_filename()\n\t\t\tif fname:\n\t\t\t\ttry:\n\t\t\t\t\tfname = fname.replace('\\n', ' ').replace('\\r', '')\n\t\t\t\t\tfname = cstr(decode_header(fname)[0][0])\n\t\t\t\texcept:\n\t\t\t\t\tfname = get_random_filename(content_type=content_type)\n\t\t\telse:\n\t\t\t\tfname = get_random_filename(content_type=content_type)\n\n\t\t\tself.attachments.append({\n\t\t\t\t'content_type': content_type,\n\t\t\t\t'fname': fname,\n\t\t\t\t'fcontent': fcontent,\n\t\t\t})\n\n\t\t\tcid = (cstr(part.get(\"Content-Id\")) or \"\").strip(\"><\")\n\t\t\tif cid:\n\t\t\t\tself.cid_map[fname] = cid\n\n\tdef save_attachments_in_doc(self, doc):\n\t\t\"\"\"Save email attachments in given document.\"\"\"\n\t\tsaved_attachments = []\n\n\t\tfor attachment in self.attachments:\n\t\t\ttry:\n\t\t\t\t_file = frappe.get_doc({\n\t\t\t\t\t\"doctype\": \"File\",\n\t\t\t\t\t\"file_name\": attachment['fname'],\n\t\t\t\t\t\"attached_to_doctype\": doc.doctype,\n\t\t\t\t\t\"attached_to_name\": doc.name,\n\t\t\t\t\t\"is_private\": 1,\n\t\t\t\t\t\"content\": attachment['fcontent']})\n\t\t\t\t_file.save()\n\t\t\t\tsaved_attachments.append(_file)\n\n\t\t\t\tif attachment['fname'] in self.cid_map:\n\t\t\t\t\tself.cid_map[_file.name] = self.cid_map[attachment['fname']]\n\n\t\t\texcept MaxFileSizeReachedError:\n\t\t\t\t# WARNING: bypass max file size exception\n\t\t\t\tpass\n\t\t\texcept frappe.FileAlreadyAttachedException:\n\t\t\t\tpass\n\t\t\texcept frappe.DuplicateEntryError:\n\t\t\t\t# same file attached twice??\n\t\t\t\tpass\n\n\t\treturn saved_attachments\n\n\tdef get_thread_id(self):\n\t\t\"\"\"Extract thread ID from `[]`\"\"\"\n\t\tl = re.findall(r'(?<=\\[)[\\w/-]+', self.subject)\n\t\treturn l and l[0] or None\n\n\tdef is_reply(self):\n\t\treturn bool(self.in_reply_to)\n\nclass InboundMail(Email):\n\t\"\"\"Class representation of incoming mail along with mail handlers.\n\t\"\"\"\n\tdef __init__(self, content, email_account, uid=None, seen_status=None):\n\t\tsuper().__init__(content)\n\t\tself.email_account = email_account\n\t\tself.uid = uid or -1\n\t\tself.seen_status = seen_status or 0\n\n\t\t# System documents related to this mail\n\t\tself._parent_email_queue = None\n\t\tself._parent_communication = None\n\t\tself._reference_document = None\n\n\t\tself.flags = frappe._dict()\n\n\tdef get_content(self):\n\t\tif self.content_type == 'text/html':\n\t\t\treturn clean_email_html(self.content)\n\n\tdef process(self):\n\t\t\"\"\"Create communication record from email.\n\t\t\"\"\"\n\t\tif self.is_sender_same_as_receiver() and not self.is_reply():\n\t\t\tif frappe.flags.in_test:\n\t\t\t\tprint('WARN: Cannot pull email. 
Sender same as recipient inbox')\n\t\t\traise SentEmailInInboxError\n\n\t\tcommunication = self.is_exist_in_system()\n\t\tif communication:\n\t\t\tcommunication.update_db(uid=self.uid)\n\t\t\tcommunication.reload()\n\t\t\treturn communication\n\n\t\tself.flags.is_new_communication = True\n\t\treturn self._build_communication_doc()\n\n\tdef _build_communication_doc(self):\n\t\tdata = self.as_dict()\n\t\tdata['doctype'] = \"Communication\"\n\n\t\tif self.parent_communication():\n\t\t\tdata['in_reply_to'] = self.parent_communication().name\n\n\t\tif self.reference_document():\n\t\t\tdata['reference_doctype'] = self.reference_document().doctype\n\t\t\tdata['reference_name'] = self.reference_document().name\n\t\telif self.email_account.append_to and self.email_account.append_to != 'Communication':\n\t\t\treference_doc = self._create_reference_document(self.email_account.append_to)\n\t\t\tif reference_doc:\n\t\t\t\tdata['reference_doctype'] = reference_doc.doctype\n\t\t\t\tdata['reference_name'] = reference_doc.name\n\t\t\t\tdata['is_first'] = True\n\n\t\tif self.is_notification():\n\t\t\t# Disable notifications for notification.\n\t\t\tdata['unread_notification_sent'] = 1\n\n\t\tif self.seen_status:\n\t\t\tdata['_seen'] = json.dumps(self.get_users_linked_to_account(self.email_account))\n\n\t\tcommunication = frappe.get_doc(data)\n\t\tcommunication.flags.in_receive = True\n\t\tcommunication.insert(ignore_permissions=True)\n\n\t\t# save attachments\n\t\tcommunication._attachments = self.save_attachments_in_doc(communication)\n\t\tcommunication.content = sanitize_html(self.replace_inline_images(communication._attachments))\n\t\tcommunication.save()\n\t\treturn communication\n\n\tdef replace_inline_images(self, attachments):\n\t\t# replace inline images\n\t\tcontent = self.content\n\t\tfor file in attachments:\n\t\t\tif file.name in self.cid_map and self.cid_map[file.name]:\n\t\t\t\tcontent = content.replace(\"cid:{0}\".format(self.cid_map[file.name]),\n\t\t\t\t\tfile.file_url)\n\t\treturn content\n\n\tdef is_notification(self):\n\t\tisnotification = self.mail.get(\"isnotification\")\n\t\treturn isnotification and (\"notification\" in isnotification)\n\n\tdef is_exist_in_system(self):\n\t\t\"\"\"Check if this email already exists in the system(as communication document).\n\t\t\"\"\"\n\t\tfrom frappe.core.doctype.communication.communication import Communication\n\t\tif not self.message_id:\n\t\t\treturn\n\n\t\treturn Communication.find_one_by_filters(message_id = self.message_id,\n\t\t\torder_by = 'creation DESC')\n\n\tdef is_sender_same_as_receiver(self):\n\t\treturn self.from_email == self.email_account.email_id\n\n\tdef is_reply_to_system_sent_mail(self):\n\t\t\"\"\"Is it a reply to already sent mail.\n\t\t\"\"\"\n\t\treturn self.is_reply() and frappe.local.site in self.in_reply_to\n\n\tdef parent_email_queue(self):\n\t\t\"\"\"Get parent record from `Email Queue`.\n\n\t\tIf it is a reply to already sent mail, then there will be a parent record in EMail Queue.\n\t\t\"\"\"\n\t\tfrom frappe.email.doctype.email_queue.email_queue import EmailQueue\n\n\t\tif self._parent_email_queue is not None:\n\t\t\treturn self._parent_email_queue\n\n\t\tparent_email_queue = ''\n\t\tif self.is_reply_to_system_sent_mail():\n\t\t\tparent_email_queue = EmailQueue.find_one_by_filters(message_id=self.in_reply_to)\n\n\t\tself._parent_email_queue = parent_email_queue or ''\n\t\treturn self._parent_email_queue\n\n\tdef parent_communication(self):\n\t\t\"\"\"Find a related communication so that we can prepare a mail 
thread.\n\n\t\tThe way it happens is by using in-reply-to header, and we can't make thread if it does not exist.\n\n\t\tHere are the cases to handle:\n\t\t1. If mail is a reply to already sent mail, then we can get parent communicaion from\n\t\t\tEmail Queue record.\n\t\t2. Sometimes we send communication name in message-ID directly, use that to get parent communication.\n\t\t3. Sender sent a reply but reply is on top of what (s)he sent before,\n\t\t\tthen parent record exists directly in communication.\n\t\t\"\"\"\n\t\tfrom frappe.core.doctype.communication.communication import Communication\n\t\tif self._parent_communication is not None:\n\t\t\treturn self._parent_communication\n\n\t\tif not self.is_reply():\n\t\t\treturn ''\n\n\t\tif not self.is_reply_to_system_sent_mail():\n\t\t\tcommunication = Communication.find_one_by_filters(message_id=self.in_reply_to,\n\t\t\t\tcreation = ['>=', self.get_relative_dt(-30)])\n\t\telif self.parent_email_queue() and self.parent_email_queue().communication:\n\t\t\tcommunication = Communication.find(self.parent_email_queue().communication, ignore_error=True)\n\t\telse:\n\t\t\treference = self.in_reply_to\n\t\t\tif '@' in self.in_reply_to:\n\t\t\t\treference, _ = self.in_reply_to.split(\"@\", 1)\n\t\t\tcommunication = Communication.find(reference, ignore_error=True)\n\n\t\tself._parent_communication = communication or ''\n\t\treturn self._parent_communication\n\n\tdef reference_document(self):\n\t\t\"\"\"Reference document is a document to which mail relate to.\n\n\t\tWe can get reference document from Parent record(EmailQueue | Communication) if exists.\n\t\tOtherwise we do subject match to find reference document if we know the reference(append_to) doctype.\n\t\t\"\"\"\n\t\tif self._reference_document is not None:\n\t\t\treturn self._reference_document\n\n\t\treference_document = \"\"\n\t\tparent = self.parent_email_queue() or self.parent_communication()\n\n\t\tif parent and parent.reference_doctype:\n\t\t\treference_doctype, reference_name = parent.reference_doctype, parent.reference_name\n\t\t\treference_document = self.get_doc(reference_doctype, reference_name, ignore_error=True)\n\n\t\tif not reference_document and self.email_account.append_to:\n\t\t\treference_document = self.match_record_by_subject_and_sender(self.email_account.append_to)\n\n\t\tself._reference_document = reference_document or ''\n\t\treturn self._reference_document\n\n\tdef get_reference_name_from_subject(self):\n\t\t\"\"\"\n\t\tEx: \"Re: Your email (#OPP-2020-2334343)\"\n\t\t\"\"\"\n\t\treturn self.subject.rsplit('#', 1)[-1].strip(' ()')\n\n\tdef match_record_by_subject_and_sender(self, doctype):\n\t\t\"\"\"Find a record in the given doctype that matches with email subject and sender.\n\n\t\tCases:\n\t\t1. Sometimes record name is part of subject. We can get document by parsing name from subject\n\t\t2. Find by matching sender and subject\n\t\t3. Find by matching subject alone (Special case)\n\t\t\tEx: when a System User is using Outlook and replies to an email from their own client,\n\t\t\tit reaches the Email Account with the threading info lost and the (sender + subject match)\n\t\t\tdoesn't work because the sender in the first communication was someone different to whom\n\t\t\tthe system user is replying to via the common email account in Frappe. 
This fix bypasses\n\t\t\tthe sender match when the sender is a system user and subject is atleast 10 chars long\n\t\t\t(for additional safety)\n\n\t\tNOTE: We consider not to match by subject if match record is very old.\n\t\t\"\"\"\n\t\tname = self.get_reference_name_from_subject()\n\t\temail_fields = self.get_email_fields(doctype)\n\n\t\trecord = self.get_doc(doctype, name, ignore_error=True) if name else None\n\n\t\tif not record:\n\t\t\tsubject = self.clean_subject(self.subject)\n\t\t\tfilters = {\n\t\t\t\temail_fields.subject_field: (\"like\", f\"%{subject}%\"),\n\t\t\t\t\"creation\": (\">\", self.get_relative_dt(days=-60))\n\t\t\t}\n\n\t\t\t# Sender check is not needed incase mail is from system user.\n\t\t\tif not (len(subject) > 10 and is_system_user(self.from_email)):\n\t\t\t\tfilters[email_fields.sender_field] = self.from_email\n\n\t\t\tname = frappe.db.get_value(self.email_account.append_to, filters = filters)\n\t\t\trecord = self.get_doc(doctype, name, ignore_error=True) if name else None\n\t\treturn record\n\n\tdef _create_reference_document(self, doctype):\n\t\t\"\"\" Create reference document if it does not exist in the system.\n\t\t\"\"\"\n\t\tparent = frappe.new_doc(doctype)\n\t\temail_fileds = self.get_email_fields(doctype)\n\n\t\tif email_fileds.subject_field:\n\t\t\tparent.set(email_fileds.subject_field, frappe.as_unicode(self.subject)[:140])\n\n\t\tif email_fileds.sender_field:\n\t\t\tparent.set(email_fileds.sender_field, frappe.as_unicode(self.from_email))\n\n\t\tparent.flags.ignore_mandatory = True\n\n\t\ttry:\n\t\t\tparent.insert(ignore_permissions=True)\n\t\texcept frappe.DuplicateEntryError:\n\t\t\t# try and find matching parent\n\t\t\tparent_name = frappe.db.get_value(self.email_account.append_to,\n\t\t\t\t{email_fileds.sender_field: self.from_email}\n\t\t\t)\n\t\t\tif parent_name:\n\t\t\t\tparent.name = parent_name\n\t\t\telse:\n\t\t\t\tparent = None\n\t\treturn parent\n\n\n\t@staticmethod\n\tdef get_doc(doctype, docname, ignore_error=False):\n\t\ttry:\n\t\t\treturn frappe.get_doc(doctype, docname)\n\t\texcept frappe.DoesNotExistError:\n\t\t\tif ignore_error:\n\t\t\t\treturn\n\t\t\traise\n\n\t@staticmethod\n\tdef get_relative_dt(days):\n\t\t\"\"\"Get relative to current datetime. 
Only relative days are supported.\n\t\t\"\"\"\n\t\treturn add_days(get_datetime(), days)\n\n\t@staticmethod\n\tdef get_users_linked_to_account(email_account):\n\t\t\"\"\"Get list of users who linked to Email account.\n\t\t\"\"\"\n\t\tusers = frappe.get_all(\"User Email\", filters={\"email_account\": email_account.name},\n\t\t\tfields=[\"parent\"])\n\t\treturn list(set([user.get(\"parent\") for user in users]))\n\n\t@staticmethod\n\tdef clean_subject(subject):\n\t\t\"\"\"Remove Prefixes like 'fw', FWD', 're' etc from subject.\n\t\t\"\"\"\n\t\t# Match strings like \"fw:\", \"re\t:\" etc.\n\t\tregex = r\"(^\\s*(fw|fwd|wg)[^:]*:|\\s*(re|aw)[^:]*:\\s*)*\"\n\t\treturn frappe.as_unicode(strip(re.sub(regex, \"\", subject, 0, flags=re.IGNORECASE)))\n\n\t@staticmethod\n\tdef get_email_fields(doctype):\n\t\t\"\"\"Returns Email related fields of a doctype.\n\t\t\"\"\"\n\t\tfields = frappe._dict()\n\n\t\temail_fields = ['subject_field', 'sender_field']\n\t\tmeta = frappe.get_meta(doctype)\n\n\t\tfor field in email_fields:\n\t\t\tif hasattr(meta, field):\n\t\t\t\tfields[field] = getattr(meta, field)\n\t\treturn fields\n\n\t@staticmethod\n\tdef get_document(self, doctype, name):\n\t\t\"\"\"Is same as frappe.get_doc but suppresses the DoesNotExist error.\n\t\t\"\"\"\n\t\ttry:\n\t\t\treturn frappe.get_doc(doctype, name)\n\t\texcept frappe.DoesNotExistError:\n\t\t\treturn None\n\n\tdef as_dict(self):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\treturn {\n\t\t\t\"subject\": self.subject,\n\t\t\t\"content\": self.get_content(),\n\t\t\t'text_content': self.text_content,\n\t\t\t\"sent_or_received\": \"Received\",\n\t\t\t\"sender_full_name\": self.from_real_name,\n\t\t\t\"sender\": self.from_email,\n\t\t\t\"recipients\": self.mail.get(\"To\"),\n\t\t\t\"cc\": self.mail.get(\"CC\"),\n\t\t\t\"email_account\": self.email_account.name,\n\t\t\t\"communication_medium\": \"Email\",\n\t\t\t\"uid\": self.uid,\n\t\t\t\"message_id\": self.message_id,\n\t\t\t\"communication_date\": self.date,\n\t\t\t\"has_attachment\": 1 if self.attachments else 0,\n\t\t\t\"seen\": self.seen_status or 0\n\t\t}\n\nclass TimerMixin(object):\n\tdef __init__(self, *args, **kwargs):\n\t\tself.timeout = kwargs.pop('timeout', 0.0)\n\t\tself.elapsed_time = 0.0\n\t\tself._super.__init__(self, *args, **kwargs)\n\t\tif self.timeout:\n\t\t\t# set per operation timeout to one-fifth of total pop timeout\n\t\t\tself.sock.settimeout(self.timeout / 5.0)\n\n\tdef _getline(self, *args, **kwargs):\n\t\tstart_time = time.time()\n\t\tret = self._super._getline(self, *args, **kwargs)\n\n\t\tself.elapsed_time += time.time() - start_time\n\t\tif self.timeout and self.elapsed_time > self.timeout:\n\t\t\traise EmailTimeoutError\n\n\t\treturn ret\n\n\tdef quit(self, *args, **kwargs):\n\t\tself.elapsed_time = 0.0\n\t\treturn self._super.quit(self, *args, **kwargs)\n\nclass Timed_POP3(TimerMixin, poplib.POP3):\n\t_super = poplib.POP3\n\nclass Timed_POP3_SSL(TimerMixin, poplib.POP3_SSL):\n\t_super = poplib.POP3_SSL\n\nclass Timed_IMAP4(TimerMixin, imaplib.IMAP4):\n\t_super = imaplib.IMAP4\n\nclass Timed_IMAP4_SSL(TimerMixin, imaplib.IMAP4_SSL):\n\t_super = imaplib.IMAP4_SSL\n", "path": "frappe/email/receive.py" } ]
[ { "content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\nimport datetime\nimport email\nimport email.utils\nimport imaplib\nimport poplib\nimport re\nimport time\nimport json\nfrom email.header import decode_header\n\nimport _socket\nimport chardet\nfrom email_reply_parser import EmailReplyParser\n\nimport frappe\nfrom frappe import _, safe_decode, safe_encode\nfrom frappe.core.doctype.file.file import (MaxFileSizeReachedError,\n\tget_random_filename)\nfrom frappe.utils import (cint, convert_utc_to_user_timezone, cstr,\n\textract_email_id, markdown, now, parse_addr, strip, get_datetime,\n\tadd_days, sanitize_html)\nfrom frappe.utils.user import is_system_user\nfrom frappe.utils.html_utils import clean_email_html\n\n# fix due to a python bug in poplib that limits it to 2048\npoplib._MAXLINE = 20480\n\n\n\nclass EmailSizeExceededError(frappe.ValidationError): pass\nclass EmailTimeoutError(frappe.ValidationError): pass\nclass TotalSizeExceededError(frappe.ValidationError): pass\nclass LoginLimitExceeded(frappe.ValidationError): pass\nclass SentEmailInInboxError(Exception):\n\tpass\n\nclass EmailServer:\n\t\"\"\"Wrapper for POP server to pull emails.\"\"\"\n\tdef __init__(self, args=None):\n\t\tself.setup(args)\n\n\tdef setup(self, args=None):\n\t\t# overrride\n\t\tself.settings = args or frappe._dict()\n\n\tdef check_mails(self):\n\t\t# overrride\n\t\treturn True\n\n\tdef process_message(self, mail):\n\t\t# overrride\n\t\tpass\n\n\tdef connect(self):\n\t\t\"\"\"Connect to **Email Account**.\"\"\"\n\t\tif cint(self.settings.use_imap):\n\t\t\treturn self.connect_imap()\n\t\telse:\n\t\t\treturn self.connect_pop()\n\n\tdef connect_imap(self):\n\t\t\"\"\"Connect to IMAP\"\"\"\n\t\ttry:\n\t\t\tif cint(self.settings.use_ssl):\n\t\t\t\tself.imap = Timed_IMAP4_SSL(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get(\"pop_timeout\"))\n\t\t\telse:\n\t\t\t\tself.imap = Timed_IMAP4(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get(\"pop_timeout\"))\n\t\t\tself.imap.login(self.settings.username, self.settings.password)\n\t\t\t# connection established!\n\t\t\treturn True\n\n\t\texcept _socket.error:\n\t\t\t# Invalid mail server -- due to refusing connection\n\t\t\tfrappe.msgprint(_('Invalid Mail Server. Please rectify and try again.'))\n\t\t\traise\n\n\tdef connect_pop(self):\n\t\t#this method return pop connection\n\t\ttry:\n\t\t\tif cint(self.settings.use_ssl):\n\t\t\t\tself.pop = Timed_POP3_SSL(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get(\"pop_timeout\"))\n\t\t\telse:\n\t\t\t\tself.pop = Timed_POP3(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get(\"pop_timeout\"))\n\n\t\t\tself.pop.user(self.settings.username)\n\t\t\tself.pop.pass_(self.settings.password)\n\n\t\t\t# connection established!\n\t\t\treturn True\n\n\t\texcept _socket.error:\n\t\t\t# log performs rollback and logs error in Error Log\n\t\t\tfrappe.log_error(\"receive.connect_pop\")\n\n\t\t\t# Invalid mail server -- due to refusing connection\n\t\t\tfrappe.msgprint(_('Invalid Mail Server. Please rectify and try again.'))\n\t\t\traise\n\n\t\texcept poplib.error_proto as e:\n\t\t\tif self.is_temporary_system_problem(e):\n\t\t\t\treturn False\n\n\t\t\telse:\n\t\t\t\tfrappe.msgprint(_('Invalid User Name or Support Password. 
Please rectify and try again.'))\n\t\t\t\traise\n\n\tdef select_imap_folder(self, folder):\n\t\tself.imap.select(folder)\n\n\tdef logout(self):\n\t\tif cint(self.settings.use_imap):\n\t\t\tself.imap.logout()\n\t\telse:\n\t\t\tself.pop.quit()\n\t\treturn\n\n\tdef get_messages(self, folder=\"INBOX\"):\n\t\t\"\"\"Returns new email messages in a list.\"\"\"\n\t\tif not (self.check_mails() or self.connect()):\n\t\t\treturn []\n\n\t\tfrappe.db.commit()\n\n\t\tuid_list = []\n\n\t\ttry:\n\t\t\t# track if errors arised\n\t\t\tself.errors = False\n\t\t\tself.latest_messages = []\n\t\t\tself.seen_status = {}\n\t\t\tself.uid_reindexed = False\n\n\t\t\tuid_list = email_list = self.get_new_mails(folder)\n\n\t\t\tif not email_list:\n\t\t\t\treturn\n\n\t\t\tnum = num_copy = len(email_list)\n\n\t\t\t# WARNING: Hard coded max no. of messages to be popped\n\t\t\tif num > 50: num = 50\n\n\t\t\t# size limits\n\t\t\tself.total_size = 0\n\t\t\tself.max_email_size = cint(frappe.local.conf.get(\"max_email_size\"))\n\t\t\tself.max_total_size = 5 * self.max_email_size\n\n\t\t\tfor i, message_meta in enumerate(email_list[:num]):\n\t\t\t\ttry:\n\t\t\t\t\tself.retrieve_message(message_meta, i+1)\n\t\t\t\texcept (TotalSizeExceededError, EmailTimeoutError, LoginLimitExceeded):\n\t\t\t\t\tbreak\n\t\t\t# WARNING: Mark as read - message number 101 onwards from the pop list\n\t\t\t# This is to avoid having too many messages entering the system\n\t\t\tnum = num_copy\n\t\t\tif not cint(self.settings.use_imap):\n\t\t\t\tif num > 100 and not self.errors:\n\t\t\t\t\tfor m in range(101, num+1):\n\t\t\t\t\t\tself.pop.dele(m)\n\n\t\texcept Exception as e:\n\t\t\tif self.has_login_limit_exceeded(e):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\traise\n\n\t\tout = { \"latest_messages\": self.latest_messages }\n\t\tif self.settings.use_imap:\n\t\t\tout.update({\n\t\t\t\t\"uid_list\": uid_list,\n\t\t\t\t\"seen_status\": self.seen_status,\n\t\t\t\t\"uid_reindexed\": self.uid_reindexed\n\t\t\t})\n\n\t\treturn out\n\n\tdef get_new_mails(self, folder):\n\t\t\"\"\"Return list of new mails\"\"\"\n\t\tif cint(self.settings.use_imap):\n\t\t\temail_list = []\n\t\t\tself.check_imap_uidvalidity(folder)\n\n\t\t\treadonly = False if self.settings.email_sync_rule == \"UNSEEN\" else True\n\n\t\t\tself.imap.select(folder, readonly=readonly)\n\t\t\tresponse, message = self.imap.uid('search', None, self.settings.email_sync_rule)\n\t\t\tif message[0]:\n\t\t\t\temail_list = message[0].split()\n\t\telse:\n\t\t\temail_list = self.pop.list()[1]\n\n\t\treturn email_list\n\n\tdef check_imap_uidvalidity(self, folder):\n\t\t# compare the UIDVALIDITY of email account and imap server\n\t\tuid_validity = self.settings.uid_validity\n\n\t\tresponse, message = self.imap.status(folder, \"(UIDVALIDITY UIDNEXT)\")\n\t\tcurrent_uid_validity = self.parse_imap_response(\"UIDVALIDITY\", message[0]) or 0\n\n\t\tuidnext = int(self.parse_imap_response(\"UIDNEXT\", message[0]) or \"1\")\n\t\tfrappe.db.set_value(\"Email Account\", self.settings.email_account, \"uidnext\", uidnext)\n\n\t\tif not uid_validity or uid_validity != current_uid_validity:\n\t\t\t# uidvalidity changed & all email uids are reindexed by server\n\t\t\tCommunication = frappe.qb.DocType(\"Communication\")\n\t\t\tfrappe.qb.update(Communication) \\\n\t\t\t\t.set(Communication.uid, -1) \\\n\t\t\t\t.where(Communication.communication_medium == \"Email\") \\\n\t\t\t\t.where(Communication.email_account == self.settings.email_account).run()\n\n\t\t\tif self.settings.use_imap:\n\t\t\t\t# new update for the IMAP Folder 
DocType\n\t\t\t\tIMAPFolder = frappe.qb.DocType(\"IMAP Folder\")\n\t\t\t\tfrappe.qb.update(IMAPFolder) \\\n\t\t\t\t\t.set(IMAPFolder.uidvalidity, current_uid_validity) \\\n\t\t\t\t\t.set(IMAPFolder.uidnext, uidnext) \\\n\t\t\t\t\t.where(IMAPFolder.parent == self.settings.email_account_name) \\\n\t\t\t\t\t.where(IMAPFolder.folder_name == folder).run()\n\t\t\telse:\n\t\t\t\tEmailAccount = frappe.qb.DocType(\"Email Account\")\n\t\t\t\tfrappe.qb.update(EmailAccount) \\\n\t\t\t\t\t.set(EmailAccount.uidvalidity, current_uid_validity) \\\n\t\t\t\t\t.set(EmailAccount.uidnext, uidnext) \\\n\t\t\t\t\t.where(EmailAccount.name == self.settings.email_account_name).run()\n\n\t\t\t# uid validity not found pulling emails for first time\n\t\t\tif not uid_validity:\n\t\t\t\tself.settings.email_sync_rule = \"UNSEEN\"\n\t\t\t\treturn\n\n\t\t\tsync_count = 100 if uid_validity else int(self.settings.initial_sync_count)\n\t\t\tfrom_uid = 1 if uidnext < (sync_count + 1) or (uidnext - sync_count) < 1 else uidnext - sync_count\n\t\t\t# sync last 100 email\n\t\t\tself.settings.email_sync_rule = \"UID {}:{}\".format(from_uid, uidnext)\n\t\t\tself.uid_reindexed = True\n\n\t\telif uid_validity == current_uid_validity:\n\t\t\treturn\n\n\tdef parse_imap_response(self, cmd, response):\n\t\tpattern = r\"(?<={cmd} )[0-9]*\".format(cmd=cmd)\n\t\tmatch = re.search(pattern, response.decode('utf-8'), re.U | re.I)\n\n\t\tif match:\n\t\t\treturn match.group(0)\n\t\telse:\n\t\t\treturn None\n\n\tdef retrieve_message(self, message_meta, msg_num=None):\n\t\tincoming_mail = None\n\t\ttry:\n\t\t\tself.validate_message_limits(message_meta)\n\n\t\t\tif cint(self.settings.use_imap):\n\t\t\t\tstatus, message = self.imap.uid('fetch', message_meta, '(BODY.PEEK[] BODY.PEEK[HEADER] FLAGS)')\n\t\t\t\traw = message[0]\n\n\t\t\t\tself.get_email_seen_status(message_meta, raw[0])\n\t\t\t\tself.latest_messages.append(raw[1])\n\t\t\telse:\n\t\t\t\tmsg = self.pop.retr(msg_num)\n\t\t\t\tself.latest_messages.append(b'\\n'.join(msg[1]))\n\t\texcept (TotalSizeExceededError, EmailTimeoutError):\n\t\t\t# propagate this error to break the loop\n\t\t\tself.errors = True\n\t\t\traise\n\n\t\texcept Exception as e:\n\t\t\tif self.has_login_limit_exceeded(e):\n\t\t\t\tself.errors = True\n\t\t\t\traise LoginLimitExceeded(e)\n\n\t\t\telse:\n\t\t\t\t# log performs rollback and logs error in Error Log\n\t\t\t\tfrappe.log_error(\"receive.get_messages\", self.make_error_msg(msg_num, incoming_mail))\n\t\t\t\tself.errors = True\n\t\t\t\tfrappe.db.rollback()\n\n\t\t\t\tif not cint(self.settings.use_imap):\n\t\t\t\t\tself.pop.dele(msg_num)\n\t\t\t\telse:\n\t\t\t\t\t# mark as seen if email sync rule is UNSEEN (syncing only unseen mails)\n\t\t\t\t\tif self.settings.email_sync_rule == \"UNSEEN\":\n\t\t\t\t\t\tself.imap.uid('STORE', message_meta, '+FLAGS', '(\\\\SEEN)')\n\t\telse:\n\t\t\tif not cint(self.settings.use_imap):\n\t\t\t\tself.pop.dele(msg_num)\n\t\t\telse:\n\t\t\t\t# mark as seen if email sync rule is UNSEEN (syncing only unseen mails)\n\t\t\t\tif self.settings.email_sync_rule == \"UNSEEN\":\n\t\t\t\t\tself.imap.uid('STORE', message_meta, '+FLAGS', '(\\\\SEEN)')\n\n\tdef get_email_seen_status(self, uid, flag_string):\n\t\t\"\"\" parse the email FLAGS response \"\"\"\n\t\tif not flag_string:\n\t\t\treturn None\n\n\t\tflags = []\n\t\tfor flag in imaplib.ParseFlags(flag_string) or []:\n\t\t\tpattern = re.compile(r\"\\w+\")\n\t\t\tmatch = re.search(pattern, frappe.as_unicode(flag))\n\t\t\tflags.append(match.group(0))\n\n\t\tif \"Seen\" in 
flags:\n\t\t\tself.seen_status.update({ uid: \"SEEN\" })\n\t\telse:\n\t\t\tself.seen_status.update({ uid: \"UNSEEN\" })\n\n\tdef has_login_limit_exceeded(self, e):\n\t\treturn \"-ERR Exceeded the login limit\" in strip(cstr(e.message))\n\n\tdef is_temporary_system_problem(self, e):\n\t\tmessages = (\n\t\t\t\"-ERR [SYS/TEMP] Temporary system problem. Please try again later.\",\n\t\t\t\"Connection timed out\",\n\t\t)\n\t\tfor message in messages:\n\t\t\tif message in strip(cstr(e)) or message in strip(cstr(getattr(e, 'strerror', ''))):\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef validate_message_limits(self, message_meta):\n\t\t# throttle based on email size\n\t\tif not self.max_email_size:\n\t\t\treturn\n\n\t\tm, size = message_meta.split()\n\t\tsize = cint(size)\n\n\t\tif size < self.max_email_size:\n\t\t\tself.total_size += size\n\t\t\tif self.total_size > self.max_total_size:\n\t\t\t\traise TotalSizeExceededError\n\t\telse:\n\t\t\traise EmailSizeExceededError\n\n\tdef make_error_msg(self, msg_num, incoming_mail):\n\t\terror_msg = \"Error in retrieving email.\"\n\t\tif not incoming_mail:\n\t\t\ttry:\n\t\t\t\t# retrieve headers\n\t\t\t\tincoming_mail = Email(b'\\n'.join(self.pop.top(msg_num, 5)[1]))\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\tif incoming_mail:\n\t\t\terror_msg += \"\\nDate: {date}\\nFrom: {from_email}\\nSubject: {subject}\\n\".format(\n\t\t\t\tdate=incoming_mail.date, from_email=incoming_mail.from_email, subject=incoming_mail.subject)\n\n\t\treturn error_msg\n\n\tdef update_flag(self, folder, uid_list=None):\n\t\t\"\"\" set all uids mails the flag as seen \"\"\"\n\t\tif not uid_list:\n\t\t\treturn\n\n\t\tif not self.connect():\n\t\t\treturn\n\n\t\tself.imap.select(folder)\n\t\tfor uid, operation in uid_list.items():\n\t\t\tif not uid: continue\n\n\t\t\top = \"+FLAGS\" if operation == \"Read\" else \"-FLAGS\"\n\t\t\ttry:\n\t\t\t\tself.imap.uid('STORE', uid, op, '(\\\\SEEN)')\n\t\t\texcept Exception:\n\t\t\t\tcontinue\n\nclass Email:\n\t\"\"\"Wrapper for an email.\"\"\"\n\tdef __init__(self, content):\n\t\t\"\"\"Parses headers, content, attachments from given raw message.\n\n\t\t:param content: Raw message.\"\"\"\n\t\tif isinstance(content, bytes):\n\t\t\tself.mail = email.message_from_bytes(content)\n\t\telse:\n\t\t\tself.mail = email.message_from_string(content)\n\n\t\tself.raw_message = content\n\t\tself.text_content = ''\n\t\tself.html_content = ''\n\t\tself.attachments = []\n\t\tself.cid_map = {}\n\t\tself.parse()\n\t\tself.set_content_and_type()\n\t\tself.set_subject()\n\t\tself.set_from()\n\t\tself.message_id = (self.mail.get('Message-ID') or \"\").strip(\" <>\")\n\n\t\tif self.mail[\"Date\"]:\n\t\t\ttry:\n\t\t\t\tutc = email.utils.mktime_tz(email.utils.parsedate_tz(self.mail[\"Date\"]))\n\t\t\t\tutc_dt = datetime.datetime.utcfromtimestamp(utc)\n\t\t\t\tself.date = convert_utc_to_user_timezone(utc_dt).strftime('%Y-%m-%d %H:%M:%S')\n\t\t\texcept:\n\t\t\t\tself.date = now()\n\t\telse:\n\t\t\tself.date = now()\n\t\tif self.date > now():\n\t\t\tself.date = now()\n\n\t@property\n\tdef in_reply_to(self):\n\t\treturn (self.mail.get(\"In-Reply-To\") or \"\").strip(\" <>\")\n\n\tdef parse(self):\n\t\t\"\"\"Walk and process multi-part email.\"\"\"\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)\n\n\tdef set_subject(self):\n\t\t\"\"\"Parse and decode `Subject` header.\"\"\"\n\t\t_subject = decode_header(self.mail.get(\"Subject\", \"No Subject\"))\n\t\tself.subject = _subject[0][0] or \"\"\n\t\tif _subject[0][1]:\n\t\t\tself.subject = safe_decode(self.subject, 
_subject[0][1])\n\t\telse:\n\t\t\t# assume that the encoding is utf-8\n\t\t\tself.subject = safe_decode(self.subject)[:140]\n\n\t\tif not self.subject:\n\t\t\tself.subject = \"No Subject\"\n\n\tdef set_from(self):\n\t\t# gmail mailing-list compatibility\n\t\t# use X-Original-Sender if available, as gmail sometimes modifies the 'From'\n\t\t_from_email = self.decode_email(self.mail.get(\"X-Original-From\") or self.mail[\"From\"])\n\t\t_reply_to = self.decode_email(self.mail.get(\"Reply-To\"))\n\n\t\tif _reply_to and not frappe.db.get_value('Email Account', {\"email_id\":_reply_to}, 'email_id'):\n\t\t\tself.from_email = extract_email_id(_reply_to)\n\t\telse:\n\t\t\tself.from_email = extract_email_id(_from_email)\n\n\t\tif self.from_email:\n\t\t\tself.from_email = self.from_email.lower()\n\n\t\tself.from_real_name = parse_addr(_from_email)[0] if \"@\" in _from_email else _from_email\n\n\tdef decode_email(self, email):\n\t\tif not email: return\n\t\tdecoded = \"\"\n\t\tfor part, encoding in decode_header(frappe.as_unicode(email).replace(\"\\\"\",\" \").replace(\"\\'\",\" \")):\n\t\t\tif encoding:\n\t\t\t\tdecoded += part.decode(encoding)\n\t\t\telse:\n\t\t\t\tdecoded += safe_decode(part)\n\t\treturn decoded\n\n\tdef set_content_and_type(self):\n\t\tself.content, self.content_type = '[Blank Email]', 'text/plain'\n\t\tif self.html_content:\n\t\t\tself.content, self.content_type = self.html_content, 'text/html'\n\t\telse:\n\t\t\tself.content, self.content_type = EmailReplyParser.read(self.text_content).text.replace(\"\\n\",\"\\n\\n\"), 'text/plain'\n\n\tdef process_part(self, part):\n\t\t\"\"\"Parse email `part` and set it to `text_content`, `html_content` or `attachments`.\"\"\"\n\t\tcontent_type = part.get_content_type()\n\t\tif content_type == 'text/plain':\n\t\t\tself.text_content += self.get_payload(part)\n\n\t\telif content_type == 'text/html':\n\t\t\tself.html_content += self.get_payload(part)\n\n\t\telif content_type == 'message/rfc822':\n\t\t\t# sent by outlook when another email is sent as an attachment to this email\n\t\t\tself.show_attached_email_headers_in_content(part)\n\n\t\telif part.get_filename() or 'image' in content_type:\n\t\t\tself.get_attachment(part)\n\n\tdef show_attached_email_headers_in_content(self, part):\n\t\t# get the multipart/alternative message\n\t\ttry:\n\t\t\tfrom html import escape # python 3.x\n\t\texcept ImportError:\n\t\t\tfrom cgi import escape # python 2.x\n\n\t\tmessage = list(part.walk())[1]\n\t\theaders = []\n\t\tfor key in ('From', 'To', 'Subject', 'Date'):\n\t\t\tvalue = cstr(message.get(key))\n\t\t\tif value:\n\t\t\t\theaders.append('{label}: {value}'.format(label=_(key), value=escape(value)))\n\n\t\tself.text_content += '\\n'.join(headers)\n\t\tself.html_content += '<hr>' + '\\n'.join('<p>{0}</p>'.format(h) for h in headers)\n\n\t\tif not message.is_multipart() and message.get_content_type()=='text/plain':\n\t\t\t# email.parser didn't parse it!\n\t\t\ttext_content = self.get_payload(message)\n\t\t\tself.text_content += text_content\n\t\t\tself.html_content += markdown(text_content)\n\n\tdef get_charset(self, part):\n\t\t\"\"\"Detect charset.\"\"\"\n\t\tcharset = part.get_content_charset()\n\t\tif not charset:\n\t\t\tcharset = chardet.detect(safe_encode(cstr(part)))['encoding']\n\n\t\treturn charset\n\n\tdef get_payload(self, part):\n\t\tcharset = self.get_charset(part)\n\n\t\ttry:\n\t\t\treturn str(part.get_payload(decode=True), str(charset), \"ignore\")\n\t\texcept LookupError:\n\t\t\treturn part.get_payload()\n\n\tdef get_attachment(self, 
part):\n\t\t#charset = self.get_charset(part)\n\t\tfcontent = part.get_payload(decode=True)\n\n\t\tif fcontent:\n\t\t\tcontent_type = part.get_content_type()\n\t\t\tfname = part.get_filename()\n\t\t\tif fname:\n\t\t\t\ttry:\n\t\t\t\t\tfname = fname.replace('\\n', ' ').replace('\\r', '')\n\t\t\t\t\tfname = cstr(decode_header(fname)[0][0])\n\t\t\t\texcept:\n\t\t\t\t\tfname = get_random_filename(content_type=content_type)\n\t\t\telse:\n\t\t\t\tfname = get_random_filename(content_type=content_type)\n\n\t\t\tself.attachments.append({\n\t\t\t\t'content_type': content_type,\n\t\t\t\t'fname': fname,\n\t\t\t\t'fcontent': fcontent,\n\t\t\t})\n\n\t\t\tcid = (cstr(part.get(\"Content-Id\")) or \"\").strip(\"><\")\n\t\t\tif cid:\n\t\t\t\tself.cid_map[fname] = cid\n\n\tdef save_attachments_in_doc(self, doc):\n\t\t\"\"\"Save email attachments in given document.\"\"\"\n\t\tsaved_attachments = []\n\n\t\tfor attachment in self.attachments:\n\t\t\ttry:\n\t\t\t\t_file = frappe.get_doc({\n\t\t\t\t\t\"doctype\": \"File\",\n\t\t\t\t\t\"file_name\": attachment['fname'],\n\t\t\t\t\t\"attached_to_doctype\": doc.doctype,\n\t\t\t\t\t\"attached_to_name\": doc.name,\n\t\t\t\t\t\"is_private\": 1,\n\t\t\t\t\t\"content\": attachment['fcontent']})\n\t\t\t\t_file.save()\n\t\t\t\tsaved_attachments.append(_file)\n\n\t\t\t\tif attachment['fname'] in self.cid_map:\n\t\t\t\t\tself.cid_map[_file.name] = self.cid_map[attachment['fname']]\n\n\t\t\texcept MaxFileSizeReachedError:\n\t\t\t\t# WARNING: bypass max file size exception\n\t\t\t\tpass\n\t\t\texcept frappe.FileAlreadyAttachedException:\n\t\t\t\tpass\n\t\t\texcept frappe.DuplicateEntryError:\n\t\t\t\t# same file attached twice??\n\t\t\t\tpass\n\n\t\treturn saved_attachments\n\n\tdef get_thread_id(self):\n\t\t\"\"\"Extract thread ID from `[]`\"\"\"\n\t\tl = re.findall(r'(?<=\\[)[\\w/-]+', self.subject)\n\t\treturn l and l[0] or None\n\n\tdef is_reply(self):\n\t\treturn bool(self.in_reply_to)\n\nclass InboundMail(Email):\n\t\"\"\"Class representation of incoming mail along with mail handlers.\n\t\"\"\"\n\tdef __init__(self, content, email_account, uid=None, seen_status=None):\n\t\tsuper().__init__(content)\n\t\tself.email_account = email_account\n\t\tself.uid = uid or -1\n\t\tself.seen_status = seen_status or 0\n\n\t\t# System documents related to this mail\n\t\tself._parent_email_queue = None\n\t\tself._parent_communication = None\n\t\tself._reference_document = None\n\n\t\tself.flags = frappe._dict()\n\n\tdef get_content(self):\n\t\tif self.content_type == 'text/html':\n\t\t\treturn clean_email_html(self.content)\n\n\tdef process(self):\n\t\t\"\"\"Create communication record from email.\n\t\t\"\"\"\n\t\tif self.is_sender_same_as_receiver() and not self.is_reply():\n\t\t\tif frappe.flags.in_test:\n\t\t\t\tprint('WARN: Cannot pull email. 
Sender same as recipient inbox')\n\t\t\traise SentEmailInInboxError\n\n\t\tcommunication = self.is_exist_in_system()\n\t\tif communication:\n\t\t\tcommunication.update_db(uid=self.uid)\n\t\t\tcommunication.reload()\n\t\t\treturn communication\n\n\t\tself.flags.is_new_communication = True\n\t\treturn self._build_communication_doc()\n\n\tdef _build_communication_doc(self):\n\t\tdata = self.as_dict()\n\t\tdata['doctype'] = \"Communication\"\n\n\t\tif self.parent_communication():\n\t\t\tdata['in_reply_to'] = self.parent_communication().name\n\n\t\tif self.reference_document():\n\t\t\tdata['reference_doctype'] = self.reference_document().doctype\n\t\t\tdata['reference_name'] = self.reference_document().name\n\t\telif self.email_account.append_to and self.email_account.append_to != 'Communication':\n\t\t\treference_doc = self._create_reference_document(self.email_account.append_to)\n\t\t\tif reference_doc:\n\t\t\t\tdata['reference_doctype'] = reference_doc.doctype\n\t\t\t\tdata['reference_name'] = reference_doc.name\n\t\t\t\tdata['is_first'] = True\n\n\t\tif self.is_notification():\n\t\t\t# Disable notifications for notification.\n\t\t\tdata['unread_notification_sent'] = 1\n\n\t\tif self.seen_status:\n\t\t\tdata['_seen'] = json.dumps(self.get_users_linked_to_account(self.email_account))\n\n\t\tcommunication = frappe.get_doc(data)\n\t\tcommunication.flags.in_receive = True\n\t\tcommunication.insert(ignore_permissions=True)\n\n\t\t# save attachments\n\t\tcommunication._attachments = self.save_attachments_in_doc(communication)\n\t\tcommunication.content = sanitize_html(self.replace_inline_images(communication._attachments))\n\t\tcommunication.save()\n\t\treturn communication\n\n\tdef replace_inline_images(self, attachments):\n\t\t# replace inline images\n\t\tcontent = self.content\n\t\tfor file in attachments:\n\t\t\tif file.name in self.cid_map and self.cid_map[file.name]:\n\t\t\t\tcontent = content.replace(\"cid:{0}\".format(self.cid_map[file.name]),\n\t\t\t\t\tfile.file_url)\n\t\treturn content\n\n\tdef is_notification(self):\n\t\tisnotification = self.mail.get(\"isnotification\")\n\t\treturn isnotification and (\"notification\" in isnotification)\n\n\tdef is_exist_in_system(self):\n\t\t\"\"\"Check if this email already exists in the system(as communication document).\n\t\t\"\"\"\n\t\tfrom frappe.core.doctype.communication.communication import Communication\n\t\tif not self.message_id:\n\t\t\treturn\n\n\t\treturn Communication.find_one_by_filters(message_id = self.message_id,\n\t\t\torder_by = 'creation DESC')\n\n\tdef is_sender_same_as_receiver(self):\n\t\treturn self.from_email == self.email_account.email_id\n\n\tdef is_reply_to_system_sent_mail(self):\n\t\t\"\"\"Is it a reply to already sent mail.\n\t\t\"\"\"\n\t\treturn self.is_reply() and frappe.local.site in self.in_reply_to\n\n\tdef parent_email_queue(self):\n\t\t\"\"\"Get parent record from `Email Queue`.\n\n\t\tIf it is a reply to already sent mail, then there will be a parent record in EMail Queue.\n\t\t\"\"\"\n\t\tfrom frappe.email.doctype.email_queue.email_queue import EmailQueue\n\n\t\tif self._parent_email_queue is not None:\n\t\t\treturn self._parent_email_queue\n\n\t\tparent_email_queue = ''\n\t\tif self.is_reply_to_system_sent_mail():\n\t\t\tparent_email_queue = EmailQueue.find_one_by_filters(message_id=self.in_reply_to)\n\n\t\tself._parent_email_queue = parent_email_queue or ''\n\t\treturn self._parent_email_queue\n\n\tdef parent_communication(self):\n\t\t\"\"\"Find a related communication so that we can prepare a mail 
thread.\n\n\t\tThe way it happens is by using in-reply-to header, and we can't make thread if it does not exist.\n\n\t\tHere are the cases to handle:\n\t\t1. If mail is a reply to already sent mail, then we can get parent communicaion from\n\t\t\tEmail Queue record.\n\t\t2. Sometimes we send communication name in message-ID directly, use that to get parent communication.\n\t\t3. Sender sent a reply but reply is on top of what (s)he sent before,\n\t\t\tthen parent record exists directly in communication.\n\t\t\"\"\"\n\t\tfrom frappe.core.doctype.communication.communication import Communication\n\t\tif self._parent_communication is not None:\n\t\t\treturn self._parent_communication\n\n\t\tif not self.is_reply():\n\t\t\treturn ''\n\n\t\tif not self.is_reply_to_system_sent_mail():\n\t\t\tcommunication = Communication.find_one_by_filters(message_id=self.in_reply_to,\n\t\t\t\tcreation = ['>=', self.get_relative_dt(-30)])\n\t\telif self.parent_email_queue() and self.parent_email_queue().communication:\n\t\t\tcommunication = Communication.find(self.parent_email_queue().communication, ignore_error=True)\n\t\telse:\n\t\t\treference = self.in_reply_to\n\t\t\tif '@' in self.in_reply_to:\n\t\t\t\treference, _ = self.in_reply_to.split(\"@\", 1)\n\t\t\tcommunication = Communication.find(reference, ignore_error=True)\n\n\t\tself._parent_communication = communication or ''\n\t\treturn self._parent_communication\n\n\tdef reference_document(self):\n\t\t\"\"\"Reference document is a document to which mail relate to.\n\n\t\tWe can get reference document from Parent record(EmailQueue | Communication) if exists.\n\t\tOtherwise we do subject match to find reference document if we know the reference(append_to) doctype.\n\t\t\"\"\"\n\t\tif self._reference_document is not None:\n\t\t\treturn self._reference_document\n\n\t\treference_document = \"\"\n\t\tparent = self.parent_email_queue() or self.parent_communication()\n\n\t\tif parent and parent.reference_doctype:\n\t\t\treference_doctype, reference_name = parent.reference_doctype, parent.reference_name\n\t\t\treference_document = self.get_doc(reference_doctype, reference_name, ignore_error=True)\n\n\t\tif not reference_document and self.email_account.append_to:\n\t\t\treference_document = self.match_record_by_subject_and_sender(self.email_account.append_to)\n\n\t\tself._reference_document = reference_document or ''\n\t\treturn self._reference_document\n\n\tdef get_reference_name_from_subject(self):\n\t\t\"\"\"\n\t\tEx: \"Re: Your email (#OPP-2020-2334343)\"\n\t\t\"\"\"\n\t\treturn self.subject.rsplit('#', 1)[-1].strip(' ()')\n\n\tdef match_record_by_subject_and_sender(self, doctype):\n\t\t\"\"\"Find a record in the given doctype that matches with email subject and sender.\n\n\t\tCases:\n\t\t1. Sometimes record name is part of subject. We can get document by parsing name from subject\n\t\t2. Find by matching sender and subject\n\t\t3. Find by matching subject alone (Special case)\n\t\t\tEx: when a System User is using Outlook and replies to an email from their own client,\n\t\t\tit reaches the Email Account with the threading info lost and the (sender + subject match)\n\t\t\tdoesn't work because the sender in the first communication was someone different to whom\n\t\t\tthe system user is replying to via the common email account in Frappe. 
This fix bypasses\n\t\t\tthe sender match when the sender is a system user and subject is atleast 10 chars long\n\t\t\t(for additional safety)\n\n\t\tNOTE: We consider not to match by subject if match record is very old.\n\t\t\"\"\"\n\t\tname = self.get_reference_name_from_subject()\n\t\temail_fields = self.get_email_fields(doctype)\n\n\t\trecord = self.get_doc(doctype, name, ignore_error=True) if name else None\n\n\t\tif not record:\n\t\t\tsubject = self.clean_subject(self.subject)\n\t\t\tfilters = {\n\t\t\t\temail_fields.subject_field: (\"like\", f\"%{subject}%\"),\n\t\t\t\t\"creation\": (\">\", self.get_relative_dt(days=-60))\n\t\t\t}\n\n\t\t\t# Sender check is not needed incase mail is from system user.\n\t\t\tif not (len(subject) > 10 and is_system_user(self.from_email)):\n\t\t\t\tfilters[email_fields.sender_field] = self.from_email\n\n\t\t\tname = frappe.db.get_value(self.email_account.append_to, filters = filters)\n\t\t\trecord = self.get_doc(doctype, name, ignore_error=True) if name else None\n\t\treturn record\n\n\tdef _create_reference_document(self, doctype):\n\t\t\"\"\" Create reference document if it does not exist in the system.\n\t\t\"\"\"\n\t\tparent = frappe.new_doc(doctype)\n\t\temail_fileds = self.get_email_fields(doctype)\n\n\t\tif email_fileds.subject_field:\n\t\t\tparent.set(email_fileds.subject_field, frappe.as_unicode(self.subject)[:140])\n\n\t\tif email_fileds.sender_field:\n\t\t\tparent.set(email_fileds.sender_field, frappe.as_unicode(self.from_email))\n\n\t\tparent.flags.ignore_mandatory = True\n\n\t\ttry:\n\t\t\tparent.insert(ignore_permissions=True)\n\t\texcept frappe.DuplicateEntryError:\n\t\t\t# try and find matching parent\n\t\t\tparent_name = frappe.db.get_value(self.email_account.append_to,\n\t\t\t\t{email_fileds.sender_field: self.from_email}\n\t\t\t)\n\t\t\tif parent_name:\n\t\t\t\tparent.name = parent_name\n\t\t\telse:\n\t\t\t\tparent = None\n\t\treturn parent\n\n\n\t@staticmethod\n\tdef get_doc(doctype, docname, ignore_error=False):\n\t\ttry:\n\t\t\treturn frappe.get_doc(doctype, docname)\n\t\texcept frappe.DoesNotExistError:\n\t\t\tif ignore_error:\n\t\t\t\treturn\n\t\t\traise\n\n\t@staticmethod\n\tdef get_relative_dt(days):\n\t\t\"\"\"Get relative to current datetime. 
Only relative days are supported.\n\t\t\"\"\"\n\t\treturn add_days(get_datetime(), days)\n\n\t@staticmethod\n\tdef get_users_linked_to_account(email_account):\n\t\t\"\"\"Get list of users who linked to Email account.\n\t\t\"\"\"\n\t\tusers = frappe.get_all(\"User Email\", filters={\"email_account\": email_account.name},\n\t\t\tfields=[\"parent\"])\n\t\treturn list(set([user.get(\"parent\") for user in users]))\n\n\t@staticmethod\n\tdef clean_subject(subject):\n\t\t\"\"\"Remove Prefixes like 'fw', FWD', 're' etc from subject.\n\t\t\"\"\"\n\t\t# Match strings like \"fw:\", \"re\t:\" etc.\n\t\tregex = r\"(^\\s*(fw|fwd|wg)[^:]*:|\\s*(re|aw)[^:]*:\\s*)*\"\n\t\treturn frappe.as_unicode(strip(re.sub(regex, \"\", subject, 0, flags=re.IGNORECASE)))\n\n\t@staticmethod\n\tdef get_email_fields(doctype):\n\t\t\"\"\"Returns Email related fields of a doctype.\n\t\t\"\"\"\n\t\tfields = frappe._dict()\n\n\t\temail_fields = ['subject_field', 'sender_field']\n\t\tmeta = frappe.get_meta(doctype)\n\n\t\tfor field in email_fields:\n\t\t\tif hasattr(meta, field):\n\t\t\t\tfields[field] = getattr(meta, field)\n\t\treturn fields\n\n\t@staticmethod\n\tdef get_document(self, doctype, name):\n\t\t\"\"\"Is same as frappe.get_doc but suppresses the DoesNotExist error.\n\t\t\"\"\"\n\t\ttry:\n\t\t\treturn frappe.get_doc(doctype, name)\n\t\texcept frappe.DoesNotExistError:\n\t\t\treturn None\n\n\tdef as_dict(self):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\treturn {\n\t\t\t\"subject\": self.subject,\n\t\t\t\"content\": self.get_content(),\n\t\t\t'text_content': self.text_content,\n\t\t\t\"sent_or_received\": \"Received\",\n\t\t\t\"sender_full_name\": self.from_real_name,\n\t\t\t\"sender\": self.from_email,\n\t\t\t\"recipients\": self.mail.get(\"To\"),\n\t\t\t\"cc\": self.mail.get(\"CC\"),\n\t\t\t\"email_account\": self.email_account.name,\n\t\t\t\"communication_medium\": \"Email\",\n\t\t\t\"uid\": self.uid,\n\t\t\t\"message_id\": self.message_id,\n\t\t\t\"communication_date\": self.date,\n\t\t\t\"has_attachment\": 1 if self.attachments else 0,\n\t\t\t\"seen\": self.seen_status or 0\n\t\t}\n\nclass TimerMixin(object):\n\tdef __init__(self, *args, **kwargs):\n\t\tself.timeout = kwargs.pop('timeout', 0.0)\n\t\tself.elapsed_time = 0.0\n\t\tself._super.__init__(self, *args, **kwargs)\n\t\tif self.timeout:\n\t\t\t# set per operation timeout to one-fifth of total pop timeout\n\t\t\tself.sock.settimeout(self.timeout / 5.0)\n\n\tdef _getline(self, *args, **kwargs):\n\t\tstart_time = time.time()\n\t\tret = self._super._getline(self, *args, **kwargs)\n\n\t\tself.elapsed_time += time.time() - start_time\n\t\tif self.timeout and self.elapsed_time > self.timeout:\n\t\t\traise EmailTimeoutError\n\n\t\treturn ret\n\n\tdef quit(self, *args, **kwargs):\n\t\tself.elapsed_time = 0.0\n\t\treturn self._super.quit(self, *args, **kwargs)\n\nclass Timed_POP3(TimerMixin, poplib.POP3):\n\t_super = poplib.POP3\n\nclass Timed_POP3_SSL(TimerMixin, poplib.POP3_SSL):\n\t_super = poplib.POP3_SSL\n\nclass Timed_IMAP4(TimerMixin, imaplib.IMAP4):\n\t_super = imaplib.IMAP4\n\nclass Timed_IMAP4_SSL(TimerMixin, imaplib.IMAP4_SSL):\n\t_super = imaplib.IMAP4_SSL\n", "path": "frappe/email/receive.py" } ]
diff --git a/frappe/email/receive.py b/frappe/email/receive.py
index 4f4ed6d48ec4..dd64d0df80e9 100644
--- a/frappe/email/receive.py
+++ b/frappe/email/receive.py
@@ -27,11 +27,7 @@
 # fix due to a python bug in poplib that limits it to 2048
 poplib._MAXLINE = 20480
-imaplib._MAXLINE = 20480
-# fix due to a python bug in poplib that limits it to 2048
-poplib._MAXLINE = 20480
-imaplib._MAXLINE = 20480
 
 class EmailSizeExceededError(frappe.ValidationError): pass
svthalia__concrexit-3188
Non-closable site announcements can secretly be closed if you send the right cookie

### Describe the bug
Non-closable site announcements can secretly be closed if you send the right cookie.

### How to reproduce
Steps to reproduce the behaviour:
1. Have an announcement that is closable
2. Close it on your machine
3. Change the announcement to be non-closable
4. The announcement still isn't shown, because you already closed it. The `closeable` field just controls whether the close button appears or not; it doesn't influence the actual filtering logic.

### Expected behaviour
Always show non-closable announcements.

### Screenshots

### Additional context
[ { "content": "\"\"\"These context processors can be used to expand the context provided to admin views.\"\"\"\nfrom .models import Announcement\n\n\ndef announcements(request):\n \"\"\"Get a list of announcements.\n\n Filters out announcements that have been closed already.\n\n :param request: the request object\n :return: a dict containing the list announcements\n :rtype: dict\n \"\"\"\n closed_announcements = request.session.get(\"closed_announcements\", [])\n announcements_list = [\n a\n for a in Announcement.objects.all()\n if a.is_visible and a.pk not in closed_announcements\n ]\n\n # Announcements set by AnnouncementMiddleware.\n persistent_announcements = getattr(request, \"_announcements\", [])\n return {\n \"announcements\": announcements_list,\n \"persistent_announcements\": persistent_announcements,\n }\n", "path": "website/announcements/context_processors.py" } ]
[ { "content": "\"\"\"These context processors can be used to expand the context provided to admin views.\"\"\"\nfrom .models import Announcement\n\n\ndef announcements(request):\n \"\"\"Get a list of announcements.\n\n Filters out announcements that have been closed already.\n\n :param request: the request object\n :return: a dict containing the list announcements\n :rtype: dict\n \"\"\"\n closed_announcements = request.session.get(\"closed_announcements\", [])\n announcements_list = [\n a\n for a in Announcement.objects.all()\n if a.is_visible and (not a.closeable or a.pk not in closed_announcements)\n ]\n\n # Announcements set by AnnouncementMiddleware.\n persistent_announcements = getattr(request, \"_announcements\", [])\n return {\n \"announcements\": announcements_list,\n \"persistent_announcements\": persistent_announcements,\n }\n", "path": "website/announcements/context_processors.py" } ]
diff --git a/website/announcements/context_processors.py b/website/announcements/context_processors.py
index 3b5a715ad..934453e87 100644
--- a/website/announcements/context_processors.py
+++ b/website/announcements/context_processors.py
@@ -15,7 +15,7 @@ def announcements(request):
     announcements_list = [
         a
         for a in Announcement.objects.all()
-        if a.is_visible and a.pk not in closed_announcements
+        if a.is_visible and (not a.closeable or a.pk not in closed_announcements)
     ]
 
     # Announcements set by AnnouncementMiddleware.
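For illustration, the condition introduced by the patch above can be exercised on its own. The sketch below uses `SimpleNamespace` stand-ins for the Django `Announcement` model and a hypothetical list of dismissed pks, so it mirrors only the filtering logic, not the real session handling:

```python
from types import SimpleNamespace

# pks the user has already dismissed (normally read from request.session)
closed_announcements = [1, 2]

announcements = [
    SimpleNamespace(pk=1, is_visible=True, closeable=True),    # dismissed and closeable -> hidden
    SimpleNamespace(pk=2, is_visible=True, closeable=False),   # dismissed but NOT closeable -> shown again
    SimpleNamespace(pk=3, is_visible=False, closeable=False),  # outside its visibility window -> hidden
]

visible = [
    a
    for a in announcements
    if a.is_visible and (not a.closeable or a.pk not in closed_announcements)
]
print([a.pk for a in visible])  # [2]
```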
geopandas__geopandas-1583
BUG: legend misplaced in subplots

I'm using v0.8.0. When I plot on multiple subplots and pass `legend_kwds` to `gdf.plot`, it draws all the subplot legends on the same subplot.

This works as expected:

```python
import geopandas as gpd
import matplotlib.pyplot as plt

gdf = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))

fig, axes = plt.subplots(3, 2, figsize=(8,8))
for ax in axes.flat:
    gdf.plot(ax=ax, column='gdp_md_est', legend=True)
```

![1](https://user-images.githubusercontent.com/4977197/89090446-cccfb000-d357-11ea-8c9f-1505629b4f80.png)

But this draws the legends all on the same subplot:

```python
legend_kwds = {'orientation': 'horizontal'}

fig, axes = plt.subplots(3, 2, figsize=(8,8))
for ax in axes.flat:
    gdf.plot(ax=ax, column='gdp_md_est', legend=True, legend_kwds=legend_kwds)
```

![2](https://user-images.githubusercontent.com/4977197/89090447-ce997380-d357-11ea-9d86-7e5c6fb4edbf.png)

Is this user error or a bug?
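It is a bug rather than user error: as the patch further down in this entry shows, `plot_dataframe` calls `setdefault` for the `"ax"`/`"cax"` keys on the `legend_kwds` dict it was handed, so reusing one dict across subplots pins every colorbar to the axes of the first call. On affected versions (such as 0.8.0), a workaround is to give each call its own copy of the dict. A minimal sketch based on the reproduction above:

```python
import geopandas as gpd
import matplotlib.pyplot as plt

gdf = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
legend_kwds = {'orientation': 'horizontal'}

fig, axes = plt.subplots(3, 2, figsize=(8, 8))
for ax in axes.flat:
    # Pass a fresh copy so the plotting code cannot remember the first axes.
    gdf.plot(ax=ax, column='gdp_md_est', legend=True, legend_kwds=dict(legend_kwds))
```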
[ { "content": "import warnings\n\nimport numpy as np\nimport pandas as pd\n\nimport geopandas\n\nfrom distutils.version import LooseVersion\n\n\ndef deprecated(new):\n \"\"\"Helper to provide deprecation warning.\"\"\"\n\n def old(*args, **kwargs):\n warnings.warn(\n \"{} is intended for internal \".format(new.__name__[1:])\n + \"use only, and will be deprecated.\",\n DeprecationWarning,\n stacklevel=2,\n )\n new(*args, **kwargs)\n\n return old\n\n\ndef _flatten_multi_geoms(geoms, prefix=\"Multi\"):\n \"\"\"\n Returns Series like geoms and index, except that any Multi geometries\n are split into their components and indices are repeated for all component\n in the same Multi geometry. Maintains 1:1 matching of geometry to value.\n\n Prefix specifies type of geometry to be flatten. 'Multi' for MultiPoint and similar,\n \"Geom\" for GeometryCollection.\n\n Returns\n -------\n components : list of geometry\n\n component_index : index array\n indices are repeated for all components in the same Multi geometry\n \"\"\"\n components, component_index = [], []\n\n if not geoms.geom_type.str.startswith(prefix).any():\n return geoms, np.arange(len(geoms))\n\n for ix, geom in enumerate(geoms):\n if geom.type.startswith(prefix):\n for poly in geom:\n components.append(poly)\n component_index.append(ix)\n else:\n components.append(geom)\n component_index.append(ix)\n\n return components, np.array(component_index)\n\n\ndef _expand_kwargs(kwargs, multiindex):\n \"\"\"\n Most arguments to the plot functions must be a (single) value, or a sequence\n of values. This function checks each key-value pair in 'kwargs' and expands\n it (in place) to the correct length/formats with help of 'multiindex', unless\n the value appears to already be a valid (single) value for the key.\n \"\"\"\n from matplotlib.colors import is_color_like\n from typing import Iterable\n\n for att, value in kwargs.items():\n if \"color\" in att: # color(s), edgecolor(s), facecolor(s)\n if is_color_like(value):\n continue\n elif \"linestyle\" in att: # linestyle(s)\n # A single linestyle can be 2-tuple of a number and an iterable.\n if (\n isinstance(value, tuple)\n and len(value) == 2\n and isinstance(value[1], Iterable)\n ):\n continue\n elif att in [\"marker\", \"alpha\"]:\n # For these attributes, only a single value is allowed, so never expand.\n continue\n\n if pd.api.types.is_list_like(value):\n kwargs[att] = np.take(value, multiindex, axis=0)\n\n\ndef _plot_polygon_collection(\n ax, geoms, values=None, color=None, cmap=None, vmin=None, vmax=None, **kwargs\n):\n \"\"\"\n Plots a collection of Polygon and MultiPolygon geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : a sequence of `N` Polygons and/or MultiPolygons (can be mixed)\n\n values : a sequence of `N` values, optional\n Values will be mapped to colors using vmin/vmax/cmap. They should\n have 1:1 correspondence with the geometries (not their components).\n Otherwise follows `color` / `facecolor` kwargs.\n edgecolor : single color or sequence of `N` colors\n Color for the edge of the polygons\n facecolor : single color or sequence of `N` colors\n Color to fill the polygons. 
Cannot be used together with `values`.\n color : single color or sequence of `N` colors\n Sets both `edgecolor` and `facecolor`\n **kwargs\n Additional keyword arguments passed to the collection\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n\n try:\n from descartes.patch import PolygonPatch\n except ImportError:\n raise ImportError(\n \"The descartes package is required for plotting polygons in geopandas. \"\n \"You can install it using 'conda install -c conda-forge descartes' or \"\n \"'pip install descartes'.\"\n )\n from matplotlib.collections import PatchCollection\n\n geoms, multiindex = _flatten_multi_geoms(geoms)\n if values is not None:\n values = np.take(values, multiindex, axis=0)\n\n # PatchCollection does not accept some kwargs.\n kwargs = {\n att: value\n for att, value in kwargs.items()\n if att not in [\"markersize\", \"marker\"]\n }\n\n # Add to kwargs for easier checking below.\n if color is not None:\n kwargs[\"color\"] = color\n\n _expand_kwargs(kwargs, multiindex)\n\n collection = PatchCollection([PolygonPatch(poly) for poly in geoms], **kwargs)\n\n if values is not None:\n collection.set_array(np.asarray(values))\n collection.set_cmap(cmap)\n if \"norm\" not in kwargs:\n collection.set_clim(vmin, vmax)\n\n ax.add_collection(collection, autolim=True)\n ax.autoscale_view()\n return collection\n\n\nplot_polygon_collection = deprecated(_plot_polygon_collection)\n\n\ndef _plot_linestring_collection(\n ax, geoms, values=None, color=None, cmap=None, vmin=None, vmax=None, **kwargs\n):\n \"\"\"\n Plots a collection of LineString and MultiLineString geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : a sequence of `N` LineStrings and/or MultiLineStrings (can be\n mixed)\n values : a sequence of `N` values, optional\n Values will be mapped to colors using vmin/vmax/cmap. 
They should\n have 1:1 correspondence with the geometries (not their components).\n color : single color or sequence of `N` colors\n Cannot be used together with `values`.\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n from matplotlib.collections import LineCollection\n\n geoms, multiindex = _flatten_multi_geoms(geoms)\n if values is not None:\n values = np.take(values, multiindex, axis=0)\n\n # LineCollection does not accept some kwargs.\n kwargs = {\n att: value\n for att, value in kwargs.items()\n if att not in [\"markersize\", \"marker\"]\n }\n\n # Add to kwargs for easier checking below.\n if color is not None:\n kwargs[\"color\"] = color\n\n _expand_kwargs(kwargs, multiindex)\n\n segments = [np.array(linestring)[:, :2] for linestring in geoms]\n collection = LineCollection(segments, **kwargs)\n\n if values is not None:\n collection.set_array(np.asarray(values))\n collection.set_cmap(cmap)\n if \"norm\" not in kwargs:\n collection.set_clim(vmin, vmax)\n\n ax.add_collection(collection, autolim=True)\n ax.autoscale_view()\n return collection\n\n\nplot_linestring_collection = deprecated(_plot_linestring_collection)\n\n\ndef _plot_point_collection(\n ax,\n geoms,\n values=None,\n color=None,\n cmap=None,\n vmin=None,\n vmax=None,\n marker=\"o\",\n markersize=None,\n **kwargs\n):\n \"\"\"\n Plots a collection of Point and MultiPoint geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : sequence of `N` Points or MultiPoints\n\n values : a sequence of `N` values, optional\n Values mapped to colors using vmin, vmax, and cmap.\n Cannot be specified together with `color`.\n markersize : scalar or array-like, optional\n Size of the markers. Note that under the hood ``scatter`` is\n used, so the specified value will be proportional to the\n area of the marker (size in points^2).\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n if values is not None and color is not None:\n raise ValueError(\"Can only specify one of 'values' and 'color' kwargs\")\n\n geoms, multiindex = _flatten_multi_geoms(geoms)\n if values is not None:\n values = np.take(values, multiindex, axis=0)\n\n x = [p.x for p in geoms]\n y = [p.y for p in geoms]\n\n # matplotlib 1.4 does not support c=None, and < 2.0 does not support s=None\n if values is not None:\n kwargs[\"c\"] = values\n if markersize is not None:\n kwargs[\"s\"] = markersize\n\n # Add to kwargs for easier checking below.\n if color is not None:\n kwargs[\"color\"] = color\n if marker is not None:\n kwargs[\"marker\"] = marker\n _expand_kwargs(kwargs, multiindex)\n\n if \"norm\" not in kwargs:\n collection = ax.scatter(x, y, vmin=vmin, vmax=vmax, cmap=cmap, **kwargs)\n else:\n collection = ax.scatter(x, y, cmap=cmap, **kwargs)\n\n return collection\n\n\nplot_point_collection = deprecated(_plot_point_collection)\n\n\ndef plot_series(\n s, cmap=None, color=None, ax=None, figsize=None, aspect=\"auto\", **style_kwds\n):\n \"\"\"\n Plot a GeoSeries.\n\n Generate a plot of a GeoSeries geometry with matplotlib.\n\n Parameters\n ----------\n s : Series\n The GeoSeries to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n cmap : str (default None)\n The name of a colormap recognized by matplotlib. Any\n colormap will work, but categorical colormaps are\n generally recommended. 
Examples of useful discrete\n colormaps include:\n\n tab10, tab20, Accent, Dark2, Paired, Pastel1, Set1, Set2\n\n color : str (default None)\n If specified, all objects will be colored uniformly.\n ax : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n figsize : pair of floats (default None)\n Size of the resulting matplotlib.figure.Figure. If the argument\n ax is given explicitly, figsize is ignored.\n aspect : 'auto', 'equal' or float (default 'auto')\n Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if\n however data are not projected (coordinates are long/lat), the aspect is by\n default set to 1/cos(s_y * pi/180) with s_y the y coordinate of the middle of\n the GeoSeries (the mean of the y range of bounding box) so that a long/lat\n square appears square in the middle of the plot. This implies an\n Equirectangular projection. It can also be set manually (float) as the ratio\n of y-unit to x-unit.\n **style_kwds : dict\n Color options to be passed on to the actual plot function, such\n as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,\n ``alpha``.\n\n Returns\n -------\n ax : matplotlib axes instance\n \"\"\"\n if \"colormap\" in style_kwds:\n warnings.warn(\n \"'colormap' is deprecated, please use 'cmap' instead \"\n \"(for consistency with matplotlib)\",\n FutureWarning,\n )\n cmap = style_kwds.pop(\"colormap\")\n if \"axes\" in style_kwds:\n warnings.warn(\n \"'axes' is deprecated, please use 'ax' instead \"\n \"(for consistency with pandas)\",\n FutureWarning,\n )\n ax = style_kwds.pop(\"axes\")\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise ImportError(\n \"The matplotlib package is required for plotting in geopandas. \"\n \"You can install it using 'conda install -c conda-forge matplotlib' or \"\n \"'pip install matplotlib'.\"\n )\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n\n if aspect == \"auto\":\n if s.crs and s.crs.is_geographic:\n bounds = s.total_bounds\n y_coord = np.mean([bounds[1], bounds[3]])\n ax.set_aspect(1 / np.cos(y_coord * np.pi / 180))\n # formula ported from R package sp\n # https://github.com/edzer/sp/blob/master/R/mapasp.R\n else:\n ax.set_aspect(\"equal\")\n else:\n ax.set_aspect(aspect)\n\n if s.empty:\n warnings.warn(\n \"The GeoSeries you are attempting to plot is \"\n \"empty. Nothing has been displayed.\",\n UserWarning,\n )\n return ax\n\n # if cmap is specified, create range of colors based on cmap\n values = None\n if cmap is not None:\n values = np.arange(len(s))\n if hasattr(cmap, \"N\"):\n values = values % cmap.N\n style_kwds[\"vmin\"] = style_kwds.get(\"vmin\", values.min())\n style_kwds[\"vmax\"] = style_kwds.get(\"vmax\", values.max())\n\n # decompose GeometryCollections\n geoms, multiindex = _flatten_multi_geoms(s.geometry, prefix=\"Geom\")\n values = np.take(values, multiindex, axis=0) if cmap else None\n expl_series = geopandas.GeoSeries(geoms)\n\n geom_types = expl_series.type\n poly_idx = np.asarray((geom_types == \"Polygon\") | (geom_types == \"MultiPolygon\"))\n line_idx = np.asarray(\n (geom_types == \"LineString\")\n | (geom_types == \"MultiLineString\")\n | (geom_types == \"LinearRing\")\n )\n point_idx = np.asarray((geom_types == \"Point\") | (geom_types == \"MultiPoint\"))\n\n # plot all Polygons and all MultiPolygon components in the same collection\n polys = expl_series[poly_idx]\n if not polys.empty:\n # color overrides both face and edgecolor. 
As we want people to be\n # able to use edgecolor as well, pass color to facecolor\n facecolor = style_kwds.pop(\"facecolor\", None)\n if color is not None:\n facecolor = color\n\n values_ = values[poly_idx] if cmap else None\n _plot_polygon_collection(\n ax, polys, values_, facecolor=facecolor, cmap=cmap, **style_kwds\n )\n\n # plot all LineStrings and MultiLineString components in same collection\n lines = expl_series[line_idx]\n if not lines.empty:\n values_ = values[line_idx] if cmap else None\n _plot_linestring_collection(\n ax, lines, values_, color=color, cmap=cmap, **style_kwds\n )\n\n # plot all Points in the same collection\n points = expl_series[point_idx]\n if not points.empty:\n values_ = values[point_idx] if cmap else None\n _plot_point_collection(\n ax, points, values_, color=color, cmap=cmap, **style_kwds\n )\n\n plt.draw()\n return ax\n\n\ndef plot_dataframe(\n df,\n column=None,\n cmap=None,\n color=None,\n ax=None,\n cax=None,\n categorical=False,\n legend=False,\n scheme=None,\n k=5,\n vmin=None,\n vmax=None,\n markersize=None,\n figsize=None,\n legend_kwds=None,\n categories=None,\n classification_kwds=None,\n missing_kwds=None,\n aspect=\"auto\",\n **style_kwds\n):\n \"\"\"\n Plot a GeoDataFrame.\n\n Generate a plot of a GeoDataFrame with matplotlib. If a\n column is specified, the plot coloring will be based on values\n in that column.\n\n Parameters\n ----------\n df : GeoDataFrame\n The GeoDataFrame to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n column : str, np.array, pd.Series (default None)\n The name of the dataframe column, np.array, or pd.Series to be plotted.\n If np.array or pd.Series are used then it must have same length as\n dataframe. Values are used to color the plot. Ignored if `color` is\n also set.\n cmap : str (default None)\n The name of a colormap recognized by matplotlib.\n color : str (default None)\n If specified, all objects will be colored uniformly.\n ax : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n cax : matplotlib.pyplot Artist (default None)\n axes on which to draw the legend in case of color map.\n categorical : bool (default False)\n If False, cmap will reflect numerical values of the\n column being plotted. For non-numerical columns, this\n will be set to True.\n legend : bool (default False)\n Plot a legend. Ignored if no `column` is given, or if `color` is given.\n scheme : str (default None)\n Name of a choropleth classification scheme (requires mapclassify).\n A mapclassify.MapClassifier object will be used\n under the hood. Supported are all schemes provided by mapclassify (e.g.\n 'BoxPlot', 'EqualInterval', 'FisherJenks', 'FisherJenksSampled',\n 'HeadTailBreaks', 'JenksCaspall', 'JenksCaspallForced',\n 'JenksCaspallSampled', 'MaxP', 'MaximumBreaks',\n 'NaturalBreaks', 'Quantiles', 'Percentiles', 'StdMean',\n 'UserDefined'). Arguments can be passed in classification_kwds.\n k : int (default 5)\n Number of classes (ignored if scheme is None)\n vmin : None or float (default None)\n Minimum value of cmap. If None, the minimum data value\n in the column to be plotted is used.\n vmax : None or float (default None)\n Maximum value of cmap. If None, the maximum data value\n in the column to be plotted is used.\n markersize : str or float or sequence (default None)\n Only applies to point geometries within a frame.\n If a str, will use the values in the column of the frame specified\n by markersize to set the size of markers. 
Otherwise can be a value\n to apply to all points, or a sequence of the same length as the\n number of points.\n figsize : tuple of integers (default None)\n Size of the resulting matplotlib.figure.Figure. If the argument\n axes is given explicitly, figsize is ignored.\n legend_kwds : dict (default None)\n Keyword arguments to pass to matplotlib.pyplot.legend() or\n matplotlib.pyplot.colorbar().\n Additional accepted keywords when `scheme` is specified:\n\n fmt : string\n A formatting specification for the bin edges of the classes in the\n legend. For example, to have no decimals: ``{\"fmt\": \"{:.0f}\"}``.\n labels : list-like\n A list of legend labels to override the auto-generated labels.\n Needs to have the same number of elements as the number of\n classes (`k`).\n categories : list-like\n Ordered list-like object of categories to be used for categorical plot.\n classification_kwds : dict (default None)\n Keyword arguments to pass to mapclassify\n missing_kwds : dict (default None)\n Keyword arguments specifying color options (as style_kwds)\n to be passed on to geometries with missing values in addition to\n or overwriting other style kwds. If None, geometries with missing\n values are not plotted.\n aspect : 'auto', 'equal' or float (default 'auto')\n Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if\n however data are not projected (coordinates are long/lat), the aspect is by\n default set to 1/cos(df_y * pi/180) with df_y the y coordinate of the middle of\n the GeoDataFrame (the mean of the y range of bounding box) so that a long/lat\n square appears square in the middle of the plot. This implies an\n Equirectangular projection. It can also be set manually (float) as the ratio\n of y-unit to x-unit.\n\n **style_kwds : dict\n Style options to be passed on to the actual plot function, such\n as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,\n ``alpha``.\n\n Returns\n -------\n ax : matplotlib axes instance\n\n \"\"\"\n if \"colormap\" in style_kwds:\n warnings.warn(\n \"'colormap' is deprecated, please use 'cmap' instead \"\n \"(for consistency with matplotlib)\",\n FutureWarning,\n )\n cmap = style_kwds.pop(\"colormap\")\n if \"axes\" in style_kwds:\n warnings.warn(\n \"'axes' is deprecated, please use 'ax' instead \"\n \"(for consistency with pandas)\",\n FutureWarning,\n )\n ax = style_kwds.pop(\"axes\")\n if column is not None and color is not None:\n warnings.warn(\n \"Only specify one of 'column' or 'color'. Using 'color'.\", UserWarning\n )\n column = None\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise ImportError(\n \"The matplotlib package is required for plotting in geopandas. \"\n \"You can install it using 'conda install -c conda-forge matplotlib' or \"\n \"'pip install matplotlib'.\"\n )\n\n if ax is None:\n if cax is not None:\n raise ValueError(\"'ax' can not be None if 'cax' is not.\")\n fig, ax = plt.subplots(figsize=figsize)\n\n if aspect == \"auto\":\n if df.crs and df.crs.is_geographic:\n bounds = df.total_bounds\n y_coord = np.mean([bounds[1], bounds[3]])\n ax.set_aspect(1 / np.cos(y_coord * np.pi / 180))\n # formula ported from R package sp\n # https://github.com/edzer/sp/blob/master/R/mapasp.R\n else:\n ax.set_aspect(\"equal\")\n else:\n ax.set_aspect(aspect)\n\n if df.empty:\n warnings.warn(\n \"The GeoDataFrame you are attempting to plot is \"\n \"empty. 
Nothing has been displayed.\",\n UserWarning,\n )\n return ax\n\n if isinstance(markersize, str):\n markersize = df[markersize].values\n\n if column is None:\n return plot_series(\n df.geometry,\n cmap=cmap,\n color=color,\n ax=ax,\n figsize=figsize,\n markersize=markersize,\n aspect=aspect,\n **style_kwds\n )\n\n # To accept pd.Series and np.arrays as column\n if isinstance(column, (np.ndarray, pd.Series)):\n if column.shape[0] != df.shape[0]:\n raise ValueError(\n \"The dataframe and given column have different number of rows.\"\n )\n else:\n values = column\n else:\n values = df[column]\n\n if pd.api.types.is_categorical_dtype(values.dtype):\n if categories is not None:\n raise ValueError(\n \"Cannot specify 'categories' when column has categorical dtype\"\n )\n categorical = True\n elif values.dtype is np.dtype(\"O\") or categories:\n categorical = True\n\n nan_idx = np.asarray(pd.isna(values), dtype=\"bool\")\n\n # Define `values` as a Series\n if categorical:\n if cmap is None:\n cmap = \"tab10\"\n\n cat = pd.Categorical(values, categories=categories)\n categories = list(cat.categories)\n\n # values missing in the Categorical but not in original values\n missing = list(np.unique(values[~nan_idx & cat.isna()]))\n if missing:\n raise ValueError(\n \"Column contains values not listed in categories. \"\n \"Missing categories: {}.\".format(missing)\n )\n\n values = cat.codes[~nan_idx]\n vmin = 0 if vmin is None else vmin\n vmax = len(categories) - 1 if vmax is None else vmax\n\n if scheme is not None:\n if classification_kwds is None:\n classification_kwds = {}\n if \"k\" not in classification_kwds:\n classification_kwds[\"k\"] = k\n\n binning = _mapclassify_choro(values[~nan_idx], scheme, **classification_kwds)\n # set categorical to True for creating the legend\n categorical = True\n if legend_kwds is not None and \"labels\" in legend_kwds:\n if len(legend_kwds[\"labels\"]) != binning.k:\n raise ValueError(\n \"Number of labels must match number of bins, \"\n \"received {} labels for {} bins\".format(\n len(legend_kwds[\"labels\"]), binning.k\n )\n )\n else:\n categories = list(legend_kwds.pop(\"labels\"))\n else:\n fmt = \"{:.2f}\"\n if legend_kwds is not None and \"fmt\" in legend_kwds:\n fmt = legend_kwds.pop(\"fmt\")\n categories = binning.get_legend_classes(fmt)\n values = np.array(binning.yb)\n\n # fill values with placeholder where were NaNs originally to map them properly\n # (after removing them in categorical or scheme)\n if categorical:\n for n in np.where(nan_idx)[0]:\n values = np.insert(values, n, values[0])\n\n mn = values[~np.isnan(values)].min() if vmin is None else vmin\n mx = values[~np.isnan(values)].max() if vmax is None else vmax\n\n # decompose GeometryCollections\n geoms, multiindex = _flatten_multi_geoms(df.geometry, prefix=\"Geom\")\n values = np.take(values, multiindex, axis=0)\n nan_idx = np.take(nan_idx, multiindex, axis=0)\n expl_series = geopandas.GeoSeries(geoms)\n\n geom_types = expl_series.type\n poly_idx = np.asarray((geom_types == \"Polygon\") | (geom_types == \"MultiPolygon\"))\n line_idx = np.asarray(\n (geom_types == \"LineString\")\n | (geom_types == \"MultiLineString\")\n | (geom_types == \"LinearRing\")\n )\n point_idx = np.asarray((geom_types == \"Point\") | (geom_types == \"MultiPoint\"))\n\n # plot all Polygons and all MultiPolygon components in the same collection\n polys = expl_series[poly_idx & np.invert(nan_idx)]\n subset = values[poly_idx & np.invert(nan_idx)]\n if not polys.empty:\n _plot_polygon_collection(\n ax, polys, subset, 
vmin=mn, vmax=mx, cmap=cmap, **style_kwds\n )\n\n # plot all LineStrings and MultiLineString components in same collection\n lines = expl_series[line_idx & np.invert(nan_idx)]\n subset = values[line_idx & np.invert(nan_idx)]\n if not lines.empty:\n _plot_linestring_collection(\n ax, lines, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds\n )\n\n # plot all Points in the same collection\n points = expl_series[point_idx & np.invert(nan_idx)]\n subset = values[point_idx & np.invert(nan_idx)]\n if not points.empty:\n if isinstance(markersize, np.ndarray):\n markersize = np.take(markersize, multiindex, axis=0)\n markersize = markersize[point_idx & np.invert(nan_idx)]\n _plot_point_collection(\n ax,\n points,\n subset,\n vmin=mn,\n vmax=mx,\n markersize=markersize,\n cmap=cmap,\n **style_kwds\n )\n\n if missing_kwds is not None:\n if color:\n if \"color\" not in missing_kwds:\n missing_kwds[\"color\"] = color\n\n merged_kwds = style_kwds.copy()\n merged_kwds.update(missing_kwds)\n\n plot_series(expl_series[nan_idx], ax=ax, **merged_kwds)\n\n if legend and not color:\n\n if legend_kwds is None:\n legend_kwds = {}\n if \"fmt\" in legend_kwds:\n legend_kwds.pop(\"fmt\")\n\n from matplotlib.lines import Line2D\n from matplotlib.colors import Normalize\n from matplotlib import cm\n\n norm = style_kwds.get(\"norm\", None)\n if not norm:\n norm = Normalize(vmin=mn, vmax=mx)\n n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n if categorical:\n patches = []\n for value, cat in enumerate(categories):\n patches.append(\n Line2D(\n [0],\n [0],\n linestyle=\"none\",\n marker=\"o\",\n alpha=style_kwds.get(\"alpha\", 1),\n markersize=10,\n markerfacecolor=n_cmap.to_rgba(value),\n markeredgewidth=0,\n )\n )\n if missing_kwds is not None:\n if \"color\" in merged_kwds:\n merged_kwds[\"facecolor\"] = merged_kwds[\"color\"]\n patches.append(\n Line2D(\n [0],\n [0],\n linestyle=\"none\",\n marker=\"o\",\n alpha=merged_kwds.get(\"alpha\", 1),\n markersize=10,\n markerfacecolor=merged_kwds.get(\"facecolor\", None),\n markeredgecolor=merged_kwds.get(\"edgecolor\", None),\n markeredgewidth=merged_kwds.get(\n \"linewidth\", 1 if merged_kwds.get(\"edgecolor\", False) else 0\n ),\n )\n )\n categories.append(merged_kwds.get(\"label\", \"NaN\"))\n legend_kwds.setdefault(\"numpoints\", 1)\n legend_kwds.setdefault(\"loc\", \"best\")\n ax.legend(patches, categories, **legend_kwds)\n else:\n\n if cax is not None:\n legend_kwds.setdefault(\"cax\", cax)\n else:\n legend_kwds.setdefault(\"ax\", ax)\n\n n_cmap.set_array([])\n ax.get_figure().colorbar(n_cmap, **legend_kwds)\n\n plt.draw()\n return ax\n\n\ndef _mapclassify_choro(values, scheme, **classification_kwds):\n \"\"\"\n Wrapper for choropleth schemes from mapclassify for use with plot_dataframe\n\n Parameters\n ----------\n values\n Series to be plotted\n scheme : str\n One of mapclassify classification schemes\n Options are BoxPlot, EqualInterval, FisherJenks,\n FisherJenksSampled, HeadTailBreaks, JenksCaspall,\n JenksCaspallForced, JenksCaspallSampled, MaxP,\n MaximumBreaks, NaturalBreaks, Quantiles, Percentiles, StdMean,\n UserDefined\n\n **classification_kwds : dict\n Keyword arguments for classification scheme\n For details see mapclassify documentation:\n https://pysal.org/mapclassify/api.html\n\n Returns\n -------\n binning\n Binning objects that holds the Series with values replaced with\n class identifier and the bins.\n \"\"\"\n try:\n import mapclassify.classifiers as classifiers\n\n except ImportError:\n raise ImportError(\n \"The 'mapclassify' >= 2.2.0 package 
is required to use the 'scheme' keyword\"\n )\n from mapclassify import __version__ as mc_version\n\n if mc_version < LooseVersion(\"2.2.0\"):\n raise ImportError(\n \"The 'mapclassify' >= 2.2.0 package is required to \"\n \"use the 'scheme' keyword\"\n )\n schemes = {}\n for classifier in classifiers.CLASSIFIERS:\n schemes[classifier.lower()] = getattr(classifiers, classifier)\n\n scheme = scheme.lower()\n\n # mapclassify < 2.1 cleaned up the scheme names (removing underscores)\n # trying both to keep compatibility with older versions and provide\n # compatibility with newer versions of mapclassify\n oldnew = {\n \"Box_Plot\": \"BoxPlot\",\n \"Equal_Interval\": \"EqualInterval\",\n \"Fisher_Jenks\": \"FisherJenks\",\n \"Fisher_Jenks_Sampled\": \"FisherJenksSampled\",\n \"HeadTail_Breaks\": \"HeadTailBreaks\",\n \"Jenks_Caspall\": \"JenksCaspall\",\n \"Jenks_Caspall_Forced\": \"JenksCaspallForced\",\n \"Jenks_Caspall_Sampled\": \"JenksCaspallSampled\",\n \"Max_P_Plassifier\": \"MaxP\",\n \"Maximum_Breaks\": \"MaximumBreaks\",\n \"Natural_Breaks\": \"NaturalBreaks\",\n \"Std_Mean\": \"StdMean\",\n \"User_Defined\": \"UserDefined\",\n }\n scheme_names_mapping = {}\n scheme_names_mapping.update(\n {old.lower(): new.lower() for old, new in oldnew.items()}\n )\n scheme_names_mapping.update(\n {new.lower(): old.lower() for old, new in oldnew.items()}\n )\n\n try:\n scheme_class = schemes[scheme]\n except KeyError:\n scheme = scheme_names_mapping.get(scheme, scheme)\n try:\n scheme_class = schemes[scheme]\n except KeyError:\n raise ValueError(\n \"Invalid scheme. Scheme must be in the set: %r\" % schemes.keys()\n )\n\n if classification_kwds[\"k\"] is not None:\n from inspect import getfullargspec as getspec\n\n spec = getspec(scheme_class.__init__)\n if \"k\" not in spec.args:\n del classification_kwds[\"k\"]\n try:\n binning = scheme_class(np.asarray(values), **classification_kwds)\n except TypeError:\n raise TypeError(\"Invalid keyword argument for %r \" % scheme)\n return binning\n", "path": "geopandas/plotting.py" } ]
[ { "content": "import warnings\n\nimport numpy as np\nimport pandas as pd\n\nimport geopandas\n\nfrom distutils.version import LooseVersion\n\n\ndef deprecated(new):\n \"\"\"Helper to provide deprecation warning.\"\"\"\n\n def old(*args, **kwargs):\n warnings.warn(\n \"{} is intended for internal \".format(new.__name__[1:])\n + \"use only, and will be deprecated.\",\n DeprecationWarning,\n stacklevel=2,\n )\n new(*args, **kwargs)\n\n return old\n\n\ndef _flatten_multi_geoms(geoms, prefix=\"Multi\"):\n \"\"\"\n Returns Series like geoms and index, except that any Multi geometries\n are split into their components and indices are repeated for all component\n in the same Multi geometry. Maintains 1:1 matching of geometry to value.\n\n Prefix specifies type of geometry to be flatten. 'Multi' for MultiPoint and similar,\n \"Geom\" for GeometryCollection.\n\n Returns\n -------\n components : list of geometry\n\n component_index : index array\n indices are repeated for all components in the same Multi geometry\n \"\"\"\n components, component_index = [], []\n\n if not geoms.geom_type.str.startswith(prefix).any():\n return geoms, np.arange(len(geoms))\n\n for ix, geom in enumerate(geoms):\n if geom.type.startswith(prefix):\n for poly in geom:\n components.append(poly)\n component_index.append(ix)\n else:\n components.append(geom)\n component_index.append(ix)\n\n return components, np.array(component_index)\n\n\ndef _expand_kwargs(kwargs, multiindex):\n \"\"\"\n Most arguments to the plot functions must be a (single) value, or a sequence\n of values. This function checks each key-value pair in 'kwargs' and expands\n it (in place) to the correct length/formats with help of 'multiindex', unless\n the value appears to already be a valid (single) value for the key.\n \"\"\"\n from matplotlib.colors import is_color_like\n from typing import Iterable\n\n for att, value in kwargs.items():\n if \"color\" in att: # color(s), edgecolor(s), facecolor(s)\n if is_color_like(value):\n continue\n elif \"linestyle\" in att: # linestyle(s)\n # A single linestyle can be 2-tuple of a number and an iterable.\n if (\n isinstance(value, tuple)\n and len(value) == 2\n and isinstance(value[1], Iterable)\n ):\n continue\n elif att in [\"marker\", \"alpha\"]:\n # For these attributes, only a single value is allowed, so never expand.\n continue\n\n if pd.api.types.is_list_like(value):\n kwargs[att] = np.take(value, multiindex, axis=0)\n\n\ndef _plot_polygon_collection(\n ax, geoms, values=None, color=None, cmap=None, vmin=None, vmax=None, **kwargs\n):\n \"\"\"\n Plots a collection of Polygon and MultiPolygon geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : a sequence of `N` Polygons and/or MultiPolygons (can be mixed)\n\n values : a sequence of `N` values, optional\n Values will be mapped to colors using vmin/vmax/cmap. They should\n have 1:1 correspondence with the geometries (not their components).\n Otherwise follows `color` / `facecolor` kwargs.\n edgecolor : single color or sequence of `N` colors\n Color for the edge of the polygons\n facecolor : single color or sequence of `N` colors\n Color to fill the polygons. 
Cannot be used together with `values`.\n color : single color or sequence of `N` colors\n Sets both `edgecolor` and `facecolor`\n **kwargs\n Additional keyword arguments passed to the collection\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n\n try:\n from descartes.patch import PolygonPatch\n except ImportError:\n raise ImportError(\n \"The descartes package is required for plotting polygons in geopandas. \"\n \"You can install it using 'conda install -c conda-forge descartes' or \"\n \"'pip install descartes'.\"\n )\n from matplotlib.collections import PatchCollection\n\n geoms, multiindex = _flatten_multi_geoms(geoms)\n if values is not None:\n values = np.take(values, multiindex, axis=0)\n\n # PatchCollection does not accept some kwargs.\n kwargs = {\n att: value\n for att, value in kwargs.items()\n if att not in [\"markersize\", \"marker\"]\n }\n\n # Add to kwargs for easier checking below.\n if color is not None:\n kwargs[\"color\"] = color\n\n _expand_kwargs(kwargs, multiindex)\n\n collection = PatchCollection([PolygonPatch(poly) for poly in geoms], **kwargs)\n\n if values is not None:\n collection.set_array(np.asarray(values))\n collection.set_cmap(cmap)\n if \"norm\" not in kwargs:\n collection.set_clim(vmin, vmax)\n\n ax.add_collection(collection, autolim=True)\n ax.autoscale_view()\n return collection\n\n\nplot_polygon_collection = deprecated(_plot_polygon_collection)\n\n\ndef _plot_linestring_collection(\n ax, geoms, values=None, color=None, cmap=None, vmin=None, vmax=None, **kwargs\n):\n \"\"\"\n Plots a collection of LineString and MultiLineString geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : a sequence of `N` LineStrings and/or MultiLineStrings (can be\n mixed)\n values : a sequence of `N` values, optional\n Values will be mapped to colors using vmin/vmax/cmap. 
They should\n have 1:1 correspondence with the geometries (not their components).\n color : single color or sequence of `N` colors\n Cannot be used together with `values`.\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n from matplotlib.collections import LineCollection\n\n geoms, multiindex = _flatten_multi_geoms(geoms)\n if values is not None:\n values = np.take(values, multiindex, axis=0)\n\n # LineCollection does not accept some kwargs.\n kwargs = {\n att: value\n for att, value in kwargs.items()\n if att not in [\"markersize\", \"marker\"]\n }\n\n # Add to kwargs for easier checking below.\n if color is not None:\n kwargs[\"color\"] = color\n\n _expand_kwargs(kwargs, multiindex)\n\n segments = [np.array(linestring)[:, :2] for linestring in geoms]\n collection = LineCollection(segments, **kwargs)\n\n if values is not None:\n collection.set_array(np.asarray(values))\n collection.set_cmap(cmap)\n if \"norm\" not in kwargs:\n collection.set_clim(vmin, vmax)\n\n ax.add_collection(collection, autolim=True)\n ax.autoscale_view()\n return collection\n\n\nplot_linestring_collection = deprecated(_plot_linestring_collection)\n\n\ndef _plot_point_collection(\n ax,\n geoms,\n values=None,\n color=None,\n cmap=None,\n vmin=None,\n vmax=None,\n marker=\"o\",\n markersize=None,\n **kwargs\n):\n \"\"\"\n Plots a collection of Point and MultiPoint geometries to `ax`\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n where shapes will be plotted\n geoms : sequence of `N` Points or MultiPoints\n\n values : a sequence of `N` values, optional\n Values mapped to colors using vmin, vmax, and cmap.\n Cannot be specified together with `color`.\n markersize : scalar or array-like, optional\n Size of the markers. Note that under the hood ``scatter`` is\n used, so the specified value will be proportional to the\n area of the marker (size in points^2).\n\n Returns\n -------\n collection : matplotlib.collections.Collection that was plotted\n \"\"\"\n if values is not None and color is not None:\n raise ValueError(\"Can only specify one of 'values' and 'color' kwargs\")\n\n geoms, multiindex = _flatten_multi_geoms(geoms)\n if values is not None:\n values = np.take(values, multiindex, axis=0)\n\n x = [p.x for p in geoms]\n y = [p.y for p in geoms]\n\n # matplotlib 1.4 does not support c=None, and < 2.0 does not support s=None\n if values is not None:\n kwargs[\"c\"] = values\n if markersize is not None:\n kwargs[\"s\"] = markersize\n\n # Add to kwargs for easier checking below.\n if color is not None:\n kwargs[\"color\"] = color\n if marker is not None:\n kwargs[\"marker\"] = marker\n _expand_kwargs(kwargs, multiindex)\n\n if \"norm\" not in kwargs:\n collection = ax.scatter(x, y, vmin=vmin, vmax=vmax, cmap=cmap, **kwargs)\n else:\n collection = ax.scatter(x, y, cmap=cmap, **kwargs)\n\n return collection\n\n\nplot_point_collection = deprecated(_plot_point_collection)\n\n\ndef plot_series(\n s, cmap=None, color=None, ax=None, figsize=None, aspect=\"auto\", **style_kwds\n):\n \"\"\"\n Plot a GeoSeries.\n\n Generate a plot of a GeoSeries geometry with matplotlib.\n\n Parameters\n ----------\n s : Series\n The GeoSeries to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n cmap : str (default None)\n The name of a colormap recognized by matplotlib. Any\n colormap will work, but categorical colormaps are\n generally recommended. 
Examples of useful discrete\n colormaps include:\n\n tab10, tab20, Accent, Dark2, Paired, Pastel1, Set1, Set2\n\n color : str (default None)\n If specified, all objects will be colored uniformly.\n ax : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n figsize : pair of floats (default None)\n Size of the resulting matplotlib.figure.Figure. If the argument\n ax is given explicitly, figsize is ignored.\n aspect : 'auto', 'equal' or float (default 'auto')\n Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if\n however data are not projected (coordinates are long/lat), the aspect is by\n default set to 1/cos(s_y * pi/180) with s_y the y coordinate of the middle of\n the GeoSeries (the mean of the y range of bounding box) so that a long/lat\n square appears square in the middle of the plot. This implies an\n Equirectangular projection. It can also be set manually (float) as the ratio\n of y-unit to x-unit.\n **style_kwds : dict\n Color options to be passed on to the actual plot function, such\n as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,\n ``alpha``.\n\n Returns\n -------\n ax : matplotlib axes instance\n \"\"\"\n if \"colormap\" in style_kwds:\n warnings.warn(\n \"'colormap' is deprecated, please use 'cmap' instead \"\n \"(for consistency with matplotlib)\",\n FutureWarning,\n )\n cmap = style_kwds.pop(\"colormap\")\n if \"axes\" in style_kwds:\n warnings.warn(\n \"'axes' is deprecated, please use 'ax' instead \"\n \"(for consistency with pandas)\",\n FutureWarning,\n )\n ax = style_kwds.pop(\"axes\")\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise ImportError(\n \"The matplotlib package is required for plotting in geopandas. \"\n \"You can install it using 'conda install -c conda-forge matplotlib' or \"\n \"'pip install matplotlib'.\"\n )\n\n if ax is None:\n fig, ax = plt.subplots(figsize=figsize)\n\n if aspect == \"auto\":\n if s.crs and s.crs.is_geographic:\n bounds = s.total_bounds\n y_coord = np.mean([bounds[1], bounds[3]])\n ax.set_aspect(1 / np.cos(y_coord * np.pi / 180))\n # formula ported from R package sp\n # https://github.com/edzer/sp/blob/master/R/mapasp.R\n else:\n ax.set_aspect(\"equal\")\n else:\n ax.set_aspect(aspect)\n\n if s.empty:\n warnings.warn(\n \"The GeoSeries you are attempting to plot is \"\n \"empty. Nothing has been displayed.\",\n UserWarning,\n )\n return ax\n\n # if cmap is specified, create range of colors based on cmap\n values = None\n if cmap is not None:\n values = np.arange(len(s))\n if hasattr(cmap, \"N\"):\n values = values % cmap.N\n style_kwds[\"vmin\"] = style_kwds.get(\"vmin\", values.min())\n style_kwds[\"vmax\"] = style_kwds.get(\"vmax\", values.max())\n\n # decompose GeometryCollections\n geoms, multiindex = _flatten_multi_geoms(s.geometry, prefix=\"Geom\")\n values = np.take(values, multiindex, axis=0) if cmap else None\n expl_series = geopandas.GeoSeries(geoms)\n\n geom_types = expl_series.type\n poly_idx = np.asarray((geom_types == \"Polygon\") | (geom_types == \"MultiPolygon\"))\n line_idx = np.asarray(\n (geom_types == \"LineString\")\n | (geom_types == \"MultiLineString\")\n | (geom_types == \"LinearRing\")\n )\n point_idx = np.asarray((geom_types == \"Point\") | (geom_types == \"MultiPoint\"))\n\n # plot all Polygons and all MultiPolygon components in the same collection\n polys = expl_series[poly_idx]\n if not polys.empty:\n # color overrides both face and edgecolor. 
As we want people to be\n # able to use edgecolor as well, pass color to facecolor\n facecolor = style_kwds.pop(\"facecolor\", None)\n if color is not None:\n facecolor = color\n\n values_ = values[poly_idx] if cmap else None\n _plot_polygon_collection(\n ax, polys, values_, facecolor=facecolor, cmap=cmap, **style_kwds\n )\n\n # plot all LineStrings and MultiLineString components in same collection\n lines = expl_series[line_idx]\n if not lines.empty:\n values_ = values[line_idx] if cmap else None\n _plot_linestring_collection(\n ax, lines, values_, color=color, cmap=cmap, **style_kwds\n )\n\n # plot all Points in the same collection\n points = expl_series[point_idx]\n if not points.empty:\n values_ = values[point_idx] if cmap else None\n _plot_point_collection(\n ax, points, values_, color=color, cmap=cmap, **style_kwds\n )\n\n plt.draw()\n return ax\n\n\ndef plot_dataframe(\n df,\n column=None,\n cmap=None,\n color=None,\n ax=None,\n cax=None,\n categorical=False,\n legend=False,\n scheme=None,\n k=5,\n vmin=None,\n vmax=None,\n markersize=None,\n figsize=None,\n legend_kwds=None,\n categories=None,\n classification_kwds=None,\n missing_kwds=None,\n aspect=\"auto\",\n **style_kwds\n):\n \"\"\"\n Plot a GeoDataFrame.\n\n Generate a plot of a GeoDataFrame with matplotlib. If a\n column is specified, the plot coloring will be based on values\n in that column.\n\n Parameters\n ----------\n df : GeoDataFrame\n The GeoDataFrame to be plotted. Currently Polygon,\n MultiPolygon, LineString, MultiLineString and Point\n geometries can be plotted.\n column : str, np.array, pd.Series (default None)\n The name of the dataframe column, np.array, or pd.Series to be plotted.\n If np.array or pd.Series are used then it must have same length as\n dataframe. Values are used to color the plot. Ignored if `color` is\n also set.\n cmap : str (default None)\n The name of a colormap recognized by matplotlib.\n color : str (default None)\n If specified, all objects will be colored uniformly.\n ax : matplotlib.pyplot.Artist (default None)\n axes on which to draw the plot\n cax : matplotlib.pyplot Artist (default None)\n axes on which to draw the legend in case of color map.\n categorical : bool (default False)\n If False, cmap will reflect numerical values of the\n column being plotted. For non-numerical columns, this\n will be set to True.\n legend : bool (default False)\n Plot a legend. Ignored if no `column` is given, or if `color` is given.\n scheme : str (default None)\n Name of a choropleth classification scheme (requires mapclassify).\n A mapclassify.MapClassifier object will be used\n under the hood. Supported are all schemes provided by mapclassify (e.g.\n 'BoxPlot', 'EqualInterval', 'FisherJenks', 'FisherJenksSampled',\n 'HeadTailBreaks', 'JenksCaspall', 'JenksCaspallForced',\n 'JenksCaspallSampled', 'MaxP', 'MaximumBreaks',\n 'NaturalBreaks', 'Quantiles', 'Percentiles', 'StdMean',\n 'UserDefined'). Arguments can be passed in classification_kwds.\n k : int (default 5)\n Number of classes (ignored if scheme is None)\n vmin : None or float (default None)\n Minimum value of cmap. If None, the minimum data value\n in the column to be plotted is used.\n vmax : None or float (default None)\n Maximum value of cmap. If None, the maximum data value\n in the column to be plotted is used.\n markersize : str or float or sequence (default None)\n Only applies to point geometries within a frame.\n If a str, will use the values in the column of the frame specified\n by markersize to set the size of markers. 
Otherwise can be a value\n to apply to all points, or a sequence of the same length as the\n number of points.\n figsize : tuple of integers (default None)\n Size of the resulting matplotlib.figure.Figure. If the argument\n axes is given explicitly, figsize is ignored.\n legend_kwds : dict (default None)\n Keyword arguments to pass to matplotlib.pyplot.legend() or\n matplotlib.pyplot.colorbar().\n Additional accepted keywords when `scheme` is specified:\n\n fmt : string\n A formatting specification for the bin edges of the classes in the\n legend. For example, to have no decimals: ``{\"fmt\": \"{:.0f}\"}``.\n labels : list-like\n A list of legend labels to override the auto-generated labels.\n Needs to have the same number of elements as the number of\n classes (`k`).\n categories : list-like\n Ordered list-like object of categories to be used for categorical plot.\n classification_kwds : dict (default None)\n Keyword arguments to pass to mapclassify\n missing_kwds : dict (default None)\n Keyword arguments specifying color options (as style_kwds)\n to be passed on to geometries with missing values in addition to\n or overwriting other style kwds. If None, geometries with missing\n values are not plotted.\n aspect : 'auto', 'equal' or float (default 'auto')\n Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if\n however data are not projected (coordinates are long/lat), the aspect is by\n default set to 1/cos(df_y * pi/180) with df_y the y coordinate of the middle of\n the GeoDataFrame (the mean of the y range of bounding box) so that a long/lat\n square appears square in the middle of the plot. This implies an\n Equirectangular projection. It can also be set manually (float) as the ratio\n of y-unit to x-unit.\n\n **style_kwds : dict\n Style options to be passed on to the actual plot function, such\n as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,\n ``alpha``.\n\n Returns\n -------\n ax : matplotlib axes instance\n\n \"\"\"\n if \"colormap\" in style_kwds:\n warnings.warn(\n \"'colormap' is deprecated, please use 'cmap' instead \"\n \"(for consistency with matplotlib)\",\n FutureWarning,\n )\n cmap = style_kwds.pop(\"colormap\")\n if \"axes\" in style_kwds:\n warnings.warn(\n \"'axes' is deprecated, please use 'ax' instead \"\n \"(for consistency with pandas)\",\n FutureWarning,\n )\n ax = style_kwds.pop(\"axes\")\n if column is not None and color is not None:\n warnings.warn(\n \"Only specify one of 'column' or 'color'. Using 'color'.\", UserWarning\n )\n column = None\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise ImportError(\n \"The matplotlib package is required for plotting in geopandas. \"\n \"You can install it using 'conda install -c conda-forge matplotlib' or \"\n \"'pip install matplotlib'.\"\n )\n\n if ax is None:\n if cax is not None:\n raise ValueError(\"'ax' can not be None if 'cax' is not.\")\n fig, ax = plt.subplots(figsize=figsize)\n\n if aspect == \"auto\":\n if df.crs and df.crs.is_geographic:\n bounds = df.total_bounds\n y_coord = np.mean([bounds[1], bounds[3]])\n ax.set_aspect(1 / np.cos(y_coord * np.pi / 180))\n # formula ported from R package sp\n # https://github.com/edzer/sp/blob/master/R/mapasp.R\n else:\n ax.set_aspect(\"equal\")\n else:\n ax.set_aspect(aspect)\n\n # GH 1555\n # if legend_kwds set, copy so we don't update it in place\n if legend_kwds is not None:\n legend_kwds = legend_kwds.copy()\n\n if df.empty:\n warnings.warn(\n \"The GeoDataFrame you are attempting to plot is \"\n \"empty. 
Nothing has been displayed.\",\n UserWarning,\n )\n return ax\n\n if isinstance(markersize, str):\n markersize = df[markersize].values\n\n if column is None:\n return plot_series(\n df.geometry,\n cmap=cmap,\n color=color,\n ax=ax,\n figsize=figsize,\n markersize=markersize,\n aspect=aspect,\n **style_kwds\n )\n\n # To accept pd.Series and np.arrays as column\n if isinstance(column, (np.ndarray, pd.Series)):\n if column.shape[0] != df.shape[0]:\n raise ValueError(\n \"The dataframe and given column have different number of rows.\"\n )\n else:\n values = column\n else:\n values = df[column]\n\n if pd.api.types.is_categorical_dtype(values.dtype):\n if categories is not None:\n raise ValueError(\n \"Cannot specify 'categories' when column has categorical dtype\"\n )\n categorical = True\n elif values.dtype is np.dtype(\"O\") or categories:\n categorical = True\n\n nan_idx = np.asarray(pd.isna(values), dtype=\"bool\")\n\n # Define `values` as a Series\n if categorical:\n if cmap is None:\n cmap = \"tab10\"\n\n cat = pd.Categorical(values, categories=categories)\n categories = list(cat.categories)\n\n # values missing in the Categorical but not in original values\n missing = list(np.unique(values[~nan_idx & cat.isna()]))\n if missing:\n raise ValueError(\n \"Column contains values not listed in categories. \"\n \"Missing categories: {}.\".format(missing)\n )\n\n values = cat.codes[~nan_idx]\n vmin = 0 if vmin is None else vmin\n vmax = len(categories) - 1 if vmax is None else vmax\n\n if scheme is not None:\n if classification_kwds is None:\n classification_kwds = {}\n if \"k\" not in classification_kwds:\n classification_kwds[\"k\"] = k\n\n binning = _mapclassify_choro(values[~nan_idx], scheme, **classification_kwds)\n # set categorical to True for creating the legend\n categorical = True\n if legend_kwds is not None and \"labels\" in legend_kwds:\n if len(legend_kwds[\"labels\"]) != binning.k:\n raise ValueError(\n \"Number of labels must match number of bins, \"\n \"received {} labels for {} bins\".format(\n len(legend_kwds[\"labels\"]), binning.k\n )\n )\n else:\n categories = list(legend_kwds.pop(\"labels\"))\n else:\n fmt = \"{:.2f}\"\n if legend_kwds is not None and \"fmt\" in legend_kwds:\n fmt = legend_kwds.pop(\"fmt\")\n categories = binning.get_legend_classes(fmt)\n values = np.array(binning.yb)\n\n # fill values with placeholder where were NaNs originally to map them properly\n # (after removing them in categorical or scheme)\n if categorical:\n for n in np.where(nan_idx)[0]:\n values = np.insert(values, n, values[0])\n\n mn = values[~np.isnan(values)].min() if vmin is None else vmin\n mx = values[~np.isnan(values)].max() if vmax is None else vmax\n\n # decompose GeometryCollections\n geoms, multiindex = _flatten_multi_geoms(df.geometry, prefix=\"Geom\")\n values = np.take(values, multiindex, axis=0)\n nan_idx = np.take(nan_idx, multiindex, axis=0)\n expl_series = geopandas.GeoSeries(geoms)\n\n geom_types = expl_series.type\n poly_idx = np.asarray((geom_types == \"Polygon\") | (geom_types == \"MultiPolygon\"))\n line_idx = np.asarray(\n (geom_types == \"LineString\")\n | (geom_types == \"MultiLineString\")\n | (geom_types == \"LinearRing\")\n )\n point_idx = np.asarray((geom_types == \"Point\") | (geom_types == \"MultiPoint\"))\n\n # plot all Polygons and all MultiPolygon components in the same collection\n polys = expl_series[poly_idx & np.invert(nan_idx)]\n subset = values[poly_idx & np.invert(nan_idx)]\n if not polys.empty:\n _plot_polygon_collection(\n ax, polys, subset, 
vmin=mn, vmax=mx, cmap=cmap, **style_kwds\n )\n\n # plot all LineStrings and MultiLineString components in same collection\n lines = expl_series[line_idx & np.invert(nan_idx)]\n subset = values[line_idx & np.invert(nan_idx)]\n if not lines.empty:\n _plot_linestring_collection(\n ax, lines, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds\n )\n\n # plot all Points in the same collection\n points = expl_series[point_idx & np.invert(nan_idx)]\n subset = values[point_idx & np.invert(nan_idx)]\n if not points.empty:\n if isinstance(markersize, np.ndarray):\n markersize = np.take(markersize, multiindex, axis=0)\n markersize = markersize[point_idx & np.invert(nan_idx)]\n _plot_point_collection(\n ax,\n points,\n subset,\n vmin=mn,\n vmax=mx,\n markersize=markersize,\n cmap=cmap,\n **style_kwds\n )\n\n if missing_kwds is not None:\n if color:\n if \"color\" not in missing_kwds:\n missing_kwds[\"color\"] = color\n\n merged_kwds = style_kwds.copy()\n merged_kwds.update(missing_kwds)\n\n plot_series(expl_series[nan_idx], ax=ax, **merged_kwds)\n\n if legend and not color:\n\n if legend_kwds is None:\n legend_kwds = {}\n if \"fmt\" in legend_kwds:\n legend_kwds.pop(\"fmt\")\n\n from matplotlib.lines import Line2D\n from matplotlib.colors import Normalize\n from matplotlib import cm\n\n norm = style_kwds.get(\"norm\", None)\n if not norm:\n norm = Normalize(vmin=mn, vmax=mx)\n n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)\n if categorical:\n patches = []\n for value, cat in enumerate(categories):\n patches.append(\n Line2D(\n [0],\n [0],\n linestyle=\"none\",\n marker=\"o\",\n alpha=style_kwds.get(\"alpha\", 1),\n markersize=10,\n markerfacecolor=n_cmap.to_rgba(value),\n markeredgewidth=0,\n )\n )\n if missing_kwds is not None:\n if \"color\" in merged_kwds:\n merged_kwds[\"facecolor\"] = merged_kwds[\"color\"]\n patches.append(\n Line2D(\n [0],\n [0],\n linestyle=\"none\",\n marker=\"o\",\n alpha=merged_kwds.get(\"alpha\", 1),\n markersize=10,\n markerfacecolor=merged_kwds.get(\"facecolor\", None),\n markeredgecolor=merged_kwds.get(\"edgecolor\", None),\n markeredgewidth=merged_kwds.get(\n \"linewidth\", 1 if merged_kwds.get(\"edgecolor\", False) else 0\n ),\n )\n )\n categories.append(merged_kwds.get(\"label\", \"NaN\"))\n legend_kwds.setdefault(\"numpoints\", 1)\n legend_kwds.setdefault(\"loc\", \"best\")\n ax.legend(patches, categories, **legend_kwds)\n else:\n\n if cax is not None:\n legend_kwds.setdefault(\"cax\", cax)\n else:\n legend_kwds.setdefault(\"ax\", ax)\n\n n_cmap.set_array([])\n ax.get_figure().colorbar(n_cmap, **legend_kwds)\n\n plt.draw()\n return ax\n\n\ndef _mapclassify_choro(values, scheme, **classification_kwds):\n \"\"\"\n Wrapper for choropleth schemes from mapclassify for use with plot_dataframe\n\n Parameters\n ----------\n values\n Series to be plotted\n scheme : str\n One of mapclassify classification schemes\n Options are BoxPlot, EqualInterval, FisherJenks,\n FisherJenksSampled, HeadTailBreaks, JenksCaspall,\n JenksCaspallForced, JenksCaspallSampled, MaxP,\n MaximumBreaks, NaturalBreaks, Quantiles, Percentiles, StdMean,\n UserDefined\n\n **classification_kwds : dict\n Keyword arguments for classification scheme\n For details see mapclassify documentation:\n https://pysal.org/mapclassify/api.html\n\n Returns\n -------\n binning\n Binning objects that holds the Series with values replaced with\n class identifier and the bins.\n \"\"\"\n try:\n import mapclassify.classifiers as classifiers\n\n except ImportError:\n raise ImportError(\n \"The 'mapclassify' >= 2.2.0 package 
is required to use the 'scheme' keyword\"\n )\n from mapclassify import __version__ as mc_version\n\n if mc_version < LooseVersion(\"2.2.0\"):\n raise ImportError(\n \"The 'mapclassify' >= 2.2.0 package is required to \"\n \"use the 'scheme' keyword\"\n )\n schemes = {}\n for classifier in classifiers.CLASSIFIERS:\n schemes[classifier.lower()] = getattr(classifiers, classifier)\n\n scheme = scheme.lower()\n\n # mapclassify < 2.1 cleaned up the scheme names (removing underscores)\n # trying both to keep compatibility with older versions and provide\n # compatibility with newer versions of mapclassify\n oldnew = {\n \"Box_Plot\": \"BoxPlot\",\n \"Equal_Interval\": \"EqualInterval\",\n \"Fisher_Jenks\": \"FisherJenks\",\n \"Fisher_Jenks_Sampled\": \"FisherJenksSampled\",\n \"HeadTail_Breaks\": \"HeadTailBreaks\",\n \"Jenks_Caspall\": \"JenksCaspall\",\n \"Jenks_Caspall_Forced\": \"JenksCaspallForced\",\n \"Jenks_Caspall_Sampled\": \"JenksCaspallSampled\",\n \"Max_P_Plassifier\": \"MaxP\",\n \"Maximum_Breaks\": \"MaximumBreaks\",\n \"Natural_Breaks\": \"NaturalBreaks\",\n \"Std_Mean\": \"StdMean\",\n \"User_Defined\": \"UserDefined\",\n }\n scheme_names_mapping = {}\n scheme_names_mapping.update(\n {old.lower(): new.lower() for old, new in oldnew.items()}\n )\n scheme_names_mapping.update(\n {new.lower(): old.lower() for old, new in oldnew.items()}\n )\n\n try:\n scheme_class = schemes[scheme]\n except KeyError:\n scheme = scheme_names_mapping.get(scheme, scheme)\n try:\n scheme_class = schemes[scheme]\n except KeyError:\n raise ValueError(\n \"Invalid scheme. Scheme must be in the set: %r\" % schemes.keys()\n )\n\n if classification_kwds[\"k\"] is not None:\n from inspect import getfullargspec as getspec\n\n spec = getspec(scheme_class.__init__)\n if \"k\" not in spec.args:\n del classification_kwds[\"k\"]\n try:\n binning = scheme_class(np.asarray(values), **classification_kwds)\n except TypeError:\n raise TypeError(\"Invalid keyword argument for %r \" % scheme)\n return binning\n", "path": "geopandas/plotting.py" } ]
diff --git a/geopandas/plotting.py b/geopandas/plotting.py index b827e7819c..e05d66678f 100644 --- a/geopandas/plotting.py +++ b/geopandas/plotting.py @@ -600,6 +600,11 @@ def plot_dataframe( else: ax.set_aspect(aspect) + # GH 1555 + # if legend_kwds set, copy so we don't update it in place + if legend_kwds is not None: + legend_kwds = legend_kwds.copy() + if df.empty: warnings.warn( "The GeoDataFrame you are attempting to plot is "
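The one-line copy added in this diff (GH 1555) matters because the legend branch of `plot_dataframe` shown above mutates the caller's `legend_kwds` in place through `pop("fmt")` and several `setdefault(...)` calls. The snippet below is a minimal stand-in for that behaviour — `render_legend` is a hypothetical name, not the GeoPandas API — just to show the symptom the copy prevents:

```python
# Hypothetical stand-in mirroring the pop()/setdefault() calls in the legend
# branch of plot_dataframe; not the real GeoPandas code path.
def render_legend(legend_kwds, cax=None):
    if "fmt" in legend_kwds:
        legend_kwds.pop("fmt")                 # caller's dict loses "fmt"
    if cax is not None:
        legend_kwds.setdefault("cax", cax)
    else:
        legend_kwds.setdefault("loc", "best")  # caller's dict gains "loc"
    return legend_kwds


shared_kwds = {"fmt": "{:.0f}", "title": "population"}
render_legend(shared_kwds)   # first plot call
print(shared_kwds)           # {'title': 'population', 'loc': 'best'} -- mutated

# With the GH 1555 fix, plot_dataframe copies legend_kwds first, so a dict
# reused across several .plot() calls keeps its original contents.
```

Copying once at the top of the function keeps the user-facing contract (no surprise mutation of passed-in dicts) without having to audit every later `setdefault`/`pop`.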
zulip__zulip-21420
Document "Manage this user" profile link Administrators now see a "Manage this user" link at the bottom of the user profile modal, which will often be handier than getting to the user management UI through the settings. <img width="300" alt="Screen Shot 2022-03-14 at 3 46 02 PM" src="https://user-images.githubusercontent.com/2090066/158273279-acedbd78-a5b8-4a5a-ae48-cda2eb19e931.png"> We should probably document this link as the primary way to perform certain actions, with the settings approach being an alternative. Affected pages I'm aware of: - https://zulip.com/help/change-a-users-name - https://zulip.com/help/change-a-users-role - https://zulip.com/help/deactivate-or-reactivate-a-user Document user management via profile In #20373, we added the ability to: 1. Manage a user from their profile 2. Deactivate a user from the "manage user" modal We should document this in the Help center. In particular, we should indicate that user management actions (e.g. Change a user's name, etc.) can be done from either the gear menu, or from their profile. (Maybe we can do this by using tabs in the instructions.) For deactivating a user, I think the button inside the modal only needs to be described for the approach where you start from the profile. This page might be a good related article: https://zulip.com/help/view-someones-profile
[ { "content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\n\nfrom zerver.lib.markdown.preprocessor_priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|\\s*(.+?)\\s*\\}$\")\n\nCODE_SECTION_TEMPLATE = \"\"\"\n<div class=\"code-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-language=\"{data_language}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-language=\"{data_language}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"send-email-invitations\": \"Send email invitations\",\n \"share-an-invite-link\": \"Share an invite link\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-topics\": \"Via recent topics\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_name\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = CODE_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] 
+ 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_language=tab[\"tab_name\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return \"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_name = tab.get(\"tab_name\")\n tab_label = TAB_SECTION_LABELS.get(tab_name)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_name}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_language=tab_name, label=tab_label)\n li_elements.append(li)\n\n return NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_name\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py" } ]
[ { "content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\n\nfrom zerver.lib.markdown.preprocessor_priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|\\s*(.+?)\\s*\\}$\")\n\nCODE_SECTION_TEMPLATE = \"\"\"\n<div class=\"code-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-language=\"{data_language}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-language=\"{data_language}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"send-email-invitations\": \"Send email invitations\",\n \"share-an-invite-link\": \"Share an invite link\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-topics\": \"Via recent topics\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"via-user-profile\": \"Via the user's profile\",\n \"via-organization-settings\": \"Via organization settings\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_name\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = CODE_SECTION_TEMPLATE.format(\n tab_class=tab_class, 
nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_language=tab[\"tab_name\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return \"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_name = tab.get(\"tab_name\")\n tab_label = TAB_SECTION_LABELS.get(tab_name)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_name}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_language=tab_name, label=tab_label)\n li_elements.append(li)\n\n return NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_name\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py" } ]
diff --git a/templates/zerver/help/change-a-users-name.md b/templates/zerver/help/change-a-users-name.md index e4f2353b8e2af..8e7da1ba0747e 100644 --- a/templates/zerver/help/change-a-users-name.md +++ b/templates/zerver/help/change-a-users-name.md @@ -12,10 +12,25 @@ Organization administrators can always change any user's name. {start_tabs} +{tab|via-user-profile} + +1. Hover over a user's name in the right sidebar. + +1. Click on the ellipsis (<i class="zulip-icon zulip-icon-ellipsis-v-solid"></i>) + to the right of their name. + +1. Click **Manage this user**. + +1. Under **Full name**, enter a new name. + +{!save-changes.md!} + +{tab|via-organization-settings} + {settings_tab|user-list-admin} 1. Find the user you would like to manage. Click the **pencil** -(<i class="fa fa-pencil"></i>) to the right of their name. + (<i class="fa fa-pencil"></i>) to the right of their name. 1. Under **Full name**, enter a new name. diff --git a/templates/zerver/help/change-a-users-role.md b/templates/zerver/help/change-a-users-role.md index 7032fa4302acc..1f86c13657241 100644 --- a/templates/zerver/help/change-a-users-role.md +++ b/templates/zerver/help/change-a-users-role.md @@ -15,22 +15,32 @@ there is at least one other owner in the organization (consider promoting a new owner or [deactivating the organization](/help/deactivate-your-organization) instead). -**Changes** Organization owners were introduced in Zulip 3.0; users -that were marked as administrators in older Zulip instances are -automatically converted during the upgrade to Zulip 3.0 into owners -(who have the same permissions as administrators did previously). - ### Change a user's role {start_tabs} +{tab|via-user-profile} + +1. Hover over a user's name in the right sidebar. + +1. Click on the ellipsis (<i class="zulip-icon zulip-icon-ellipsis-v-solid"></i>) + to the right of their name. + +1. Click **Manage this user**. + +1. Under **User role**, select a [role](/help/roles-and-permissions). + +1. Click **Save changes**. The new permissions will take effect immediately. + +{tab|via-organization-settings} + {settings_tab|user-list-admin} 1. Find the user you would like to manage. Click the **pencil** -(<i class="fa fa-pencil"></i>) to the right of their name. + (<i class="fa fa-pencil"></i>) to the right of their name. -1. Under **User role**, select **Owner**, **Administrator**, **Moderators**, **Member** or **Guest**. +1. Under **User role**, select a [role](/help/roles-and-permissions). -1. Click **Save changes**. The new rights will take effect immediately. +1. Click **Save changes**. The new permissions will take effect immediately. {end_tabs} diff --git a/templates/zerver/help/deactivate-or-reactivate-a-user.md b/templates/zerver/help/deactivate-or-reactivate-a-user.md index f35ab8f4e7255..e93e04a600967 100644 --- a/templates/zerver/help/deactivate-or-reactivate-a-user.md +++ b/templates/zerver/help/deactivate-or-reactivate-a-user.md @@ -14,12 +14,27 @@ Note that organization administrators cannot deactivate organization owners. {start_tabs} +{tab|via-user-profile} + +1. Hover over a user's name in the right sidebar. + +1. Click on the ellipsis (<i class="zulip-icon zulip-icon-ellipsis-v-solid"></i>) + to the right of their name. + +1. Click **Manage this user**. + +1. Click the **Deactivate user** button at the bottom. + +1. Approve by clicking **Confirm**. + +{tab|via-organization-settings} + {settings_tab|user-list-admin} - 4. Click the **Deactivate** button to the right of the user account that you -want to deactivate. +1. 
Click the **Deactivate** button to the right of the user account that you + want to deactivate. -4. Approve by clicking **Confirm**. +1. Approve by clicking **Confirm**. {end_tabs} diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py index 7ad67de22d240..b18f351e3859a 100644 --- a/zerver/lib/markdown/tabbed_sections.py +++ b/zerver/lib/markdown/tabbed_sections.py @@ -76,6 +76,8 @@ "instructions-for-all-platforms": "Instructions for all platforms", "public-streams": "Public streams", "private-streams": "Private streams", + "via-user-profile": "Via the user's profile", + "via-organization-settings": "Via organization settings", }
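The two halves of this diff have to land together: every `{tab|...}` marker added to the help pages needs a matching entry in `TAB_SECTION_LABELS`, because `generate_nav_bar()` in `tabbed_sections.py` raises `ValueError` for an unregistered tab name. A standalone sketch of that coupling (just the lookup the preprocessor performs, not the preprocessor itself):

```python
# Standalone sketch of the lookup done by generate_nav_bar(); the real table
# lives in zerver/lib/markdown/tabbed_sections.py.
TAB_SECTION_LABELS = {
    "via-user-profile": "Via the user's profile",
    "via-organization-settings": "Via organization settings",
}


def label_for(tab_name):
    # A `{tab|some-name}` marker in a help page is only valid if a
    # human-readable label has been registered for that name.
    label = TAB_SECTION_LABELS.get(tab_name)
    if label is None:
        raise ValueError(
            f"Tab '{tab_name}' is not present in TAB_SECTION_LABELS"
        )
    return label


print(label_for("via-user-profile"))   # Via the user's profile

try:
    label_for("via-gear-menu")         # hypothetical, unregistered name
except ValueError as exc:
    print(exc)                         # the Markdown build would fail here
```

This is why documenting the profile-based flow is a two-file change: the help Markdown gets the new `{tab|...}` sections and the Python table gets their labels.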
stephenmcd__mezzanine-446
Issue with custom SLUGIFY method
So I try to use slugify from Pytils (https://github.com/j2a/pytils/blob/master/pytils/translit.py#L194 ) like this:

```
SLUGIFY = "pytils.translit.slugify"
```

Every time I save the model that subclasses Displayable I get an error:

```
slugify() got invalid parameter 1 of type NewsItem
```

That's the NewsItem code:

```
from mezzanine.core.fields import RichTextField
from mezzanine.core.models import Displayable
from mezzanine.core.managers import PublishedManager


class NewsItem(Displayable):
    full_text = RichTextField()

    objects = PublishedManager()

    class Meta:
        ordering = ('-publish_date',)

    def __unicode__(self):
        return self.title
```

What could be the problem here?
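The error comes from how Mezzanine builds the slug, not from the model above: `Slugged.get_slug()` in `mezzanine/core/models.py` (included below) does `return slugify(self)`, handing the configured `SLUGIFY` callable the model instance itself, and `pytils.translit.slugify` type-checks its argument — hence "invalid parameter 1 of type NewsItem". A sketch of a per-model workaround, assuming the `NewsItem` model from the report and using the documented `get_slug()` hook for custom slug logic:

```python
# Sketch of a workaround (assumes the NewsItem model from the report and a
# Mezzanine version where Slugged.get_slug() does `return slugify(self)`):
# override get_slug() so the transliterating slugifier gets a plain string.
from pytils.translit import slugify as pytils_slugify

from mezzanine.core.fields import RichTextField
from mezzanine.core.managers import PublishedManager
from mezzanine.core.models import Displayable


class NewsItem(Displayable):
    full_text = RichTextField()

    objects = PublishedManager()

    class Meta:
        ordering = ('-publish_date',)

    def get_slug(self):
        # Pass the title string, not the NewsItem instance, to pytils.
        return pytils_slugify(self.title)

    def __unicode__(self):
        return self.title
```

The upstream fix (see the diff further down) makes the equivalent change in core, `get_slug()` returning `slugify(self.title)`, after which a plain `SLUGIFY = "pytils.translit.slugify"` setting works without the override.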
[ { "content": "\nfrom django.contrib.contenttypes.generic import GenericForeignKey\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.db.models.base import ModelBase\nfrom django.template.defaultfilters import truncatewords_html\nfrom django.utils.html import strip_tags\nfrom django.utils.timesince import timesince\nfrom django.utils.translation import ugettext, ugettext_lazy as _\n\nfrom mezzanine.core.fields import RichTextField\nfrom mezzanine.core.managers import DisplayableManager, CurrentSiteManager\nfrom mezzanine.generic.fields import KeywordsField\nfrom mezzanine.utils.html import TagCloser\nfrom mezzanine.utils.models import base_concrete_model\nfrom mezzanine.utils.sites import current_site_id\nfrom mezzanine.utils.timezone import now\nfrom mezzanine.utils.urls import admin_url, slugify\n\n\nclass SiteRelated(models.Model):\n \"\"\"\n Abstract model for all things site-related. Adds a foreignkey to\n Django's ``Site`` model, and filters by site with all querysets.\n See ``mezzanine.utils.sites.current_site_id`` for implementation\n details.\n \"\"\"\n\n objects = CurrentSiteManager()\n\n class Meta:\n abstract = True\n\n site = models.ForeignKey(\"sites.Site\", editable=False)\n\n def save(self, update_site=False, *args, **kwargs):\n \"\"\"\n Set the site to the current site when the record is first\n created, or the ``update_site`` argument is explicitly set\n to ``True``.\n \"\"\"\n if update_site or not self.id:\n self.site_id = current_site_id()\n super(SiteRelated, self).save(*args, **kwargs)\n\n\nclass Slugged(SiteRelated):\n \"\"\"\n Abstract model that handles auto-generating slugs. Each slugged\n object is also affiliated with a specific site object.\n \"\"\"\n\n title = models.CharField(_(\"Title\"), max_length=500)\n slug = models.CharField(_(\"URL\"), max_length=2000, blank=True, null=True,\n help_text=_(\"Leave blank to have the URL auto-generated from \"\n \"the title.\"))\n\n class Meta:\n abstract = True\n ordering = (\"title\",)\n\n def __unicode__(self):\n return self.title\n\n def save(self, *args, **kwargs):\n \"\"\"\n Create a unique slug by appending an index.\n \"\"\"\n if not self.slug:\n self.slug = self.get_slug()\n # For custom content types, use the ``Page`` instance for\n # slug lookup.\n concrete_model = base_concrete_model(Slugged, self)\n i = 0\n while True:\n if i > 0:\n if i > 1:\n self.slug = self.slug.rsplit(\"-\", 1)[0]\n self.slug = \"%s-%s\" % (self.slug, i)\n qs = concrete_model.objects.all()\n if self.id is not None:\n qs = qs.exclude(id=self.id)\n try:\n qs.get(slug=self.slug)\n except ObjectDoesNotExist:\n break\n i += 1\n super(Slugged, self).save(*args, **kwargs)\n\n def get_slug(self):\n \"\"\"\n Allows subclasses to implement their own slug creation logic.\n \"\"\"\n return slugify(self)\n\n def admin_link(self):\n return \"<a href='%s'>%s</a>\" % (self.get_absolute_url(),\n ugettext(\"View on site\"))\n admin_link.allow_tags = True\n admin_link.short_description = \"\"\n\n\nclass MetaData(models.Model):\n \"\"\"\n Abstract model that provides meta data for content.\n \"\"\"\n\n _meta_title = models.CharField(_(\"Title\"), null=True, blank=True,\n max_length=500,\n help_text=_(\"Optional title to be used in the HTML title tag. 
\"\n \"If left blank, the main title field will be used.\"))\n description = models.TextField(_(\"Description\"), blank=True)\n gen_description = models.BooleanField(_(\"Generate description\"),\n help_text=_(\"If checked, the description will be automatically \"\n \"generated from content. Uncheck if you want to manually \"\n \"set a custom description.\"), default=True)\n keywords = KeywordsField(verbose_name=_(\"Keywords\"))\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n \"\"\"\n Set the description field on save.\n \"\"\"\n if self.gen_description:\n self.description = strip_tags(self.description_from_content())\n super(MetaData, self).save(*args, **kwargs)\n\n def meta_title(self):\n \"\"\"\n Accessor for the optional ``_meta_title`` field, which returns\n the string version of the instance if not provided.\n \"\"\"\n return self._meta_title or unicode(self)\n\n def description_from_content(self):\n \"\"\"\n Returns the first block or sentence of the first content-like\n field.\n \"\"\"\n description = \"\"\n # Use the first RichTextField, or TextField if none found.\n for field_type in (RichTextField, models.TextField):\n if not description:\n for field in self._meta.fields:\n if isinstance(field, field_type) and \\\n field.name != \"description\":\n description = getattr(self, field.name)\n if description:\n break\n # Fall back to the title if description couldn't be determined.\n if not description:\n description = unicode(self)\n # Strip everything after the first block or sentence.\n ends = (\"</p>\", \"<br />\", \"<br/>\", \"<br>\", \"</ul>\",\n \"\\n\", \". \", \"! \", \"? \")\n for end in ends:\n pos = description.lower().find(end)\n if pos > -1:\n description = TagCloser(description[:pos]).html\n break\n else:\n description = truncatewords_html(description, 100)\n return description\n\n\nCONTENT_STATUS_DRAFT = 1\nCONTENT_STATUS_PUBLISHED = 2\nCONTENT_STATUS_CHOICES = (\n (CONTENT_STATUS_DRAFT, _(\"Draft\")),\n (CONTENT_STATUS_PUBLISHED, _(\"Published\")),\n)\n\n\nclass Displayable(Slugged, MetaData):\n \"\"\"\n Abstract model that provides features of a visible page on the\n website such as publishing fields. Basis of Mezzanine pages,\n blog posts, and Cartridge products.\n \"\"\"\n\n status = models.IntegerField(_(\"Status\"),\n choices=CONTENT_STATUS_CHOICES, default=CONTENT_STATUS_PUBLISHED,\n help_text=_(\"With Draft chosen, will only be shown for admin users \"\n \"on the site.\"))\n publish_date = models.DateTimeField(_(\"Published from\"),\n help_text=_(\"With Published chosen, won't be shown until this time\"),\n blank=True, null=True)\n expiry_date = models.DateTimeField(_(\"Expires on\"),\n help_text=_(\"With Published chosen, won't be shown after this time\"),\n blank=True, null=True)\n short_url = models.URLField(blank=True, null=True)\n\n objects = DisplayableManager()\n search_fields = {\"keywords\": 10, \"title\": 5}\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n \"\"\"\n Set default for ``publish_date``. 
We can't use ``auto_add`` on\n the field as it will be blank when a blog post is created from\n the quick blog form in the admin dashboard.\n \"\"\"\n if self.publish_date is None:\n self.publish_date = now()\n super(Displayable, self).save(*args, **kwargs)\n\n def get_admin_url(self):\n return admin_url(self, \"change\", self.id)\n\n def publish_date_since(self):\n \"\"\"\n Returns the time since ``publish_date``.\n \"\"\"\n return timesince(self.publish_date)\n publish_date_since.short_description = _(\"Published from\")\n\n def get_absolute_url(self):\n \"\"\"\n Raise an error if called on a subclass without\n ``get_absolute_url`` defined, to ensure all search results\n contains a URL.\n \"\"\"\n name = self.__class__.__name__\n raise NotImplementedError(\"The model %s does not have \"\n \"get_absolute_url defined\" % name)\n\n\nclass RichText(models.Model):\n \"\"\"\n Provides a Rich Text field for managing general content and making\n it searchable.\n \"\"\"\n\n content = RichTextField(_(\"Content\"))\n\n search_fields = (\"content\",)\n\n class Meta:\n abstract = True\n\n\nclass OrderableBase(ModelBase):\n \"\"\"\n Checks for ``order_with_respect_to`` on the model's inner ``Meta``\n class and if found, copies it to a custom attribute and deletes it\n since it will cause errors when used with ``ForeignKey(\"self\")``.\n Also creates the ``ordering`` attribute on the ``Meta`` class if\n not yet provided.\n \"\"\"\n\n def __new__(cls, name, bases, attrs):\n if \"Meta\" not in attrs:\n class Meta:\n pass\n attrs[\"Meta\"] = Meta\n if hasattr(attrs[\"Meta\"], \"order_with_respect_to\"):\n order_field = attrs[\"Meta\"].order_with_respect_to\n attrs[\"order_with_respect_to\"] = order_field\n del attrs[\"Meta\"].order_with_respect_to\n if not hasattr(attrs[\"Meta\"], \"ordering\"):\n setattr(attrs[\"Meta\"], \"ordering\", (\"_order\",))\n return super(OrderableBase, cls).__new__(cls, name, bases, attrs)\n\n\nclass Orderable(models.Model):\n \"\"\"\n Abstract model that provides a custom ordering integer field\n similar to using Meta's ``order_with_respect_to``, since to\n date (Django 1.2) this doesn't work with ``ForeignKey(\"self\")``,\n or with Generic Relations. We may also want this feature for\n models that aren't ordered with respect to a particular field.\n \"\"\"\n\n __metaclass__ = OrderableBase\n\n _order = models.IntegerField(_(\"Order\"), null=True)\n\n class Meta:\n abstract = True\n\n def with_respect_to(self):\n \"\"\"\n Returns a dict to use as a filter for ordering operations\n containing the original ``Meta.order_with_respect_to`` value\n if provided. 
If the field is a Generic Relation, the dict\n returned contains names and values for looking up the\n relation's ``ct_field`` and ``fk_field`` attributes.\n \"\"\"\n try:\n name = self.order_with_respect_to\n value = getattr(self, name)\n except AttributeError:\n # No ``order_with_respect_to`` specified on the model.\n return {}\n # Support for generic relations.\n field = getattr(self.__class__, name)\n if isinstance(field, GenericForeignKey):\n names = (field.ct_field, field.fk_field)\n return dict([(name, getattr(self, name)) for name in names])\n return {name: value}\n\n def save(self, *args, **kwargs):\n \"\"\"\n Set the initial ordering value.\n \"\"\"\n if self._order is None:\n lookup = self.with_respect_to()\n lookup[\"_order__isnull\"] = False\n concrete_model = base_concrete_model(Orderable, self)\n self._order = concrete_model.objects.filter(**lookup).count()\n super(Orderable, self).save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Update the ordering values for siblings.\n \"\"\"\n lookup = self.with_respect_to()\n lookup[\"_order__gte\"] = self._order\n concrete_model = base_concrete_model(Orderable, self)\n after = concrete_model.objects.filter(**lookup)\n after.update(_order=models.F(\"_order\") - 1)\n super(Orderable, self).delete(*args, **kwargs)\n\n def adjacent_by_order(self, direction):\n \"\"\"\n Retrieves next object by order in the given direction.\n \"\"\"\n lookup = self.with_respect_to()\n lookup[\"_order\"] = self._order + direction\n concrete_model = base_concrete_model(Orderable, self)\n try:\n return concrete_model.objects.get(**lookup)\n except concrete_model.DoesNotExist:\n pass\n\n def next_by_order(self):\n \"\"\"\n Retrieves next object by order.\n \"\"\"\n return self.adjacent_by_order(1)\n\n def previous_by_order(self):\n \"\"\"\n Retrieves previous object by order.\n \"\"\"\n return self.adjacent_by_order(-1)\n\n\nclass Ownable(models.Model):\n \"\"\"\n Abstract model that provides ownership of an object for a user.\n \"\"\"\n\n user = models.ForeignKey(\"auth.User\", verbose_name=_(\"Author\"),\n related_name=\"%(class)ss\")\n\n class Meta:\n abstract = True\n\n def is_editable(self, request):\n \"\"\"\n Restrict in-line editing to the objects's owner and superusers.\n \"\"\"\n return request.user.is_superuser or request.user.id == self.user_id\n", "path": "mezzanine/core/models.py" } ]
[ { "content": "\nfrom django.contrib.contenttypes.generic import GenericForeignKey\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.db.models.base import ModelBase\nfrom django.template.defaultfilters import truncatewords_html\nfrom django.utils.html import strip_tags\nfrom django.utils.timesince import timesince\nfrom django.utils.translation import ugettext, ugettext_lazy as _\n\nfrom mezzanine.core.fields import RichTextField\nfrom mezzanine.core.managers import DisplayableManager, CurrentSiteManager\nfrom mezzanine.generic.fields import KeywordsField\nfrom mezzanine.utils.html import TagCloser\nfrom mezzanine.utils.models import base_concrete_model\nfrom mezzanine.utils.sites import current_site_id\nfrom mezzanine.utils.timezone import now\nfrom mezzanine.utils.urls import admin_url, slugify\n\n\nclass SiteRelated(models.Model):\n \"\"\"\n Abstract model for all things site-related. Adds a foreignkey to\n Django's ``Site`` model, and filters by site with all querysets.\n See ``mezzanine.utils.sites.current_site_id`` for implementation\n details.\n \"\"\"\n\n objects = CurrentSiteManager()\n\n class Meta:\n abstract = True\n\n site = models.ForeignKey(\"sites.Site\", editable=False)\n\n def save(self, update_site=False, *args, **kwargs):\n \"\"\"\n Set the site to the current site when the record is first\n created, or the ``update_site`` argument is explicitly set\n to ``True``.\n \"\"\"\n if update_site or not self.id:\n self.site_id = current_site_id()\n super(SiteRelated, self).save(*args, **kwargs)\n\n\nclass Slugged(SiteRelated):\n \"\"\"\n Abstract model that handles auto-generating slugs. Each slugged\n object is also affiliated with a specific site object.\n \"\"\"\n\n title = models.CharField(_(\"Title\"), max_length=500)\n slug = models.CharField(_(\"URL\"), max_length=2000, blank=True, null=True,\n help_text=_(\"Leave blank to have the URL auto-generated from \"\n \"the title.\"))\n\n class Meta:\n abstract = True\n ordering = (\"title\",)\n\n def __unicode__(self):\n return self.title\n\n def save(self, *args, **kwargs):\n \"\"\"\n Create a unique slug by appending an index.\n \"\"\"\n if not self.slug:\n self.slug = self.get_slug()\n # For custom content types, use the ``Page`` instance for\n # slug lookup.\n concrete_model = base_concrete_model(Slugged, self)\n i = 0\n while True:\n if i > 0:\n if i > 1:\n self.slug = self.slug.rsplit(\"-\", 1)[0]\n self.slug = \"%s-%s\" % (self.slug, i)\n qs = concrete_model.objects.all()\n if self.id is not None:\n qs = qs.exclude(id=self.id)\n try:\n qs.get(slug=self.slug)\n except ObjectDoesNotExist:\n break\n i += 1\n super(Slugged, self).save(*args, **kwargs)\n\n def get_slug(self):\n \"\"\"\n Allows subclasses to implement their own slug creation logic.\n \"\"\"\n return slugify(self.title)\n\n def admin_link(self):\n return \"<a href='%s'>%s</a>\" % (self.get_absolute_url(),\n ugettext(\"View on site\"))\n admin_link.allow_tags = True\n admin_link.short_description = \"\"\n\n\nclass MetaData(models.Model):\n \"\"\"\n Abstract model that provides meta data for content.\n \"\"\"\n\n _meta_title = models.CharField(_(\"Title\"), null=True, blank=True,\n max_length=500,\n help_text=_(\"Optional title to be used in the HTML title tag. 
\"\n \"If left blank, the main title field will be used.\"))\n description = models.TextField(_(\"Description\"), blank=True)\n gen_description = models.BooleanField(_(\"Generate description\"),\n help_text=_(\"If checked, the description will be automatically \"\n \"generated from content. Uncheck if you want to manually \"\n \"set a custom description.\"), default=True)\n keywords = KeywordsField(verbose_name=_(\"Keywords\"))\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n \"\"\"\n Set the description field on save.\n \"\"\"\n if self.gen_description:\n self.description = strip_tags(self.description_from_content())\n super(MetaData, self).save(*args, **kwargs)\n\n def meta_title(self):\n \"\"\"\n Accessor for the optional ``_meta_title`` field, which returns\n the string version of the instance if not provided.\n \"\"\"\n return self._meta_title or unicode(self)\n\n def description_from_content(self):\n \"\"\"\n Returns the first block or sentence of the first content-like\n field.\n \"\"\"\n description = \"\"\n # Use the first RichTextField, or TextField if none found.\n for field_type in (RichTextField, models.TextField):\n if not description:\n for field in self._meta.fields:\n if isinstance(field, field_type) and \\\n field.name != \"description\":\n description = getattr(self, field.name)\n if description:\n break\n # Fall back to the title if description couldn't be determined.\n if not description:\n description = unicode(self)\n # Strip everything after the first block or sentence.\n ends = (\"</p>\", \"<br />\", \"<br/>\", \"<br>\", \"</ul>\",\n \"\\n\", \". \", \"! \", \"? \")\n for end in ends:\n pos = description.lower().find(end)\n if pos > -1:\n description = TagCloser(description[:pos]).html\n break\n else:\n description = truncatewords_html(description, 100)\n return description\n\n\nCONTENT_STATUS_DRAFT = 1\nCONTENT_STATUS_PUBLISHED = 2\nCONTENT_STATUS_CHOICES = (\n (CONTENT_STATUS_DRAFT, _(\"Draft\")),\n (CONTENT_STATUS_PUBLISHED, _(\"Published\")),\n)\n\n\nclass Displayable(Slugged, MetaData):\n \"\"\"\n Abstract model that provides features of a visible page on the\n website such as publishing fields. Basis of Mezzanine pages,\n blog posts, and Cartridge products.\n \"\"\"\n\n status = models.IntegerField(_(\"Status\"),\n choices=CONTENT_STATUS_CHOICES, default=CONTENT_STATUS_PUBLISHED,\n help_text=_(\"With Draft chosen, will only be shown for admin users \"\n \"on the site.\"))\n publish_date = models.DateTimeField(_(\"Published from\"),\n help_text=_(\"With Published chosen, won't be shown until this time\"),\n blank=True, null=True)\n expiry_date = models.DateTimeField(_(\"Expires on\"),\n help_text=_(\"With Published chosen, won't be shown after this time\"),\n blank=True, null=True)\n short_url = models.URLField(blank=True, null=True)\n\n objects = DisplayableManager()\n search_fields = {\"keywords\": 10, \"title\": 5}\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n \"\"\"\n Set default for ``publish_date``. 
We can't use ``auto_add`` on\n the field as it will be blank when a blog post is created from\n the quick blog form in the admin dashboard.\n \"\"\"\n if self.publish_date is None:\n self.publish_date = now()\n super(Displayable, self).save(*args, **kwargs)\n\n def get_admin_url(self):\n return admin_url(self, \"change\", self.id)\n\n def publish_date_since(self):\n \"\"\"\n Returns the time since ``publish_date``.\n \"\"\"\n return timesince(self.publish_date)\n publish_date_since.short_description = _(\"Published from\")\n\n def get_absolute_url(self):\n \"\"\"\n Raise an error if called on a subclass without\n ``get_absolute_url`` defined, to ensure all search results\n contains a URL.\n \"\"\"\n name = self.__class__.__name__\n raise NotImplementedError(\"The model %s does not have \"\n \"get_absolute_url defined\" % name)\n\n\nclass RichText(models.Model):\n \"\"\"\n Provides a Rich Text field for managing general content and making\n it searchable.\n \"\"\"\n\n content = RichTextField(_(\"Content\"))\n\n search_fields = (\"content\",)\n\n class Meta:\n abstract = True\n\n\nclass OrderableBase(ModelBase):\n \"\"\"\n Checks for ``order_with_respect_to`` on the model's inner ``Meta``\n class and if found, copies it to a custom attribute and deletes it\n since it will cause errors when used with ``ForeignKey(\"self\")``.\n Also creates the ``ordering`` attribute on the ``Meta`` class if\n not yet provided.\n \"\"\"\n\n def __new__(cls, name, bases, attrs):\n if \"Meta\" not in attrs:\n class Meta:\n pass\n attrs[\"Meta\"] = Meta\n if hasattr(attrs[\"Meta\"], \"order_with_respect_to\"):\n order_field = attrs[\"Meta\"].order_with_respect_to\n attrs[\"order_with_respect_to\"] = order_field\n del attrs[\"Meta\"].order_with_respect_to\n if not hasattr(attrs[\"Meta\"], \"ordering\"):\n setattr(attrs[\"Meta\"], \"ordering\", (\"_order\",))\n return super(OrderableBase, cls).__new__(cls, name, bases, attrs)\n\n\nclass Orderable(models.Model):\n \"\"\"\n Abstract model that provides a custom ordering integer field\n similar to using Meta's ``order_with_respect_to``, since to\n date (Django 1.2) this doesn't work with ``ForeignKey(\"self\")``,\n or with Generic Relations. We may also want this feature for\n models that aren't ordered with respect to a particular field.\n \"\"\"\n\n __metaclass__ = OrderableBase\n\n _order = models.IntegerField(_(\"Order\"), null=True)\n\n class Meta:\n abstract = True\n\n def with_respect_to(self):\n \"\"\"\n Returns a dict to use as a filter for ordering operations\n containing the original ``Meta.order_with_respect_to`` value\n if provided. 
If the field is a Generic Relation, the dict\n returned contains names and values for looking up the\n relation's ``ct_field`` and ``fk_field`` attributes.\n \"\"\"\n try:\n name = self.order_with_respect_to\n value = getattr(self, name)\n except AttributeError:\n # No ``order_with_respect_to`` specified on the model.\n return {}\n # Support for generic relations.\n field = getattr(self.__class__, name)\n if isinstance(field, GenericForeignKey):\n names = (field.ct_field, field.fk_field)\n return dict([(name, getattr(self, name)) for name in names])\n return {name: value}\n\n def save(self, *args, **kwargs):\n \"\"\"\n Set the initial ordering value.\n \"\"\"\n if self._order is None:\n lookup = self.with_respect_to()\n lookup[\"_order__isnull\"] = False\n concrete_model = base_concrete_model(Orderable, self)\n self._order = concrete_model.objects.filter(**lookup).count()\n super(Orderable, self).save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Update the ordering values for siblings.\n \"\"\"\n lookup = self.with_respect_to()\n lookup[\"_order__gte\"] = self._order\n concrete_model = base_concrete_model(Orderable, self)\n after = concrete_model.objects.filter(**lookup)\n after.update(_order=models.F(\"_order\") - 1)\n super(Orderable, self).delete(*args, **kwargs)\n\n def adjacent_by_order(self, direction):\n \"\"\"\n Retrieves next object by order in the given direction.\n \"\"\"\n lookup = self.with_respect_to()\n lookup[\"_order\"] = self._order + direction\n concrete_model = base_concrete_model(Orderable, self)\n try:\n return concrete_model.objects.get(**lookup)\n except concrete_model.DoesNotExist:\n pass\n\n def next_by_order(self):\n \"\"\"\n Retrieves next object by order.\n \"\"\"\n return self.adjacent_by_order(1)\n\n def previous_by_order(self):\n \"\"\"\n Retrieves previous object by order.\n \"\"\"\n return self.adjacent_by_order(-1)\n\n\nclass Ownable(models.Model):\n \"\"\"\n Abstract model that provides ownership of an object for a user.\n \"\"\"\n\n user = models.ForeignKey(\"auth.User\", verbose_name=_(\"Author\"),\n related_name=\"%(class)ss\")\n\n class Meta:\n abstract = True\n\n def is_editable(self, request):\n \"\"\"\n Restrict in-line editing to the objects's owner and superusers.\n \"\"\"\n return request.user.is_superuser or request.user.id == self.user_id\n", "path": "mezzanine/core/models.py" } ]
diff --git a/mezzanine/core/models.py b/mezzanine/core/models.py index 32d60e76b7..e6eda39bc2 100644 --- a/mezzanine/core/models.py +++ b/mezzanine/core/models.py @@ -91,7 +91,7 @@ def get_slug(self): """ Allows subclasses to implement their own slug creation logic. """ - return slugify(self) + return slugify(self.title) def admin_link(self): return "<a href='%s'>%s</a>" % (self.get_absolute_url(),
spyder-ide__spyder-15864
FileNotFoundError in Online help ## Description ### What steps will reproduce the problem? <!--- You can use Markdown here ---> - **Step 1 :** Open `Online help` - **Step 2 :** Open `Virinas-code/Crocrodile` in package search bar and press *<Enter>* ### Error message > Python 3.7.3 [default, GCC 8.3.0] > Linux-4.19.0-16-amd64-x86_64-with-glibc2.28 > Module Index : Topics : Keywords > Error > Virinas-code/Crocrodile.html > There was an error while retrieving documentation for the object you requested: Object could not be found ### Traceback ```python-traceback File "/usr/lib/python3.7/socketserver.py", line 316, in _handle_request_noblock self.process_request(request, client_address) File "/usr/lib/python3.7/socketserver.py", line 347, in process_request self.finish_request(request, client_address) File "/usr/lib/python3.7/socketserver.py", line 360, in finish_request self.RequestHandlerClass(request, client_address, self) File "/usr/lib/python3.7/socketserver.py", line 720, in __init__ self.handle() File "/usr/lib/python3.7/http/server.py", line 426, in handle self.handle_one_request() File "/usr/lib/python3.7/http/server.py", line 414, in handle_one_request method() File "/usr/local/lib/python3.7/dist-packages/spyder/plugins/onlinehelp/pydoc_patch.py", line 911, in do_GET self.path, content_type).encode('utf-8')) File "/usr/local/lib/python3.7/dist-packages/spyder/plugins/onlinehelp/pydoc_patch.py", line 867, in _url_handler with open(css_path) as fp: FileNotFoundError: [Errno 2] No such file or directory: '/usr/local/lib/python3.7/dist-packages/spyder/plugins/onlinehelp/Virinas-code/static/css/dark_pydoc.css' ---------------------------------------- ``` ## Versions * Spyder version: 4.2.5 * Python version: 3.7.3 * Qt version: 5.12.10 * PyQt5 version: 5.12.3 * Operating System: Linux 4.19.0-16-amd64 ### Dependencies ``` # Mandatory: atomicwrites >=1.2.0 : 1.4.0 (OK) chardet >=2.0.0 : 4.0.0 (OK) cloudpickle >=0.5.0 : 1.6.0 (OK) diff_match_patch >=20181111 : 20200713 (OK) intervaltree >=3.0.2 : 3.1.0 (OK) IPython >=7.6.0 : 7.22.0 (OK) jedi =0.17.2 : 0.17.2 (OK) jsonschema >=3.2.0 : 3.2.0 (OK) keyring >=17.0.0 : 17.1.1 (OK) nbconvert >=4.0 : None (NOK) numpydoc >=0.6.0 : 1.1.0 (OK) parso =0.7.0 : 0.7.0 (OK) pexpect >=4.4.0 : 4.6.0 (OK) pickleshare >=0.4 : 0.7.5 (OK) psutil >=5.3 : 5.8.0 (OK) pygments >=2.0 : 2.8.1 (OK) pylint >=1.0 : 2.7.4 (OK) pyls >=0.36.2;<1.0.0 : 0.36.2 (OK) pyls_black >=0.4.6 : 0.4.6 (OK) pyls_spyder >=0.3.2 : 0.3.2 (OK) qdarkstyle >=2.8;<3.0 : 2.8.1 (OK) qtawesome >=0.5.7 : 1.0.2 (OK) qtconsole >=5.0.3 : 5.0.3 (OK) qtpy >=1.5.0 : 1.9.0 (OK) setuptools >=39.0.0 : 56.0.0 (OK) sphinx >=0.6.6 : 3.5.4 (OK) spyder_kernels >=1.10.2;<1.11.0 : 1.10.3 (OK) textdistance >=4.2.0 : 4.2.1 (OK) three_merge >=0.1.1 : 0.1.1 (OK) watchdog >=0.10.3;<2.0.0 : 1.0.2 (OK) xdg >=0.26 : 0.27 (OK) zmq >=17 : 17.1.2 (OK) # Optional: cython >=0.21 : None (NOK) matplotlib >=2.0.0 : 3.4.1 (OK) numpy >=1.7 : 1.20.2 (OK) pandas >=1.1.1 : None (NOK) scipy >=0.17.0 : None (NOK) sympy >=0.7.3 : None (NOK) # Spyder plugins: spyder_terminal : 0.5.0 (OK) ```
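The traceback points at path resolution rather than at pydoc itself: the generated pages reference the stylesheet with a relative URL, so once the requested page name contains a slash (`Virinas-code/Crocrodile.html`), the browser asks for the CSS one directory deeper and `_url_handler` in `pydoc_patch.py` (excerpted below) tries to open a file under `.../onlinehelp/Virinas-code/static/css/`, which does not exist. A minimal reproduction of that joining behaviour — `css_path_for` is a hypothetical helper, not the actual Spyder code:

```python
# Minimal sketch of the path problem suggested by the traceback; css_path_for
# is a hypothetical helper, not the real _url_handler in pydoc_patch.py.
import os.path

PLUGIN_DIR = "/usr/local/lib/python3.7/dist-packages/spyder/plugins/onlinehelp"
CSS_REL_PATH = os.path.join("static", "css", "dark_pydoc.css")


def css_path_for(requested_page):
    # A relative stylesheet link is resolved by the browser under the
    # directory part of the page URL, so the server sees that prefix too.
    return os.path.join(PLUGIN_DIR, os.path.dirname(requested_page), CSS_REL_PATH)


print(css_path_for("Crocrodile.html"))
# .../onlinehelp/static/css/dark_pydoc.css              -> exists, page renders

print(css_path_for("Virinas-code/Crocrodile.html"))
# .../onlinehelp/Virinas-code/static/css/dark_pydoc.css -> open() raises the
# FileNotFoundError shown in the report
```

Guarding the `open()` call, or anchoring the stylesheet path to the plugin directory regardless of the request path, would avoid the unhandled exception; the report only shows the symptom, so treat this as a diagnosis sketch rather than the actual patch.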
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright © 2018 André Roberge - mod_pydoc\n# Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"PyDoc patch\"\"\"\n# Standard libray\nimport builtins\nimport io\nimport inspect\nimport os\nimport pkgutil\nimport platform\nimport re\nimport sys\nimport tokenize\nimport warnings\n\n\n# Local imports\nfrom spyder.config.base import _, DEV\nfrom spyder.config.gui import is_dark_interface, get_font\nfrom spyder.py3compat import PY2, to_text_string\n\nif not PY2:\n from pydoc import (\n classname, classify_class_attrs, describe, Doc, format_exception_only,\n Helper, HTMLRepr, _is_bound_method, ModuleScanner, locate, replace,\n visiblename, isdata, getdoc, deque, _split_list)\n\n class CustomHTMLDoc(Doc):\n \"\"\"\n Formatter class for HTML documentation.\n\n See:\n https://github.com/aroberge/mod_pydoc/blob/master/mod_pydoc/pydoc.py\n \"\"\"\n\n # ------------------------------------------ HTML formatting utilities\n\n _repr_instance = HTMLRepr()\n repr = _repr_instance.repr\n escape = _repr_instance.escape\n\n def page(self, title, contents):\n \"\"\"Format an HTML page.\"\"\"\n return '''\\\n<!doctype html>\n<html><head><title>Python: %s</title>\n<meta charset=\"UTF-8\">\n</head><body>\n%s\n</body></html>''' % (title, contents)\n\n def heading(self, title, extras=''):\n \"\"\"Format a page heading.\"\"\"\n return '''\n <table class=\"heading\">\n <tr><td>{}</td><td class=\"align_right normal\">{}</td></tr></table>\n '''.format(title, extras or '&nbsp;')\n\n def html_section(\n self, title, contents, width=6,\n prelude='', marginalia=None, gap='&nbsp;',\n css_class=''):\n \"\"\"Format a section with a heading.\"\"\"\n result = '''\n <table class=\"{}\">\n <tr>\n <td colspan=\"3\">\n {}</td></tr>\n '''.format(css_class, title)\n if prelude:\n result = result + '''\n <tr><td rowspan=\"2\">{}</td>\n <td colspan=\"2\">{}</td></tr>\n <tr><td>{}</td>'''.format(marginalia, prelude, gap)\n elif marginalia:\n result = result + '''\n <tr><td>{}</td><td>{}</td>'''.format(marginalia, gap)\n\n contents = '{}</td></tr></table><br>'.format(contents)\n return result + '\\n<td class=\"inner_table\">' + contents\n\n def bigsection(self, title, *args, **kwargs):\n \"\"\"Format a section with a big heading.\"\"\"\n title = '<span class=\"section_title\">{}</span>'.format(title)\n return self.html_section(title, *args, **kwargs)\n\n def preformat(self, text):\n \"\"\"Format literal preformatted text.\"\"\"\n text = self.escape(text.expandtabs())\n return replace(text, '\\n\\n', '\\n \\n', '\\n\\n', '\\n \\n',\n ' ', '&nbsp;', '\\n', '<br>\\n')\n\n def multicolumn(self, list, format, cols=4):\n \"\"\"Format a list of items into a multi-column list.\"\"\"\n result = ''\n rows = (len(list)+cols-1)//cols\n for col in range(cols):\n result = (\n result + '<td style=\"width:%d%%;vertical-align:text-top\">'\n % (100//cols))\n for i in range(rows*col, rows*col+rows):\n if i < len(list):\n result = result + format(list[i]) + '<br>\\n'\n result = result + '</td>'\n return '<table style=\"width:100%%\"><tr>%s</tr></table>' % result\n\n def grey(self, text):\n \"\"\"Grey span.\"\"\"\n return '<span class=\"grey\">%s</span>' % text\n\n def namelink(self, name, *dicts):\n \"\"\"Make a link for an identifier, given name-to-URL mappings.\"\"\"\n for dict in dicts:\n if name in dict:\n return '<a href=\"%s\">%s</a>' % (dict[name], name)\n return name\n\n def classlink(self, object, modname):\n 
\"\"\"Make a link for a class.\"\"\"\n name, module = object.__name__, sys.modules.get(object.__module__)\n if hasattr(module, name) and getattr(module, name) is object:\n return '<a href=\"%s.html#%s\">%s</a>' % (\n module.__name__, name, classname(object, modname))\n return classname(object, modname)\n\n def modulelink(self, object):\n \"\"\"Make a link for a module.\"\"\"\n return '<a href=\"%s.html\">%s</a>' % (\n object.__name__, object.__name__)\n\n def modpkglink(self, modpkginfo):\n \"\"\"Make a link for a module or package to display in an index.\"\"\"\n name, path, ispackage, shadowed = modpkginfo\n if shadowed:\n return self.grey(name)\n if path:\n url = '%s.%s.html' % (path, name)\n else:\n url = '%s.html' % name\n if ispackage:\n text = '%s&nbsp;(package)' % name\n else:\n text = name\n return '<a href=\"%s\">%s</a>' % (url, text)\n\n def filelink(self, url, path):\n \"\"\"Make a link to source file.\"\"\"\n return '<a href=\"file:%s\">%s</a>' % (url, path)\n\n def markup(self, text, escape=None, funcs={}, classes={}, methods={}):\n \"\"\"\n Mark up some plain text, given a context of symbols to look for.\n\n Each context dictionary maps object names to anchor names.\n \"\"\"\n escape = escape or self.escape\n results = []\n here = 0\n pattern = re.compile(r'\\b((http|ftp)://\\S+[\\w/]|'\n r'RFC[- ]?(\\d+)|'\n r'PEP[- ]?(\\d+)|'\n r'(self\\.)?(\\w+))')\n while True:\n match = pattern.search(text, here)\n if not match:\n break\n start, end = match.span()\n results.append(escape(text[here:start]))\n\n all, scheme, rfc, pep, selfdot, name = match.groups()\n if scheme:\n url = escape(all).replace('\"', '&quot;')\n results.append('<a href=\"%s\">%s</a>' % (url, url))\n elif rfc:\n url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)\n results.append('<a href=\"%s\">%s</a>' % (url, escape(all)))\n elif pep:\n url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)\n results.append('<a href=\"%s\">%s</a>' % (url, escape(all)))\n elif text[end:end+1] == '(':\n results.append(\n self.namelink(name, methods, funcs, classes))\n elif selfdot:\n results.append('self.%s' % name)\n else:\n results.append(self.namelink(name, classes))\n here = end\n results.append(escape(text[here:]))\n return ''.join(results)\n\n # --------------------------------------------- type-specific routines\n\n def formattree(self, tree, modname, parent=None):\n \"\"\"\n Produce HTML for a class tree as given by inspect.getclasstree().\n \"\"\"\n result = ''\n for entry in tree:\n if type(entry) is type(()):\n c, bases = entry\n result = result + '<dt>'\n result = result + self.classlink(c, modname)\n if bases and bases != (parent,):\n parents = []\n for base in bases:\n parents.append(self.classlink(base, modname))\n result = result + '(' + ', '.join(parents) + ')'\n result = result + '\\n</dt>'\n elif type(entry) is type([]):\n result = result + '<dd>\\n%s</dd>\\n' % self.formattree(\n entry, modname, c)\n return '<dl><dt></dt>\\n%s<dd></dd></dl>\\n' % result\n\n def docmodule(self, object, name=None, mod=None, *ignored):\n \"\"\"Produce HTML documentation for a module object.\"\"\"\n name = object.__name__ # ignore the passed-in name\n try:\n all = object.__all__\n except AttributeError:\n all = None\n parts = name.split('.')\n links = []\n for i in range(len(parts)-1):\n links.append(\n '<a href=\"{}.html\" class=\"docmodule_link\">{}</a>'.format(\n '.'.join(parts[:i+1]), parts[i]))\n head = '.'.join(links + parts[-1:])\n try:\n path = inspect.getabsfile(object)\n url = path\n if sys.platform == 
'win32':\n import nturl2path\n url = nturl2path.pathname2url(path)\n filelink = self.filelink(url, path)\n except TypeError:\n filelink = '(built-in)'\n info = []\n if hasattr(object, '__version__'):\n version = str(object.__version__)\n if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':\n version = version[11:-1].strip()\n info.append('version %s' % self.escape(version))\n if hasattr(object, '__date__'):\n info.append(self.escape(str(object.__date__)))\n if info:\n head = head + ' (%s)' % ', '.join(info)\n docloc = self.getdocloc(object)\n if docloc is not None:\n docloc = (\n '<br><a href=\"%(docloc)s\">Module Reference</a>' % locals())\n else:\n docloc = ''\n extras = '<a href=\".\">index</a><br>' + filelink + docloc\n result = self.heading(head, extras)\n\n modules = inspect.getmembers(object, inspect.ismodule)\n\n classes, cdict = [], {}\n for key, value in inspect.getmembers(object, inspect.isclass):\n # if __all__ exists, believe it. Otherwise use old heuristic.\n if (all is not None or\n (inspect.getmodule(value) or object) is object):\n if visiblename(key, all, object):\n classes.append((key, value))\n cdict[key] = cdict[value] = '#' + key\n for key, value in classes:\n for base in value.__bases__:\n key, modname = base.__name__, base.__module__\n module = sys.modules.get(modname)\n if modname != name and module and hasattr(module, key):\n if getattr(module, key) is base:\n if key not in cdict:\n cdict[key] = cdict[base] = (\n modname + '.html#' + key)\n funcs, fdict = [], {}\n for key, value in inspect.getmembers(object, inspect.isroutine):\n # if __all__ exists, believe it. Otherwise use old heuristic.\n if (all is not None or\n inspect.isbuiltin(value) or\n inspect.getmodule(value) is object):\n if visiblename(key, all, object):\n funcs.append((key, value))\n fdict[key] = '#-' + key\n if inspect.isfunction(value):\n fdict[value] = fdict[key]\n data = []\n for key, value in inspect.getmembers(object, isdata):\n if visiblename(key, all, object):\n data.append((key, value))\n\n doc = self.markup(getdoc(object), self.preformat, fdict, cdict)\n doc = doc and '<code>{}</code>'.format(doc)\n result = result + '<p>%s</p>\\n' % doc\n\n if hasattr(object, '__path__'):\n modpkgs = []\n for importer, modname, ispkg in pkgutil.iter_modules(\n object.__path__):\n modpkgs.append((modname, name, ispkg, 0))\n modpkgs.sort()\n contents = self.multicolumn(modpkgs, self.modpkglink)\n result = result + self.bigsection(\n 'Package Contents', contents, css_class=\"package\")\n elif modules:\n contents = self.multicolumn(\n modules, lambda t: self.modulelink(t[1]))\n result = result + self.bigsection(\n 'Modules', contents, css_class=\"module\")\n\n if classes:\n classlist = [value for (key, value) in classes]\n contents = [\n self.formattree(inspect.getclasstree(classlist, 1), name)]\n for key, value in classes:\n contents.append(\n self.document(value, key, name, fdict, cdict))\n result = result + self.bigsection(\n 'Classes', ' '.join(contents), css_class=\"classes\")\n if funcs:\n contents = []\n for key, value in funcs:\n contents.append(\n self.document(value, key, name, fdict, cdict))\n result = result + self.bigsection(\n 'Functions', ' '.join(contents), css_class=\"functions\")\n if data:\n contents = []\n for key, value in data:\n contents.append(self.document(value, key))\n result = result + self.bigsection(\n 'Data', '<br>\\n'.join(contents), css_class=\"data\")\n if hasattr(object, '__author__'):\n contents = self.markup(str(object.__author__), self.preformat)\n result = 
result + self.bigsection(\n 'Author', contents, css_class=\"author\")\n if hasattr(object, '__credits__'):\n contents = self.markup(str(object.__credits__), self.preformat)\n result = result + self.bigsection(\n 'Credits', contents, css_class=\"credits\")\n\n return result\n\n def docclass(self, object, name=None, mod=None, funcs={}, classes={},\n *ignored):\n \"\"\"Produce HTML documentation for a class object.\"\"\"\n realname = object.__name__\n name = name or realname\n bases = object.__bases__\n\n contents = []\n push = contents.append\n\n # Cute little class to pump out a horizontal rule between sections.\n class HorizontalRule:\n def __init__(self):\n self.needone = 0\n\n def maybe(self):\n if self.needone:\n push('<hr>\\n')\n self.needone = 1\n hr = HorizontalRule()\n\n # List the mro, if non-trivial.\n mro = deque(inspect.getmro(object))\n if len(mro) > 2:\n hr.maybe()\n push('<dl><dt>Method resolution order:</dt>\\n')\n for base in mro:\n push('<dd>%s</dd>\\n' % self.classlink(base,\n object.__module__))\n push('</dl>\\n')\n\n def spill(msg, attrs, predicate):\n ok, attrs = _split_list(attrs, predicate)\n if ok:\n hr.maybe()\n push(msg)\n for name, kind, homecls, value in ok:\n try:\n value = getattr(object, name)\n except Exception:\n # Some descriptors may meet a failure\n # in their __get__.\n # (bug aroberge/mod_pydoc#1785)\n push(self._docdescriptor(name, value, mod))\n else:\n push(self.document(\n value, name, mod, funcs, classes, mdict,\n object))\n push('\\n')\n return attrs\n\n def spilldescriptors(msg, attrs, predicate):\n ok, attrs = _split_list(attrs, predicate)\n if ok:\n hr.maybe()\n push(msg)\n for name, kind, homecls, value in ok:\n push(self._docdescriptor(name, value, mod))\n return attrs\n\n def spilldata(msg, attrs, predicate):\n ok, attrs = _split_list(attrs, predicate)\n if ok:\n hr.maybe()\n push(msg)\n for name, kind, homecls, value in ok:\n base = self.docother(getattr(object, name), name, mod)\n if callable(value) or inspect.isdatadescriptor(value):\n doc = getattr(value, \"__doc__\", None)\n else:\n doc = None\n if doc is None:\n push('<dl><dt>%s</dt><dd></dd></dl>\\n' % base)\n else:\n doc = self.markup(getdoc(value), self.preformat,\n funcs, classes, mdict)\n doc = '<dd><code>%s</code></dd>' % doc\n push('<dl><dt>%s%s</dt></dl>\\n' % (base, doc))\n push('\\n')\n return attrs\n\n attrs = [(name, kind, cls, value)\n for name, kind, cls, value in classify_class_attrs(object)\n if visiblename(name, obj=object)]\n\n mdict = {}\n for key, kind, homecls, value in attrs:\n mdict[key] = anchor = '#' + name + '-' + key\n try:\n value = getattr(object, name)\n except Exception:\n # Some descriptors may meet a failure in their __get__.\n # (bug #1785)\n pass\n try:\n # The value may not be hashable (e.g., a data attr with\n # a dict or list value).\n mdict[value] = anchor\n except TypeError:\n pass\n\n while attrs:\n if mro:\n thisclass = mro.popleft()\n else:\n thisclass = attrs[0][2]\n attrs, inherited = _split_list(\n attrs, lambda t: t[2] is thisclass)\n\n if thisclass is builtins.object:\n attrs = inherited\n continue\n elif thisclass is object:\n tag = 'defined here'\n else:\n tag = 'inherited from %s' % self.classlink(\n thisclass, object.__module__)\n tag += ':<br>\\n'\n\n # Sort attrs by name.\n attrs.sort(key=lambda t: t[0])\n\n # Pump out the attrs, segregated by kind.\n attrs = spill('Methods %s' % tag, attrs,\n lambda t: t[1] == 'method')\n attrs = spill('Class methods %s' % tag, attrs,\n lambda t: t[1] == 'class method')\n attrs = spill('Static 
methods %s' % tag, attrs,\n lambda t: t[1] == 'static method')\n attrs = spilldescriptors('Data descriptors %s' % tag, attrs,\n lambda t: t[1] == 'data descriptor')\n attrs = spilldata('Data and other attributes %s' % tag, attrs,\n lambda t: t[1] == 'data')\n assert attrs == []\n attrs = inherited\n\n contents = ''.join(contents)\n\n if name == realname:\n title = '<span id=\"%s\" class=\"signature\"> class %s</span>' % (\n name, realname)\n else:\n title = (\n '%s = <span id=\"%s\" class=\"signature\">class %s</span>' % (\n name, name, realname))\n if bases:\n parents = []\n for base in bases:\n parents.append(self.classlink(base, object.__module__))\n title = title + '(%s)' % ', '.join(parents)\n doc = self.markup(\n getdoc(object), self.preformat, funcs, classes, mdict)\n doc = doc and '<code>%s<br>&nbsp;</code>' % doc\n\n return self.html_section(\n title, contents, 3, doc, css_class=\"docclass\")\n\n def formatvalue(self, object):\n \"\"\"Format an argument default value as text.\"\"\"\n return self.grey('=' + self.repr(object))\n\n def docroutine(self, object, name=None, mod=None,\n funcs={}, classes={}, methods={}, cl=None):\n \"\"\"Produce HTML documentation for a function or method object.\"\"\"\n realname = object.__name__\n name = name or realname\n anchor = (cl and cl.__name__ or '') + '-' + name\n note = ''\n skipdocs = 0\n if _is_bound_method(object):\n imclass = object.__self__.__class__\n if cl:\n if imclass is not cl:\n note = ' from ' + self.classlink(imclass, mod)\n else:\n if object.__self__ is not None:\n note = ' method of %s instance' % self.classlink(\n object.__self__.__class__, mod)\n else:\n note = ' unbound %s method' % self.classlink(\n imclass, mod)\n\n if name == realname:\n title = '<span id=\"%s\" class=\"signature\">%s</span>' % (\n anchor, realname)\n else:\n if (cl and realname in cl.__dict__ and\n cl.__dict__[realname] is object):\n reallink = '<a href=\"#%s\">%s</a>' % (\n cl.__name__ + '-' + realname, realname)\n skipdocs = 1\n else:\n reallink = realname\n title = '<span id=\"%s\" class=\"signature\">%s</span> = %s' % (\n anchor, name, reallink)\n argspec = None\n if inspect.isroutine(object):\n try:\n signature = inspect.signature(object)\n except (ValueError, TypeError):\n signature = None\n if signature:\n argspec = str(signature)\n if realname == '<lambda>':\n title = '%s <em>lambda</em> ' % name\n # XXX lambda's won't usually have\n # func_annotations['return']\n # since the syntax doesn't support but it is possible.\n # So removing parentheses isn't truly safe.\n argspec = argspec[1:-1] # remove parentheses\n if not argspec:\n argspec = '(...)'\n\n decl = title + argspec + (note and self.grey(note))\n\n if skipdocs:\n return '<dl><dt>%s</dt><dd></dd></dl>\\n' % decl\n else:\n doc = self.markup(\n getdoc(object), self.preformat, funcs, classes, methods)\n doc = doc and '<dd><code>%s</code></dd>' % doc\n return '<dl><dt>%s</dt><dd></dd>%s</dl>\\n' % (decl, doc)\n\n def _docdescriptor(self, name, value, mod):\n results = []\n push = results.append\n\n if name:\n push('<dl><dt>%s</dt>\\n' % name)\n if value.__doc__ is not None:\n doc = self.markup(getdoc(value), self.preformat)\n push('<dd><code>%s</code></dd>\\n' % doc)\n push('<dd></dd></dl>\\n')\n\n return ''.join(results)\n\n def docproperty(self, object, name=None, mod=None, cl=None):\n \"\"\"Produce html documentation for a property.\"\"\"\n return self._docdescriptor(name, object, mod)\n\n def docother(self, object, name=None, mod=None, *ignored):\n \"\"\"Produce HTML documentation for a 
data object.\"\"\"\n lhs = name and '%s = ' % name or ''\n return lhs + self.repr(object)\n\n def docdata(self, object, name=None, mod=None, cl=None):\n \"\"\"Produce html documentation for a data descriptor.\"\"\"\n return self._docdescriptor(name, object, mod)\n\n def index(self, dir, shadowed=None):\n \"\"\"Generate an HTML index for a directory of modules.\"\"\"\n modpkgs = []\n if shadowed is None:\n shadowed = {}\n for importer, name, ispkg in pkgutil.iter_modules([dir]):\n if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):\n # ignore a module if its name contains a\n # surrogate character\n continue\n modpkgs.append((name, '', ispkg, name in shadowed))\n shadowed[name] = 1\n\n modpkgs.sort()\n if len(modpkgs):\n contents = self.multicolumn(modpkgs, self.modpkglink)\n return self.bigsection(dir, contents, css_class=\"index\")\n else:\n return ''\n\n\ndef _url_handler(url, content_type=\"text/html\"):\n \"\"\"Pydoc url handler for use with the pydoc server.\n\n If the content_type is 'text/css', the _pydoc.css style\n sheet is read and returned if it exits.\n\n If the content_type is 'text/html', then the result of\n get_html_page(url) is returned.\n\n See https://github.com/python/cpython/blob/master/Lib/pydoc.py\n \"\"\"\n class _HTMLDoc(CustomHTMLDoc):\n\n def page(self, title, contents):\n \"\"\"Format an HTML page.\"\"\"\n rich_text_font = get_font(option=\"rich_font\").family()\n plain_text_font = get_font(option=\"font\").family()\n\n if is_dark_interface():\n css_path = \"static/css/dark_pydoc.css\"\n else:\n css_path = \"static/css/light_pydoc.css\"\n\n css_link = (\n '<link rel=\"stylesheet\" type=\"text/css\" href=\"%s\">' %\n css_path)\n\n code_style = (\n '<style>code {font-family: \"%s\"}</style>' % plain_text_font)\n\n html_page = '''\\\n<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\">\n<html><head><title>Pydoc: %s</title>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n%s%s</head><body style=\"clear:both;font-family:'%s'\">\n%s<div style=\"clear:both;padding-top:.7em;\">%s</div>\n</body></html>''' % (title, css_link, code_style, rich_text_font,\n html_navbar(), contents)\n\n return html_page\n\n def filelink(self, url, path):\n return '<a href=\"getfile?key=%s\">%s</a>' % (url, path)\n\n html = _HTMLDoc()\n\n def html_navbar():\n version = html.escape(\"%s [%s, %s]\" % (platform.python_version(),\n platform.python_build()[0],\n platform.python_compiler()))\n return \"\"\"\n <div style='float:left'>\n Python %s<br>%s\n </div>\n <div style='float:right'>\n <div style='text-align:right; padding-bottom:.7em;'>\n <a href=\"index.html\">Module Index</a>\n : <a href=\"topics.html\">Topics</a>\n : <a href=\"keywords.html\">Keywords</a>\n </div>\n <div style='text-align:right;'>\n <form action=\"search\" style='display:inline;'>\n <input class=\"input-search\" type=text name=key size=\"22\">\n <input class=\"submit-search\" type=submit value=\"Search\">\n </form>\n </div>\n </div>\n \"\"\" % (version, html.escape(platform.platform(terse=True)))\n\n def html_index():\n \"\"\"Index page.\"\"\"\n def bltinlink(name):\n return '<a href=\"%s.html\">%s</a>' % (name, name)\n\n heading = html.heading('<span>Index of Modules</span>')\n names = [name for name in sys.builtin_module_names\n if name != '__main__']\n contents = html.multicolumn(names, bltinlink)\n contents = [heading, '<p>' + html.bigsection(\n 'Built-in Modules', contents, css_class=\"builtin_modules\")]\n\n seen = {}\n for dir in sys.path:\n\n contents.append(html.index(dir, 
seen))\n\n contents.append(\n '<p class=\"ka_ping_yee\"><strong>pydoc</strong> by Ka-Ping Yee'\n '&lt;[email protected]&gt;</p>')\n return 'Index of Modules', ''.join(contents)\n\n def html_search(key):\n \"\"\"Search results page.\"\"\"\n # scan for modules\n search_result = []\n\n def callback(path, modname, desc):\n if modname[-9:] == '.__init__':\n modname = modname[:-9] + ' (package)'\n search_result.append((modname, desc and '- ' + desc))\n\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore') # ignore problems during import\n ModuleScanner().run(callback, key)\n\n # format page\n def bltinlink(name):\n return '<a href=\"%s.html\">%s</a>' % (name, name)\n\n results = []\n heading = html.heading('Search Results')\n\n for name, desc in search_result:\n results.append(bltinlink(name) + desc)\n contents = heading + html.bigsection(\n 'key = {}'.format(key), '<br>'.join(results), css_class=\"search\")\n return 'Search Results', contents\n\n def html_getfile(path):\n \"\"\"Get and display a source file listing safely.\"\"\"\n path = path.replace('%20', ' ')\n with tokenize.open(path) as fp:\n lines = html.escape(fp.read())\n body = '<pre>%s</pre>' % lines\n heading = html.heading('File Listing')\n\n contents = heading + html.bigsection('File: {}'.format(path), body,\n css_class=\"getfile\")\n return 'getfile %s' % path, contents\n\n def html_topics():\n \"\"\"Index of topic texts available.\"\"\"\n def bltinlink(name):\n return '<a href=\"topic?key=%s\">%s</a>' % (name, name)\n\n heading = html.heading('Index of Topics') + '<br>'\n names = sorted(Helper.topics.keys())\n\n contents = html.multicolumn(names, bltinlink)\n contents = heading + html.bigsection(\n 'Topics', contents, css_class=\"topics\")\n return 'Topics', contents\n\n def html_keywords():\n \"\"\"Index of keywords.\"\"\"\n heading = html.heading('Index of Keywords')\n names = sorted(Helper.keywords.keys())\n\n def bltinlink(name):\n return '<a href=\"topic?key=%s\">%s</a>' % (name, name)\n\n contents = html.multicolumn(names, bltinlink)\n contents = heading + '<br>' + html.bigsection(\n 'Keywords', contents, css_class=\"keywords\")\n return 'Keywords', contents\n\n def html_topicpage(topic):\n \"\"\"Topic or keyword help page.\"\"\"\n buf = io.StringIO()\n htmlhelp = Helper(buf, buf)\n contents, xrefs = htmlhelp._gettopic(topic)\n if topic in htmlhelp.keywords:\n title = 'Keyword'\n else:\n title = 'Topic'\n heading = html.heading(title)\n contents = '<pre>%s</pre>' % html.markup(contents)\n contents = html.bigsection(topic, contents, css_class=\"topics\")\n if xrefs:\n xrefs = sorted(xrefs.split())\n\n def bltinlink(name):\n return '<a href=\"topic?key=%s\">%s</a>' % (name, name)\n\n xrefs = html.multicolumn(xrefs, bltinlink)\n xrefs = html.html_section('Related help topics: ', xrefs,\n css_class=\"topics\")\n return ('%s %s' % (title, topic),\n ''.join((heading, contents, xrefs)))\n\n def html_getobj(url):\n obj = locate(url, forceload=1)\n if obj is None and url != 'None':\n raise ValueError(\n _('There was an error while retrieving documentation '\n 'for the object you requested: Object could not be found'))\n title = describe(obj)\n content = html.document(obj, url)\n return title, content\n\n def html_error(url, exc):\n heading = html.heading('Error')\n if DEV:\n contents = '<br>'.join(html.escape(line) for line in\n format_exception_only(type(exc), exc))\n else:\n contents = '%s' % to_text_string(exc)\n contents = heading + html.bigsection(url, contents, css_class=\"error\")\n return \"Error - %s\" % 
url, contents\n\n def get_html_page(url):\n \"\"\"Generate an HTML page for url.\"\"\"\n complete_url = url\n if url.endswith('.html'):\n url = url[:-5]\n try:\n if url in (\"\", \"index\"):\n title, content = html_index()\n elif url == \"topics\":\n title, content = html_topics()\n elif url == \"keywords\":\n title, content = html_keywords()\n elif '=' in url:\n op, _, url = url.partition('=')\n if op == \"search?key\":\n title, content = html_search(url)\n elif op == \"getfile?key\":\n title, content = html_getfile(url)\n elif op == \"topic?key\":\n # try topics first, then objects.\n try:\n title, content = html_topicpage(url)\n except ValueError:\n title, content = html_getobj(url)\n elif op == \"get?key\":\n # try objects first, then topics.\n if url in (\"\", \"index\"):\n title, content = html_index()\n else:\n try:\n title, content = html_getobj(url)\n except ValueError:\n title, content = html_topicpage(url)\n else:\n raise ValueError(\n _('There was an error while retrieving documentation '\n 'for the object you requested: Bad URL %s') % url)\n else:\n title, content = html_getobj(url)\n except Exception as exc:\n # Catch any errors and display them in an error page.\n title, content = html_error(complete_url, exc)\n return html.page(title, content)\n\n if url.startswith('/'):\n url = url[1:]\n if content_type == 'text/css':\n path_here = os.path.dirname(os.path.realpath(__file__))\n css_path = os.path.join(path_here, url)\n with open(css_path) as fp:\n return ''.join(fp.readlines())\n elif content_type == 'text/html':\n return get_html_page(url)\n # Errors outside the url handler are caught by the server.\n raise TypeError(\n _('There was an error while retrieving documentation '\n 'for the object you requested: unknown content type %r for url %s')\n % (content_type, url))\n\n\ndef _start_server(urlhandler, hostname, port):\n \"\"\"\n Start an HTTP server thread on a specific port.\n\n This is a reimplementation of `pydoc._start_server` to handle connection\n errors for 'do_GET'.\n\n Taken from PyDoc: https://github.com/python/cpython/blob/3.7/Lib/pydoc.py\n \"\"\"\n import http.server\n import email.message\n import select\n import threading\n import time\n\n class DocHandler(http.server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n \"\"\"Process a request from an HTML browser.\n\n The URL received is in self.path.\n Get an HTML page from self.urlhandler and send it.\n \"\"\"\n if self.path.endswith('.css'):\n content_type = 'text/css'\n else:\n content_type = 'text/html'\n self.send_response(200)\n self.send_header(\n 'Content-Type', '%s; charset=UTF-8' % content_type)\n self.end_headers()\n try:\n self.wfile.write(self.urlhandler(\n self.path, content_type).encode('utf-8'))\n except ConnectionAbortedError:\n # Needed to handle error when client closes the connection,\n # for example when the client stops the load of the previously\n # requested page. 
See spyder-ide/spyder#10755\n pass\n except BrokenPipeError:\n # Needed to handle permission error when trying to open a port\n # for the web server of the online help.\n # See spyder-ide/spyder#13388\n pass\n\n def log_message(self, *args):\n # Don't log messages.\n pass\n\n class DocServer(http.server.HTTPServer):\n\n def __init__(self, host, port, callback):\n self.host = host\n self.address = (self.host, port)\n self.callback = callback\n self.base.__init__(self, self.address, self.handler)\n self.quit = False\n\n def serve_until_quit(self):\n while not self.quit:\n rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)\n if rd:\n self.handle_request()\n self.server_close()\n\n def server_activate(self):\n self.base.server_activate(self)\n if self.callback:\n self.callback(self)\n\n class ServerThread(threading.Thread):\n\n def __init__(self, urlhandler, host, port):\n self.urlhandler = urlhandler\n self.host = host\n self.port = int(port)\n threading.Thread.__init__(self)\n self.serving = False\n self.error = None\n\n def run(self):\n \"\"\"Start the server.\"\"\"\n try:\n DocServer.base = http.server.HTTPServer\n DocServer.handler = DocHandler\n DocHandler.MessageClass = email.message.Message\n DocHandler.urlhandler = staticmethod(self.urlhandler)\n docsvr = DocServer(self.host, self.port, self.ready)\n self.docserver = docsvr\n docsvr.serve_until_quit()\n except Exception as e:\n self.error = e\n\n def ready(self, server):\n self.serving = True\n self.host = server.host\n self.port = server.server_port\n self.url = 'http://%s:%d/' % (self.host, self.port)\n\n def stop(self):\n \"\"\"Stop the server and this thread nicely.\"\"\"\n self.docserver.quit = True\n self.join()\n # explicitly break a reference cycle: DocServer.callback\n # has indirectly a reference to ServerThread.\n self.docserver = None\n self.serving = False\n self.url = None\n\n thread = ServerThread(urlhandler, hostname, port)\n thread.start()\n # Wait until thread.serving is True to make sure we are\n # really up before returning.\n while not thread.error and not thread.serving:\n time.sleep(.01)\n return thread\n", "path": "spyder/plugins/onlinehelp/pydoc_patch.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright © 2018 André Roberge - mod_pydoc\n# Copyright © Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"PyDoc patch\"\"\"\n# Standard libray\nimport builtins\nimport io\nimport inspect\nimport os\nimport pkgutil\nimport platform\nimport re\nimport sys\nimport tokenize\nimport warnings\n\n\n# Local imports\nfrom spyder.config.base import _, DEV\nfrom spyder.config.gui import is_dark_interface, get_font\nfrom spyder.py3compat import PY2, to_text_string\n\nif not PY2:\n from pydoc import (\n classname, classify_class_attrs, describe, Doc, format_exception_only,\n Helper, HTMLRepr, _is_bound_method, ModuleScanner, locate, replace,\n visiblename, isdata, getdoc, deque, _split_list)\n\n class CustomHTMLDoc(Doc):\n \"\"\"\n Formatter class for HTML documentation.\n\n See:\n https://github.com/aroberge/mod_pydoc/blob/master/mod_pydoc/pydoc.py\n \"\"\"\n\n # ------------------------------------------ HTML formatting utilities\n\n _repr_instance = HTMLRepr()\n repr = _repr_instance.repr\n escape = _repr_instance.escape\n\n def page(self, title, contents):\n \"\"\"Format an HTML page.\"\"\"\n return '''\\\n<!doctype html>\n<html><head><title>Python: %s</title>\n<meta charset=\"UTF-8\">\n</head><body>\n%s\n</body></html>''' % (title, contents)\n\n def heading(self, title, extras=''):\n \"\"\"Format a page heading.\"\"\"\n return '''\n <table class=\"heading\">\n <tr><td>{}</td><td class=\"align_right normal\">{}</td></tr></table>\n '''.format(title, extras or '&nbsp;')\n\n def html_section(\n self, title, contents, width=6,\n prelude='', marginalia=None, gap='&nbsp;',\n css_class=''):\n \"\"\"Format a section with a heading.\"\"\"\n result = '''\n <table class=\"{}\">\n <tr>\n <td colspan=\"3\">\n {}</td></tr>\n '''.format(css_class, title)\n if prelude:\n result = result + '''\n <tr><td rowspan=\"2\">{}</td>\n <td colspan=\"2\">{}</td></tr>\n <tr><td>{}</td>'''.format(marginalia, prelude, gap)\n elif marginalia:\n result = result + '''\n <tr><td>{}</td><td>{}</td>'''.format(marginalia, gap)\n\n contents = '{}</td></tr></table><br>'.format(contents)\n return result + '\\n<td class=\"inner_table\">' + contents\n\n def bigsection(self, title, *args, **kwargs):\n \"\"\"Format a section with a big heading.\"\"\"\n title = '<span class=\"section_title\">{}</span>'.format(title)\n return self.html_section(title, *args, **kwargs)\n\n def preformat(self, text):\n \"\"\"Format literal preformatted text.\"\"\"\n text = self.escape(text.expandtabs())\n return replace(text, '\\n\\n', '\\n \\n', '\\n\\n', '\\n \\n',\n ' ', '&nbsp;', '\\n', '<br>\\n')\n\n def multicolumn(self, list, format, cols=4):\n \"\"\"Format a list of items into a multi-column list.\"\"\"\n result = ''\n rows = (len(list)+cols-1)//cols\n for col in range(cols):\n result = (\n result + '<td style=\"width:%d%%;vertical-align:text-top\">'\n % (100//cols))\n for i in range(rows*col, rows*col+rows):\n if i < len(list):\n result = result + format(list[i]) + '<br>\\n'\n result = result + '</td>'\n return '<table style=\"width:100%%\"><tr>%s</tr></table>' % result\n\n def grey(self, text):\n \"\"\"Grey span.\"\"\"\n return '<span class=\"grey\">%s</span>' % text\n\n def namelink(self, name, *dicts):\n \"\"\"Make a link for an identifier, given name-to-URL mappings.\"\"\"\n for dict in dicts:\n if name in dict:\n return '<a href=\"%s\">%s</a>' % (dict[name], name)\n return name\n\n def classlink(self, object, modname):\n 
\"\"\"Make a link for a class.\"\"\"\n name, module = object.__name__, sys.modules.get(object.__module__)\n if hasattr(module, name) and getattr(module, name) is object:\n return '<a href=\"%s.html#%s\">%s</a>' % (\n module.__name__, name, classname(object, modname))\n return classname(object, modname)\n\n def modulelink(self, object):\n \"\"\"Make a link for a module.\"\"\"\n return '<a href=\"%s.html\">%s</a>' % (\n object.__name__, object.__name__)\n\n def modpkglink(self, modpkginfo):\n \"\"\"Make a link for a module or package to display in an index.\"\"\"\n name, path, ispackage, shadowed = modpkginfo\n if shadowed:\n return self.grey(name)\n if path:\n url = '%s.%s.html' % (path, name)\n else:\n url = '%s.html' % name\n if ispackage:\n text = '%s&nbsp;(package)' % name\n else:\n text = name\n return '<a href=\"%s\">%s</a>' % (url, text)\n\n def filelink(self, url, path):\n \"\"\"Make a link to source file.\"\"\"\n return '<a href=\"file:%s\">%s</a>' % (url, path)\n\n def markup(self, text, escape=None, funcs={}, classes={}, methods={}):\n \"\"\"\n Mark up some plain text, given a context of symbols to look for.\n\n Each context dictionary maps object names to anchor names.\n \"\"\"\n escape = escape or self.escape\n results = []\n here = 0\n pattern = re.compile(r'\\b((http|ftp)://\\S+[\\w/]|'\n r'RFC[- ]?(\\d+)|'\n r'PEP[- ]?(\\d+)|'\n r'(self\\.)?(\\w+))')\n while True:\n match = pattern.search(text, here)\n if not match:\n break\n start, end = match.span()\n results.append(escape(text[here:start]))\n\n all, scheme, rfc, pep, selfdot, name = match.groups()\n if scheme:\n url = escape(all).replace('\"', '&quot;')\n results.append('<a href=\"%s\">%s</a>' % (url, url))\n elif rfc:\n url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)\n results.append('<a href=\"%s\">%s</a>' % (url, escape(all)))\n elif pep:\n url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)\n results.append('<a href=\"%s\">%s</a>' % (url, escape(all)))\n elif text[end:end+1] == '(':\n results.append(\n self.namelink(name, methods, funcs, classes))\n elif selfdot:\n results.append('self.%s' % name)\n else:\n results.append(self.namelink(name, classes))\n here = end\n results.append(escape(text[here:]))\n return ''.join(results)\n\n # --------------------------------------------- type-specific routines\n\n def formattree(self, tree, modname, parent=None):\n \"\"\"\n Produce HTML for a class tree as given by inspect.getclasstree().\n \"\"\"\n result = ''\n for entry in tree:\n if type(entry) is type(()):\n c, bases = entry\n result = result + '<dt>'\n result = result + self.classlink(c, modname)\n if bases and bases != (parent,):\n parents = []\n for base in bases:\n parents.append(self.classlink(base, modname))\n result = result + '(' + ', '.join(parents) + ')'\n result = result + '\\n</dt>'\n elif type(entry) is type([]):\n result = result + '<dd>\\n%s</dd>\\n' % self.formattree(\n entry, modname, c)\n return '<dl><dt></dt>\\n%s<dd></dd></dl>\\n' % result\n\n def docmodule(self, object, name=None, mod=None, *ignored):\n \"\"\"Produce HTML documentation for a module object.\"\"\"\n name = object.__name__ # ignore the passed-in name\n try:\n all = object.__all__\n except AttributeError:\n all = None\n parts = name.split('.')\n links = []\n for i in range(len(parts)-1):\n links.append(\n '<a href=\"{}.html\" class=\"docmodule_link\">{}</a>'.format(\n '.'.join(parts[:i+1]), parts[i]))\n head = '.'.join(links + parts[-1:])\n try:\n path = inspect.getabsfile(object)\n url = path\n if sys.platform == 
'win32':\n import nturl2path\n url = nturl2path.pathname2url(path)\n filelink = self.filelink(url, path)\n except TypeError:\n filelink = '(built-in)'\n info = []\n if hasattr(object, '__version__'):\n version = str(object.__version__)\n if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':\n version = version[11:-1].strip()\n info.append('version %s' % self.escape(version))\n if hasattr(object, '__date__'):\n info.append(self.escape(str(object.__date__)))\n if info:\n head = head + ' (%s)' % ', '.join(info)\n docloc = self.getdocloc(object)\n if docloc is not None:\n docloc = (\n '<br><a href=\"%(docloc)s\">Module Reference</a>' % locals())\n else:\n docloc = ''\n extras = '<a href=\".\">index</a><br>' + filelink + docloc\n result = self.heading(head, extras)\n\n modules = inspect.getmembers(object, inspect.ismodule)\n\n classes, cdict = [], {}\n for key, value in inspect.getmembers(object, inspect.isclass):\n # if __all__ exists, believe it. Otherwise use old heuristic.\n if (all is not None or\n (inspect.getmodule(value) or object) is object):\n if visiblename(key, all, object):\n classes.append((key, value))\n cdict[key] = cdict[value] = '#' + key\n for key, value in classes:\n for base in value.__bases__:\n key, modname = base.__name__, base.__module__\n module = sys.modules.get(modname)\n if modname != name and module and hasattr(module, key):\n if getattr(module, key) is base:\n if key not in cdict:\n cdict[key] = cdict[base] = (\n modname + '.html#' + key)\n funcs, fdict = [], {}\n for key, value in inspect.getmembers(object, inspect.isroutine):\n # if __all__ exists, believe it. Otherwise use old heuristic.\n if (all is not None or\n inspect.isbuiltin(value) or\n inspect.getmodule(value) is object):\n if visiblename(key, all, object):\n funcs.append((key, value))\n fdict[key] = '#-' + key\n if inspect.isfunction(value):\n fdict[value] = fdict[key]\n data = []\n for key, value in inspect.getmembers(object, isdata):\n if visiblename(key, all, object):\n data.append((key, value))\n\n doc = self.markup(getdoc(object), self.preformat, fdict, cdict)\n doc = doc and '<code>{}</code>'.format(doc)\n result = result + '<p>%s</p>\\n' % doc\n\n if hasattr(object, '__path__'):\n modpkgs = []\n for importer, modname, ispkg in pkgutil.iter_modules(\n object.__path__):\n modpkgs.append((modname, name, ispkg, 0))\n modpkgs.sort()\n contents = self.multicolumn(modpkgs, self.modpkglink)\n result = result + self.bigsection(\n 'Package Contents', contents, css_class=\"package\")\n elif modules:\n contents = self.multicolumn(\n modules, lambda t: self.modulelink(t[1]))\n result = result + self.bigsection(\n 'Modules', contents, css_class=\"module\")\n\n if classes:\n classlist = [value for (key, value) in classes]\n contents = [\n self.formattree(inspect.getclasstree(classlist, 1), name)]\n for key, value in classes:\n contents.append(\n self.document(value, key, name, fdict, cdict))\n result = result + self.bigsection(\n 'Classes', ' '.join(contents), css_class=\"classes\")\n if funcs:\n contents = []\n for key, value in funcs:\n contents.append(\n self.document(value, key, name, fdict, cdict))\n result = result + self.bigsection(\n 'Functions', ' '.join(contents), css_class=\"functions\")\n if data:\n contents = []\n for key, value in data:\n contents.append(self.document(value, key))\n result = result + self.bigsection(\n 'Data', '<br>\\n'.join(contents), css_class=\"data\")\n if hasattr(object, '__author__'):\n contents = self.markup(str(object.__author__), self.preformat)\n result = 
result + self.bigsection(\n 'Author', contents, css_class=\"author\")\n if hasattr(object, '__credits__'):\n contents = self.markup(str(object.__credits__), self.preformat)\n result = result + self.bigsection(\n 'Credits', contents, css_class=\"credits\")\n\n return result\n\n def docclass(self, object, name=None, mod=None, funcs={}, classes={},\n *ignored):\n \"\"\"Produce HTML documentation for a class object.\"\"\"\n realname = object.__name__\n name = name or realname\n bases = object.__bases__\n\n contents = []\n push = contents.append\n\n # Cute little class to pump out a horizontal rule between sections.\n class HorizontalRule:\n def __init__(self):\n self.needone = 0\n\n def maybe(self):\n if self.needone:\n push('<hr>\\n')\n self.needone = 1\n hr = HorizontalRule()\n\n # List the mro, if non-trivial.\n mro = deque(inspect.getmro(object))\n if len(mro) > 2:\n hr.maybe()\n push('<dl><dt>Method resolution order:</dt>\\n')\n for base in mro:\n push('<dd>%s</dd>\\n' % self.classlink(base,\n object.__module__))\n push('</dl>\\n')\n\n def spill(msg, attrs, predicate):\n ok, attrs = _split_list(attrs, predicate)\n if ok:\n hr.maybe()\n push(msg)\n for name, kind, homecls, value in ok:\n try:\n value = getattr(object, name)\n except Exception:\n # Some descriptors may meet a failure\n # in their __get__.\n # (bug aroberge/mod_pydoc#1785)\n push(self._docdescriptor(name, value, mod))\n else:\n push(self.document(\n value, name, mod, funcs, classes, mdict,\n object))\n push('\\n')\n return attrs\n\n def spilldescriptors(msg, attrs, predicate):\n ok, attrs = _split_list(attrs, predicate)\n if ok:\n hr.maybe()\n push(msg)\n for name, kind, homecls, value in ok:\n push(self._docdescriptor(name, value, mod))\n return attrs\n\n def spilldata(msg, attrs, predicate):\n ok, attrs = _split_list(attrs, predicate)\n if ok:\n hr.maybe()\n push(msg)\n for name, kind, homecls, value in ok:\n base = self.docother(getattr(object, name), name, mod)\n if callable(value) or inspect.isdatadescriptor(value):\n doc = getattr(value, \"__doc__\", None)\n else:\n doc = None\n if doc is None:\n push('<dl><dt>%s</dt><dd></dd></dl>\\n' % base)\n else:\n doc = self.markup(getdoc(value), self.preformat,\n funcs, classes, mdict)\n doc = '<dd><code>%s</code></dd>' % doc\n push('<dl><dt>%s%s</dt></dl>\\n' % (base, doc))\n push('\\n')\n return attrs\n\n attrs = [(name, kind, cls, value)\n for name, kind, cls, value in classify_class_attrs(object)\n if visiblename(name, obj=object)]\n\n mdict = {}\n for key, kind, homecls, value in attrs:\n mdict[key] = anchor = '#' + name + '-' + key\n try:\n value = getattr(object, name)\n except Exception:\n # Some descriptors may meet a failure in their __get__.\n # (bug #1785)\n pass\n try:\n # The value may not be hashable (e.g., a data attr with\n # a dict or list value).\n mdict[value] = anchor\n except TypeError:\n pass\n\n while attrs:\n if mro:\n thisclass = mro.popleft()\n else:\n thisclass = attrs[0][2]\n attrs, inherited = _split_list(\n attrs, lambda t: t[2] is thisclass)\n\n if thisclass is builtins.object:\n attrs = inherited\n continue\n elif thisclass is object:\n tag = 'defined here'\n else:\n tag = 'inherited from %s' % self.classlink(\n thisclass, object.__module__)\n tag += ':<br>\\n'\n\n # Sort attrs by name.\n attrs.sort(key=lambda t: t[0])\n\n # Pump out the attrs, segregated by kind.\n attrs = spill('Methods %s' % tag, attrs,\n lambda t: t[1] == 'method')\n attrs = spill('Class methods %s' % tag, attrs,\n lambda t: t[1] == 'class method')\n attrs = spill('Static 
methods %s' % tag, attrs,\n lambda t: t[1] == 'static method')\n attrs = spilldescriptors('Data descriptors %s' % tag, attrs,\n lambda t: t[1] == 'data descriptor')\n attrs = spilldata('Data and other attributes %s' % tag, attrs,\n lambda t: t[1] == 'data')\n assert attrs == []\n attrs = inherited\n\n contents = ''.join(contents)\n\n if name == realname:\n title = '<span id=\"%s\" class=\"signature\"> class %s</span>' % (\n name, realname)\n else:\n title = (\n '%s = <span id=\"%s\" class=\"signature\">class %s</span>' % (\n name, name, realname))\n if bases:\n parents = []\n for base in bases:\n parents.append(self.classlink(base, object.__module__))\n title = title + '(%s)' % ', '.join(parents)\n doc = self.markup(\n getdoc(object), self.preformat, funcs, classes, mdict)\n doc = doc and '<code>%s<br>&nbsp;</code>' % doc\n\n return self.html_section(\n title, contents, 3, doc, css_class=\"docclass\")\n\n def formatvalue(self, object):\n \"\"\"Format an argument default value as text.\"\"\"\n return self.grey('=' + self.repr(object))\n\n def docroutine(self, object, name=None, mod=None,\n funcs={}, classes={}, methods={}, cl=None):\n \"\"\"Produce HTML documentation for a function or method object.\"\"\"\n realname = object.__name__\n name = name or realname\n anchor = (cl and cl.__name__ or '') + '-' + name\n note = ''\n skipdocs = 0\n if _is_bound_method(object):\n imclass = object.__self__.__class__\n if cl:\n if imclass is not cl:\n note = ' from ' + self.classlink(imclass, mod)\n else:\n if object.__self__ is not None:\n note = ' method of %s instance' % self.classlink(\n object.__self__.__class__, mod)\n else:\n note = ' unbound %s method' % self.classlink(\n imclass, mod)\n\n if name == realname:\n title = '<span id=\"%s\" class=\"signature\">%s</span>' % (\n anchor, realname)\n else:\n if (cl and realname in cl.__dict__ and\n cl.__dict__[realname] is object):\n reallink = '<a href=\"#%s\">%s</a>' % (\n cl.__name__ + '-' + realname, realname)\n skipdocs = 1\n else:\n reallink = realname\n title = '<span id=\"%s\" class=\"signature\">%s</span> = %s' % (\n anchor, name, reallink)\n argspec = None\n if inspect.isroutine(object):\n try:\n signature = inspect.signature(object)\n except (ValueError, TypeError):\n signature = None\n if signature:\n argspec = str(signature)\n if realname == '<lambda>':\n title = '%s <em>lambda</em> ' % name\n # XXX lambda's won't usually have\n # func_annotations['return']\n # since the syntax doesn't support but it is possible.\n # So removing parentheses isn't truly safe.\n argspec = argspec[1:-1] # remove parentheses\n if not argspec:\n argspec = '(...)'\n\n decl = title + argspec + (note and self.grey(note))\n\n if skipdocs:\n return '<dl><dt>%s</dt><dd></dd></dl>\\n' % decl\n else:\n doc = self.markup(\n getdoc(object), self.preformat, funcs, classes, methods)\n doc = doc and '<dd><code>%s</code></dd>' % doc\n return '<dl><dt>%s</dt><dd></dd>%s</dl>\\n' % (decl, doc)\n\n def _docdescriptor(self, name, value, mod):\n results = []\n push = results.append\n\n if name:\n push('<dl><dt>%s</dt>\\n' % name)\n if value.__doc__ is not None:\n doc = self.markup(getdoc(value), self.preformat)\n push('<dd><code>%s</code></dd>\\n' % doc)\n push('<dd></dd></dl>\\n')\n\n return ''.join(results)\n\n def docproperty(self, object, name=None, mod=None, cl=None):\n \"\"\"Produce html documentation for a property.\"\"\"\n return self._docdescriptor(name, object, mod)\n\n def docother(self, object, name=None, mod=None, *ignored):\n \"\"\"Produce HTML documentation for a 
data object.\"\"\"\n lhs = name and '%s = ' % name or ''\n return lhs + self.repr(object)\n\n def docdata(self, object, name=None, mod=None, cl=None):\n \"\"\"Produce html documentation for a data descriptor.\"\"\"\n return self._docdescriptor(name, object, mod)\n\n def index(self, dir, shadowed=None):\n \"\"\"Generate an HTML index for a directory of modules.\"\"\"\n modpkgs = []\n if shadowed is None:\n shadowed = {}\n for importer, name, ispkg in pkgutil.iter_modules([dir]):\n if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):\n # ignore a module if its name contains a\n # surrogate character\n continue\n modpkgs.append((name, '', ispkg, name in shadowed))\n shadowed[name] = 1\n\n modpkgs.sort()\n if len(modpkgs):\n contents = self.multicolumn(modpkgs, self.modpkglink)\n return self.bigsection(dir, contents, css_class=\"index\")\n else:\n return ''\n\n\ndef _url_handler(url, content_type=\"text/html\"):\n \"\"\"Pydoc url handler for use with the pydoc server.\n\n If the content_type is 'text/css', the _pydoc.css style\n sheet is read and returned if it exits.\n\n If the content_type is 'text/html', then the result of\n get_html_page(url) is returned.\n\n See https://github.com/python/cpython/blob/master/Lib/pydoc.py\n \"\"\"\n class _HTMLDoc(CustomHTMLDoc):\n\n def page(self, title, contents):\n \"\"\"Format an HTML page.\"\"\"\n rich_text_font = get_font(option=\"rich_font\").family()\n plain_text_font = get_font(option=\"font\").family()\n\n if is_dark_interface():\n css_path = \"static/css/dark_pydoc.css\"\n else:\n css_path = \"static/css/light_pydoc.css\"\n\n css_link = (\n '<link rel=\"stylesheet\" type=\"text/css\" href=\"/%s\">' %\n css_path)\n\n code_style = (\n '<style>code {font-family: \"%s\"}</style>' % plain_text_font)\n\n html_page = '''\\\n<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\">\n<html><head><title>Pydoc: %s</title>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n%s%s</head><body style=\"clear:both;font-family:'%s'\">\n%s<div style=\"clear:both;padding-top:.7em;\">%s</div>\n</body></html>''' % (title, css_link, code_style, rich_text_font,\n html_navbar(), contents)\n\n return html_page\n\n def filelink(self, url, path):\n return '<a href=\"getfile?key=%s\">%s</a>' % (url, path)\n\n html = _HTMLDoc()\n\n def html_navbar():\n version = html.escape(\"%s [%s, %s]\" % (platform.python_version(),\n platform.python_build()[0],\n platform.python_compiler()))\n return \"\"\"\n <div style='float:left'>\n Python %s<br>%s\n </div>\n <div style='float:right'>\n <div style='text-align:right; padding-bottom:.7em;'>\n <a href=\"index.html\">Module Index</a>\n : <a href=\"topics.html\">Topics</a>\n : <a href=\"keywords.html\">Keywords</a>\n </div>\n <div style='text-align:right;'>\n <form action=\"search\" style='display:inline;'>\n <input class=\"input-search\" type=text name=key size=\"22\">\n <input class=\"submit-search\" type=submit value=\"Search\">\n </form>\n </div>\n </div>\n \"\"\" % (version, html.escape(platform.platform(terse=True)))\n\n def html_index():\n \"\"\"Index page.\"\"\"\n def bltinlink(name):\n return '<a href=\"%s.html\">%s</a>' % (name, name)\n\n heading = html.heading('<span>Index of Modules</span>')\n names = [name for name in sys.builtin_module_names\n if name != '__main__']\n contents = html.multicolumn(names, bltinlink)\n contents = [heading, '<p>' + html.bigsection(\n 'Built-in Modules', contents, css_class=\"builtin_modules\")]\n\n seen = {}\n for dir in sys.path:\n\n contents.append(html.index(dir, 
seen))\n\n contents.append(\n '<p class=\"ka_ping_yee\"><strong>pydoc</strong> by Ka-Ping Yee'\n '&lt;[email protected]&gt;</p>')\n return 'Index of Modules', ''.join(contents)\n\n def html_search(key):\n \"\"\"Search results page.\"\"\"\n # scan for modules\n search_result = []\n\n def callback(path, modname, desc):\n if modname[-9:] == '.__init__':\n modname = modname[:-9] + ' (package)'\n search_result.append((modname, desc and '- ' + desc))\n\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore') # ignore problems during import\n ModuleScanner().run(callback, key)\n\n # format page\n def bltinlink(name):\n return '<a href=\"%s.html\">%s</a>' % (name, name)\n\n results = []\n heading = html.heading('Search Results')\n\n for name, desc in search_result:\n results.append(bltinlink(name) + desc)\n contents = heading + html.bigsection(\n 'key = {}'.format(key), '<br>'.join(results), css_class=\"search\")\n return 'Search Results', contents\n\n def html_getfile(path):\n \"\"\"Get and display a source file listing safely.\"\"\"\n path = path.replace('%20', ' ')\n with tokenize.open(path) as fp:\n lines = html.escape(fp.read())\n body = '<pre>%s</pre>' % lines\n heading = html.heading('File Listing')\n\n contents = heading + html.bigsection('File: {}'.format(path), body,\n css_class=\"getfile\")\n return 'getfile %s' % path, contents\n\n def html_topics():\n \"\"\"Index of topic texts available.\"\"\"\n def bltinlink(name):\n return '<a href=\"topic?key=%s\">%s</a>' % (name, name)\n\n heading = html.heading('Index of Topics') + '<br>'\n names = sorted(Helper.topics.keys())\n\n contents = html.multicolumn(names, bltinlink)\n contents = heading + html.bigsection(\n 'Topics', contents, css_class=\"topics\")\n return 'Topics', contents\n\n def html_keywords():\n \"\"\"Index of keywords.\"\"\"\n heading = html.heading('Index of Keywords')\n names = sorted(Helper.keywords.keys())\n\n def bltinlink(name):\n return '<a href=\"topic?key=%s\">%s</a>' % (name, name)\n\n contents = html.multicolumn(names, bltinlink)\n contents = heading + '<br>' + html.bigsection(\n 'Keywords', contents, css_class=\"keywords\")\n return 'Keywords', contents\n\n def html_topicpage(topic):\n \"\"\"Topic or keyword help page.\"\"\"\n buf = io.StringIO()\n htmlhelp = Helper(buf, buf)\n contents, xrefs = htmlhelp._gettopic(topic)\n if topic in htmlhelp.keywords:\n title = 'Keyword'\n else:\n title = 'Topic'\n heading = html.heading(title)\n contents = '<pre>%s</pre>' % html.markup(contents)\n contents = html.bigsection(topic, contents, css_class=\"topics\")\n if xrefs:\n xrefs = sorted(xrefs.split())\n\n def bltinlink(name):\n return '<a href=\"topic?key=%s\">%s</a>' % (name, name)\n\n xrefs = html.multicolumn(xrefs, bltinlink)\n xrefs = html.html_section('Related help topics: ', xrefs,\n css_class=\"topics\")\n return ('%s %s' % (title, topic),\n ''.join((heading, contents, xrefs)))\n\n def html_getobj(url):\n obj = locate(url, forceload=1)\n if obj is None and url != 'None':\n raise ValueError(\n _('There was an error while retrieving documentation '\n 'for the object you requested: Object could not be found'))\n title = describe(obj)\n content = html.document(obj, url)\n return title, content\n\n def html_error(url, exc):\n heading = html.heading('Error')\n if DEV:\n contents = '<br>'.join(html.escape(line) for line in\n format_exception_only(type(exc), exc))\n else:\n contents = '%s' % to_text_string(exc)\n contents = heading + html.bigsection(url, contents, css_class=\"error\")\n return \"Error - %s\" % 
url, contents\n\n def get_html_page(url):\n \"\"\"Generate an HTML page for url.\"\"\"\n complete_url = url\n if url.endswith('.html'):\n url = url[:-5]\n try:\n if url in (\"\", \"index\"):\n title, content = html_index()\n elif url == \"topics\":\n title, content = html_topics()\n elif url == \"keywords\":\n title, content = html_keywords()\n elif '=' in url:\n op, _, url = url.partition('=')\n if op == \"search?key\":\n title, content = html_search(url)\n elif op == \"getfile?key\":\n title, content = html_getfile(url)\n elif op == \"topic?key\":\n # try topics first, then objects.\n try:\n title, content = html_topicpage(url)\n except ValueError:\n title, content = html_getobj(url)\n elif op == \"get?key\":\n # try objects first, then topics.\n if url in (\"\", \"index\"):\n title, content = html_index()\n else:\n try:\n title, content = html_getobj(url)\n except ValueError:\n title, content = html_topicpage(url)\n else:\n raise ValueError(\n _('There was an error while retrieving documentation '\n 'for the object you requested: Bad URL %s') % url)\n else:\n title, content = html_getobj(url)\n except Exception as exc:\n # Catch any errors and display them in an error page.\n title, content = html_error(complete_url, exc)\n return html.page(title, content)\n\n if url.startswith('/'):\n url = url[1:]\n if content_type == 'text/css':\n path_here = os.path.dirname(os.path.realpath(__file__))\n css_path = os.path.join(path_here, url)\n with open(css_path) as fp:\n return ''.join(fp.readlines())\n elif content_type == 'text/html':\n return get_html_page(url)\n # Errors outside the url handler are caught by the server.\n raise TypeError(\n _('There was an error while retrieving documentation '\n 'for the object you requested: unknown content type %r for url %s')\n % (content_type, url))\n\n\ndef _start_server(urlhandler, hostname, port):\n \"\"\"\n Start an HTTP server thread on a specific port.\n\n This is a reimplementation of `pydoc._start_server` to handle connection\n errors for 'do_GET'.\n\n Taken from PyDoc: https://github.com/python/cpython/blob/3.7/Lib/pydoc.py\n \"\"\"\n import http.server\n import email.message\n import select\n import threading\n import time\n\n class DocHandler(http.server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n \"\"\"Process a request from an HTML browser.\n\n The URL received is in self.path.\n Get an HTML page from self.urlhandler and send it.\n \"\"\"\n if self.path.endswith('.css'):\n content_type = 'text/css'\n else:\n content_type = 'text/html'\n self.send_response(200)\n self.send_header(\n 'Content-Type', '%s; charset=UTF-8' % content_type)\n self.end_headers()\n try:\n self.wfile.write(self.urlhandler(\n self.path, content_type).encode('utf-8'))\n except ConnectionAbortedError:\n # Needed to handle error when client closes the connection,\n # for example when the client stops the load of the previously\n # requested page. 
See spyder-ide/spyder#10755\n pass\n except BrokenPipeError:\n # Needed to handle permission error when trying to open a port\n # for the web server of the online help.\n # See spyder-ide/spyder#13388\n pass\n\n def log_message(self, *args):\n # Don't log messages.\n pass\n\n class DocServer(http.server.HTTPServer):\n\n def __init__(self, host, port, callback):\n self.host = host\n self.address = (self.host, port)\n self.callback = callback\n self.base.__init__(self, self.address, self.handler)\n self.quit = False\n\n def serve_until_quit(self):\n while not self.quit:\n rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)\n if rd:\n self.handle_request()\n self.server_close()\n\n def server_activate(self):\n self.base.server_activate(self)\n if self.callback:\n self.callback(self)\n\n class ServerThread(threading.Thread):\n\n def __init__(self, urlhandler, host, port):\n self.urlhandler = urlhandler\n self.host = host\n self.port = int(port)\n threading.Thread.__init__(self)\n self.serving = False\n self.error = None\n\n def run(self):\n \"\"\"Start the server.\"\"\"\n try:\n DocServer.base = http.server.HTTPServer\n DocServer.handler = DocHandler\n DocHandler.MessageClass = email.message.Message\n DocHandler.urlhandler = staticmethod(self.urlhandler)\n docsvr = DocServer(self.host, self.port, self.ready)\n self.docserver = docsvr\n docsvr.serve_until_quit()\n except Exception as e:\n self.error = e\n\n def ready(self, server):\n self.serving = True\n self.host = server.host\n self.port = server.server_port\n self.url = 'http://%s:%d/' % (self.host, self.port)\n\n def stop(self):\n \"\"\"Stop the server and this thread nicely.\"\"\"\n self.docserver.quit = True\n self.join()\n # explicitly break a reference cycle: DocServer.callback\n # has indirectly a reference to ServerThread.\n self.docserver = None\n self.serving = False\n self.url = None\n\n thread = ServerThread(urlhandler, hostname, port)\n thread.start()\n # Wait until thread.serving is True to make sure we are\n # really up before returning.\n while not thread.error and not thread.serving:\n time.sleep(.01)\n return thread\n", "path": "spyder/plugins/onlinehelp/pydoc_patch.py" } ]
diff --git a/spyder/plugins/onlinehelp/pydoc_patch.py b/spyder/plugins/onlinehelp/pydoc_patch.py
index ac4d6b136a6..c43ae379775 100644
--- a/spyder/plugins/onlinehelp/pydoc_patch.py
+++ b/spyder/plugins/onlinehelp/pydoc_patch.py
@@ -639,7 +639,7 @@ def page(self, title, contents):
                 css_path = "static/css/light_pydoc.css"
 
             css_link = (
-                '<link rel="stylesheet" type="text/css" href="%s">' %
+                '<link rel="stylesheet" type="text/css" href="/%s">' %
                 css_path)
 
             code_style = (
facebookresearch__fairseq-200
Assertion Error in preprocessing of FAIR Sequence-to-Sequence Toolkit for Story Generation

When I run

$ TEXT=examples/stories/writingPrompts
$ python preprocess.py --source-lang wp_source --target-lang wp_target \
  --trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test \
  --destdir data-bin/writingPrompts --thresholdtgt 10 --thresholdsrc 10

I get an AssertionError:

Traceback (most recent call last):
  File "preprocess.py", line 201, in <module>
    main(args)
  File "preprocess.py", line 101, in main
    padding_factor=args.padding_factor,
  File "/home/username/workspace/benchmarking-tools/fairseq/fairseq/data/dictionary.py", line 137, in finalize
    assert len(new_count) == self.nspecial or min(new_count[self.nspecial:]) >= threshold
AssertionError

Upon inspection I see that min(new_count[self.nspecial:]) is 0.
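For context, here is a condensed, standalone sketch of the logic in `Dictionary.finalize` that trips the assertion; the symbol counts below are invented for illustration. The padding loop appends `madeupword` entries with a count of 0, so as soon as a positive `--thresholdsrc`/`--thresholdtgt` is passed, `min(new_count[self.nspecial:])` is 0 and the check at `dictionary.py:137` fails.

```python
# Condensed from Dictionary.finalize() in fairseq/data/dictionary.py;
# the counts are illustrative, not taken from the writingPrompts data.
nspecial = 4                            # '<Lua heritage>', '<pad>', '</s>', '<unk>'
threshold = 10                          # --thresholdsrc 10 / --thresholdtgt 10
new_count = [0, 0, 0, 0, 25, 17, 12]    # specials + words that survived the threshold

padding_factor = 8
i = 0
while len(new_count) % padding_factor != 0:
    # finalize() appends 'madeupword{:04d}'.format(i) here -- always with count 0
    new_count.append(0)
    i += 1

# Line 137: the zero-count padding entries make this fail whenever threshold > 0.
assert len(new_count) == nspecial or min(new_count[nspecial:]) >= threshold
```

Running the snippet raises the same AssertionError that preprocess.py reports above.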
[ { "content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nfrom collections import Counter\nimport os\n\nimport torch\n\n\nclass Dictionary(object):\n \"\"\"A mapping from symbols to consecutive integers\"\"\"\n def __init__(self, pad='<pad>', eos='</s>', unk='<unk>'):\n self.unk_word, self.pad_word, self.eos_word = unk, pad, eos\n self.symbols = []\n self.count = []\n self.indices = {}\n # dictionary indexing starts at 1 for consistency with Lua\n self.add_symbol('<Lua heritage>')\n self.pad_index = self.add_symbol(pad)\n self.eos_index = self.add_symbol(eos)\n self.unk_index = self.add_symbol(unk)\n self.nspecial = len(self.symbols)\n\n def __eq__(self, other):\n return self.indices == other.indices\n\n def __getitem__(self, idx):\n if idx < len(self.symbols):\n return self.symbols[idx]\n return self.unk_word\n\n def __len__(self):\n \"\"\"Returns the number of symbols in the dictionary\"\"\"\n return len(self.symbols)\n\n def index(self, sym):\n \"\"\"Returns the index of the specified symbol\"\"\"\n if sym in self.indices:\n return self.indices[sym]\n return self.unk_index\n\n def string(self, tensor, bpe_symbol=None, escape_unk=False):\n \"\"\"Helper for converting a tensor of token indices to a string.\n\n Can optionally remove BPE symbols or escape <unk> words.\n \"\"\"\n if torch.is_tensor(tensor) and tensor.dim() == 2:\n return '\\n'.join(self.string(t) for t in tensor)\n\n def token_string(i):\n if i == self.unk():\n return self.unk_string(escape_unk)\n else:\n return self[i]\n\n sent = ' '.join(token_string(i) for i in tensor if i != self.eos())\n if bpe_symbol is not None:\n sent = (sent + ' ').replace(bpe_symbol, '').rstrip()\n return sent\n\n def unk_string(self, escape=False):\n \"\"\"Return unknown string, optionally escaped as: <<unk>>\"\"\"\n if escape:\n return '<{}>'.format(self.unk_word)\n else:\n return self.unk_word\n\n def add_symbol(self, word, n=1):\n \"\"\"Adds a word to the dictionary\"\"\"\n if word in self.indices:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + n\n return idx\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(n)\n return idx\n\n def update(self, new_dict):\n \"\"\"Updates counts from new dictionary.\"\"\"\n for word in new_dict.symbols:\n idx2 = new_dict.indices[word]\n if word in self.indices:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + new_dict.count[idx2]\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(new_dict.count[idx2])\n\n def finalize(self, threshold=-1, nwords=-1, padding_factor=8):\n \"\"\"Sort symbols by frequency in descending order, ignoring special ones.\n\n Args:\n - threshold defines the minimum word count\n - nwords defines the total number of words in the final dictionary,\n including special symbols\n - padding_factor can be used to pad the dictionary size to be a\n multiple of 8, which is important on some hardware (e.g., Nvidia\n Tensor Cores).\n \"\"\"\n if nwords <= 0:\n nwords = len(self)\n\n new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))\n new_symbols = self.symbols[:self.nspecial]\n new_count = self.count[:self.nspecial]\n\n c = Counter(dict(zip(self.symbols[self.nspecial:], 
self.count[self.nspecial:])))\n for symbol, count in c.most_common(nwords - self.nspecial):\n if count >= threshold:\n new_indices[symbol] = len(new_symbols)\n new_symbols.append(symbol)\n new_count.append(count)\n else:\n break\n\n threshold_nwords = len(new_symbols)\n if padding_factor > 1:\n i = 0\n while threshold_nwords % padding_factor != 0:\n symbol = 'madeupword{:04d}'.format(i)\n new_indices[symbol] = len(new_symbols)\n new_symbols.append(symbol)\n new_count.append(0)\n i += 1\n threshold_nwords += 1\n\n assert len(new_count) == self.nspecial or min(new_count[self.nspecial:]) >= threshold\n assert len(new_symbols) % padding_factor == 0\n assert len(new_symbols) == len(new_indices)\n\n self.count = list(new_count)\n self.symbols = list(new_symbols)\n self.indices = new_indices\n\n def pad(self):\n \"\"\"Helper to get index of pad symbol\"\"\"\n return self.pad_index\n\n def eos(self):\n \"\"\"Helper to get index of end-of-sentence symbol\"\"\"\n return self.eos_index\n\n def unk(self):\n \"\"\"Helper to get index of unk symbol\"\"\"\n return self.unk_index\n\n @classmethod\n def load(cls, f, ignore_utf_errors=False):\n \"\"\"Loads the dictionary from a text file with the format:\n\n ```\n <symbol0> <count0>\n <symbol1> <count1>\n ...\n ```\n \"\"\"\n if isinstance(f, str):\n try:\n if not ignore_utf_errors:\n with open(f, 'r', encoding='utf-8') as fd:\n return cls.load(fd)\n else:\n with open(f, 'r', encoding='utf-8', errors='ignore') as fd:\n return cls.load(fd)\n except FileNotFoundError as fnfe:\n raise fnfe\n except Exception:\n raise Exception(\"Incorrect encoding detected in {}, please \"\n \"rebuild the dataset\".format(f))\n\n d = cls()\n for line in f.readlines():\n idx = line.rfind(' ')\n word = line[:idx]\n count = int(line[idx+1:])\n d.indices[word] = len(d.symbols)\n d.symbols.append(word)\n d.count.append(count)\n return d\n\n def save(self, f):\n \"\"\"Stores dictionary into a text file\"\"\"\n if isinstance(f, str):\n os.makedirs(os.path.dirname(f), exist_ok=True)\n with open(f, 'w', encoding='utf-8') as fd:\n return self.save(fd)\n for symbol, count in zip(self.symbols[self.nspecial:], self.count[self.nspecial:]):\n print('{} {}'.format(symbol, count), file=f)\n\n def dummy_sentence(self, length):\n t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()\n t[-1] = self.eos()\n return t\n", "path": "fairseq/data/dictionary.py" } ]
[ { "content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nfrom collections import Counter\nimport os\n\nimport torch\n\n\nclass Dictionary(object):\n \"\"\"A mapping from symbols to consecutive integers\"\"\"\n def __init__(self, pad='<pad>', eos='</s>', unk='<unk>'):\n self.unk_word, self.pad_word, self.eos_word = unk, pad, eos\n self.symbols = []\n self.count = []\n self.indices = {}\n # dictionary indexing starts at 1 for consistency with Lua\n self.add_symbol('<Lua heritage>')\n self.pad_index = self.add_symbol(pad)\n self.eos_index = self.add_symbol(eos)\n self.unk_index = self.add_symbol(unk)\n self.nspecial = len(self.symbols)\n\n def __eq__(self, other):\n return self.indices == other.indices\n\n def __getitem__(self, idx):\n if idx < len(self.symbols):\n return self.symbols[idx]\n return self.unk_word\n\n def __len__(self):\n \"\"\"Returns the number of symbols in the dictionary\"\"\"\n return len(self.symbols)\n\n def index(self, sym):\n \"\"\"Returns the index of the specified symbol\"\"\"\n if sym in self.indices:\n return self.indices[sym]\n return self.unk_index\n\n def string(self, tensor, bpe_symbol=None, escape_unk=False):\n \"\"\"Helper for converting a tensor of token indices to a string.\n\n Can optionally remove BPE symbols or escape <unk> words.\n \"\"\"\n if torch.is_tensor(tensor) and tensor.dim() == 2:\n return '\\n'.join(self.string(t) for t in tensor)\n\n def token_string(i):\n if i == self.unk():\n return self.unk_string(escape_unk)\n else:\n return self[i]\n\n sent = ' '.join(token_string(i) for i in tensor if i != self.eos())\n if bpe_symbol is not None:\n sent = (sent + ' ').replace(bpe_symbol, '').rstrip()\n return sent\n\n def unk_string(self, escape=False):\n \"\"\"Return unknown string, optionally escaped as: <<unk>>\"\"\"\n if escape:\n return '<{}>'.format(self.unk_word)\n else:\n return self.unk_word\n\n def add_symbol(self, word, n=1):\n \"\"\"Adds a word to the dictionary\"\"\"\n if word in self.indices:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + n\n return idx\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(n)\n return idx\n\n def update(self, new_dict):\n \"\"\"Updates counts from new dictionary.\"\"\"\n for word in new_dict.symbols:\n idx2 = new_dict.indices[word]\n if word in self.indices:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + new_dict.count[idx2]\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(new_dict.count[idx2])\n\n def finalize(self, threshold=-1, nwords=-1, padding_factor=8):\n \"\"\"Sort symbols by frequency in descending order, ignoring special ones.\n\n Args:\n - threshold defines the minimum word count\n - nwords defines the total number of words in the final dictionary,\n including special symbols\n - padding_factor can be used to pad the dictionary size to be a\n multiple of 8, which is important on some hardware (e.g., Nvidia\n Tensor Cores).\n \"\"\"\n if nwords <= 0:\n nwords = len(self)\n\n new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))\n new_symbols = self.symbols[:self.nspecial]\n new_count = self.count[:self.nspecial]\n\n c = Counter(dict(zip(self.symbols[self.nspecial:], 
self.count[self.nspecial:])))\n for symbol, count in c.most_common(nwords - self.nspecial):\n if count >= threshold:\n new_indices[symbol] = len(new_symbols)\n new_symbols.append(symbol)\n new_count.append(count)\n else:\n break\n\n threshold_nwords = len(new_symbols)\n if padding_factor > 1:\n i = 0\n while threshold_nwords % padding_factor != 0:\n symbol = 'madeupword{:04d}'.format(i)\n new_indices[symbol] = len(new_symbols)\n new_symbols.append(symbol)\n new_count.append(0)\n i += 1\n threshold_nwords += 1\n\n assert len(new_symbols) % padding_factor == 0\n assert len(new_symbols) == len(new_indices)\n\n self.count = list(new_count)\n self.symbols = list(new_symbols)\n self.indices = new_indices\n\n def pad(self):\n \"\"\"Helper to get index of pad symbol\"\"\"\n return self.pad_index\n\n def eos(self):\n \"\"\"Helper to get index of end-of-sentence symbol\"\"\"\n return self.eos_index\n\n def unk(self):\n \"\"\"Helper to get index of unk symbol\"\"\"\n return self.unk_index\n\n @classmethod\n def load(cls, f, ignore_utf_errors=False):\n \"\"\"Loads the dictionary from a text file with the format:\n\n ```\n <symbol0> <count0>\n <symbol1> <count1>\n ...\n ```\n \"\"\"\n if isinstance(f, str):\n try:\n if not ignore_utf_errors:\n with open(f, 'r', encoding='utf-8') as fd:\n return cls.load(fd)\n else:\n with open(f, 'r', encoding='utf-8', errors='ignore') as fd:\n return cls.load(fd)\n except FileNotFoundError as fnfe:\n raise fnfe\n except Exception:\n raise Exception(\"Incorrect encoding detected in {}, please \"\n \"rebuild the dataset\".format(f))\n\n d = cls()\n for line in f.readlines():\n idx = line.rfind(' ')\n word = line[:idx]\n count = int(line[idx+1:])\n d.indices[word] = len(d.symbols)\n d.symbols.append(word)\n d.count.append(count)\n return d\n\n def save(self, f):\n \"\"\"Stores dictionary into a text file\"\"\"\n if isinstance(f, str):\n os.makedirs(os.path.dirname(f), exist_ok=True)\n with open(f, 'w', encoding='utf-8') as fd:\n return self.save(fd)\n for symbol, count in zip(self.symbols[self.nspecial:], self.count[self.nspecial:]):\n print('{} {}'.format(symbol, count), file=f)\n\n def dummy_sentence(self, length):\n t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()\n t[-1] = self.eos()\n return t\n", "path": "fairseq/data/dictionary.py" } ]
diff --git a/fairseq/data/dictionary.py b/fairseq/data/dictionary.py index cdfc286a6e..04bd696ed7 100644 --- a/fairseq/data/dictionary.py +++ b/fairseq/data/dictionary.py @@ -133,7 +133,6 @@ def finalize(self, threshold=-1, nwords=-1, padding_factor=8): i += 1 threshold_nwords += 1 - assert len(new_count) == self.nspecial or min(new_count[self.nspecial:]) >= threshold assert len(new_symbols) % padding_factor == 0 assert len(new_symbols) == len(new_indices)
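Context on the removed assertion, inferred from the `finalize` code above rather than from the upstream discussion: the padding loop appends `madeupword` symbols with a count of 0, so whenever a positive `threshold` is combined with padding (`padding_factor > 1`), `min(new_count[self.nspecial:])` is 0 and the old assert fires even though the finalized dictionary is valid. A minimal sketch that trips the pre-patch assertion:

```
from fairseq.data.dictionary import Dictionary

d = Dictionary()                          # 4 special symbols by default
for i in range(5):
    d.add_symbol('word{}'.format(i), n=10)

# 4 specials + 5 kept words = 9 symbols, so padding_factor=8 appends 7
# 'madeupword' entries with count 0. Pre-patch, min(new_count[nspecial:])
# is then 0 < threshold and the assert raises AssertionError; post-patch
# this finalizes normally.
d.finalize(threshold=2, padding_factor=8)
print(len(d))                             # 16, a multiple of 8
```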
python__mypy-8251
Bug: Mypy uses MD5 which causes a traceback in FIPS environment Hello, Mypy does not work when FIPS (Federal Information Processing Standards) is enabled on RHEL 8.1. The problem is that mypy is calculating MD5 hashes of files and crashes with a traceback: ``` # python3 -m mypy -p pcs --show-traceback --no-incremental /root/devel/pcs/pcs/__init__.py: error: INTERNAL ERROR -- Please try using mypy master on Github: https://mypy.rtfd.io/en/latest/common_issues.html#using-a-development-mypy-build Please report a bug at https://github.com/python/mypy/issues version: 0.740 Traceback (most recent call last): File "/usr/lib64/python3.6/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/lib64/python3.6/runpy.py", line 85, in _run_code exec(code, run_globals) File "mypy/build.py", line 1845, in wrap_context File "mypy/build.py", line 1945, in parse_file File "mypy/fscache.py", line 259, in read ValueError: [digital envelope routines: EVP_DigestInit_ex] disabled for FIPS /root/devel/pcs/pcs/__init__.py: : note: use --pdb to drop into pdb ``` `__init__.py` is an empty file. Upstream version doesn't work either: ``` # python3 -m mypy -p pcs --show-traceback --no-incremental /root/devel/pcs/pcs/__init__.py: error: INTERNAL ERROR -- Please try using mypy master on Github: https://mypy.rtfd.io/en/latest/common_issues.html#using-a-development-mypy-build Please report a bug at https://github.com/python/mypy/issues version: 0.750+dev.e99a2b57e160785b09f669e28d68f38d1970c476 Traceback (most recent call last): File "/usr/lib64/python3.6/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/lib64/python3.6/runpy.py", line 85, in _run_code exec(code, run_globals) File "/usr/local/lib/python3.6/site-packages/mypy/__main__.py", line 12, in <module> main(None, sys.stdout, sys.stderr) File "/usr/local/lib/python3.6/site-packages/mypy/main.py", line 89, in main res = build.build(sources, options, None, flush_errors, fscache, stdout, stderr) File "/usr/local/lib/python3.6/site-packages/mypy/build.py", line 167, in build sources, options, alt_lib_path, flush_errors, fscache, stdout, stderr, extra_plugins File "/usr/local/lib/python3.6/site-packages/mypy/build.py", line 235, in _build graph = dispatch(sources, manager, stdout) File "/usr/local/lib/python3.6/site-packages/mypy/build.py", line 2567, in dispatch graph = load_graph(sources, manager) File "/usr/local/lib/python3.6/site-packages/mypy/build.py", line 2726, in load_graph root_source=True) File "/usr/local/lib/python3.6/site-packages/mypy/build.py", line 1825, in __init__ self.parse_file() File "/usr/local/lib/python3.6/site-packages/mypy/build.py", line 2016, in parse_file self.ignore_all or self.options.ignore_errors) File "/usr/lib64/python3.6/contextlib.py", line 99, in __exit__ self.gen.throw(type, value, traceback) File "/usr/local/lib/python3.6/site-packages/mypy/build.py", line 1894, in wrap_context yield File "/usr/local/lib/python3.6/site-packages/mypy/build.py", line 1994, in parse_file source = decode_python_encoding(manager.fscache.read(path), File "/usr/local/lib/python3.6/site-packages/mypy/fscache.py", line 259, in read md5hash = hashlib.md5(data).hexdigest() ValueError: [digital envelope routines: EVP_DigestInit_ex] disabled for FIPS /root/devel/pcs/pcs/__init__.py: : note: use --pdb to drop into pdb ``` Python version: ``` Python 3.6.8 (default, Oct 11 2019, 15:04:54) [GCC 8.3.1 20190507 (Red Hat 8.3.1-4)] on linux ``` Perhaps SHA1 could be used instead of MD5: ``` >>> import hashlib >>> 
hashlib.md5(b"test") Traceback (most recent call last): File "<stdin>", line 1, in <module> ValueError: [digital envelope routines: EVP_DigestInit_ex] disabled for FIPS >>> hashlib.sha1(b"test") <sha1 HASH object @ 0x7fdaf70689e0> ``` Thanks for looking into this.
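The failure is straightforward to reproduce outside mypy; a minimal sketch (on a machine without FIPS enforcement the `md5` call simply succeeds and the `except` branch is skipped):

```
import hashlib

data = b"test"

try:
    print(hashlib.md5(data).hexdigest())   # ValueError when OpenSSL enforces FIPS
except ValueError as exc:
    print("md5 unavailable under FIPS:", exc)

# SHA-family digests stay available in FIPS mode
print(hashlib.sha256(data).hexdigest())
```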
[ { "content": "\"\"\"Utility functions with no non-trivial dependencies.\"\"\"\nimport os\nimport pathlib\nimport re\nimport subprocess\nimport sys\nimport hashlib\n\nfrom typing import (\n TypeVar, List, Tuple, Optional, Dict, Sequence, Iterable, Container, IO, Callable\n)\nfrom typing_extensions import Final, Type, Literal\n\ntry:\n import curses\n import _curses # noqa\n CURSES_ENABLED = True\nexcept ImportError:\n CURSES_ENABLED = False\n\nT = TypeVar('T')\n\nENCODING_RE = \\\n re.compile(br'([ \\t\\v]*#.*(\\r\\n?|\\n))??[ \\t\\v]*#.*coding[:=][ \\t]*([-\\w.]+)') # type: Final\n\n# This works in most default terminals works (because it is ANSI standard). The problem\n# this tries to solve is that although it is a basic ANSI \"feature\", terminfo files\n# for most default terminals don't have dim termcap entry, so curses doesn't report it.\n# Potentially, we can choose a grey color that would look good on both white and black\n# background, but it is not easy, and again most default terminals are 8-color, not 256-color,\n# so we can't get the color code from curses.\nPLAIN_ANSI_DIM = '\\x1b[2m' # type: Final\n\nDEFAULT_SOURCE_OFFSET = 4 # type: Final\nDEFAULT_COLUMNS = 80 # type: Final\n\n# At least this number of columns will be shown on each side of\n# error location when printing source code snippet.\nMINIMUM_WIDTH = 20\n\n# VT100 color code processing was added in Windows 10, but only the second major update,\n# Threshold 2. Fortunately, everyone (even on LTSB, Long Term Support Branch) should\n# have a version of Windows 10 newer than this. Note that Windows 8 and below are not\n# supported, but are either going out of support, or make up only a few % of the market.\nMINIMUM_WINDOWS_MAJOR_VT100 = 10\nMINIMUM_WINDOWS_BUILD_VT100 = 10586\n\ndefault_python2_interpreter = \\\n ['python2', 'python', '/usr/bin/python', 'C:\\\\Python27\\\\python.exe'] # type: Final\n\n\ndef split_module_names(mod_name: str) -> List[str]:\n \"\"\"Return the module and all parent module names.\n\n So, if `mod_name` is 'a.b.c', this function will return\n ['a.b.c', 'a.b', and 'a'].\n \"\"\"\n out = [mod_name]\n while '.' in mod_name:\n mod_name = mod_name.rsplit('.', 1)[0]\n out.append(mod_name)\n return out\n\n\ndef module_prefix(modules: Iterable[str], target: str) -> Optional[str]:\n result = split_target(modules, target)\n if result is None:\n return None\n return result[0]\n\n\ndef split_target(modules: Iterable[str], target: str) -> Optional[Tuple[str, str]]:\n remaining = [] # type: List[str]\n while True:\n if target in modules:\n return target, '.'.join(remaining)\n components = target.rsplit('.', 1)\n if len(components) == 1:\n return None\n target = components[0]\n remaining.insert(0, components[1])\n\n\ndef short_type(obj: object) -> str:\n \"\"\"Return the last component of the type name of an object.\n\n If obj is None, return 'nil'. 
For example, if obj is 1, return 'int'.\n \"\"\"\n if obj is None:\n return 'nil'\n t = str(type(obj))\n return t.split('.')[-1].rstrip(\"'>\")\n\n\ndef find_python_encoding(text: bytes, pyversion: Tuple[int, int]) -> Tuple[str, int]:\n \"\"\"PEP-263 for detecting Python file encoding\"\"\"\n result = ENCODING_RE.match(text)\n if result:\n line = 2 if result.group(1) else 1\n encoding = result.group(3).decode('ascii')\n # Handle some aliases that Python is happy to accept and that are used in the wild.\n if encoding.startswith(('iso-latin-1-', 'latin-1-')) or encoding == 'iso-latin-1':\n encoding = 'latin-1'\n return encoding, line\n else:\n default_encoding = 'utf8' if pyversion[0] >= 3 else 'ascii'\n return default_encoding, -1\n\n\nclass DecodeError(Exception):\n \"\"\"Exception raised when a file cannot be decoded due to an unknown encoding type.\n\n Essentially a wrapper for the LookupError raised by `bytearray.decode`\n \"\"\"\n\n\ndef decode_python_encoding(source: bytes, pyversion: Tuple[int, int]) -> str:\n \"\"\"Read the Python file with while obeying PEP-263 encoding detection.\n\n Returns the source as a string.\n \"\"\"\n # check for BOM UTF-8 encoding and strip it out if present\n if source.startswith(b'\\xef\\xbb\\xbf'):\n encoding = 'utf8'\n source = source[3:]\n else:\n # look at first two lines and check if PEP-263 coding is present\n encoding, _ = find_python_encoding(source, pyversion)\n\n try:\n source_text = source.decode(encoding)\n except LookupError as lookuperr:\n raise DecodeError(str(lookuperr))\n return source_text\n\n\ndef read_py_file(path: str, read: Callable[[str], bytes],\n pyversion: Tuple[int, int]) -> Optional[List[str]]:\n \"\"\"Try reading a Python file as list of source lines.\n\n Return None if something goes wrong.\n \"\"\"\n try:\n source = read(path)\n except OSError:\n return None\n else:\n try:\n source_lines = decode_python_encoding(source, pyversion).splitlines()\n except DecodeError:\n return None\n return source_lines\n\n\ndef trim_source_line(line: str, max_len: int, col: int, min_width: int) -> Tuple[str, int]:\n \"\"\"Trim a line of source code to fit into max_len.\n\n Show 'min_width' characters on each side of 'col' (an error location). If either\n start or end is trimmed, this is indicated by adding '...' there.\n A typical result looks like this:\n ...some_variable = function_to_call(one_arg, other_arg) or...\n\n Return the trimmed string and the column offset to to adjust error location.\n \"\"\"\n if max_len < 2 * min_width + 1:\n # In case the window is too tiny it is better to still show something.\n max_len = 2 * min_width + 1\n\n # Trivial case: line already fits in.\n if len(line) <= max_len:\n return line, 0\n\n # If column is not too large so that there is still min_width after it,\n # the line doesn't need to be trimmed at the start.\n if col + min_width < max_len:\n return line[:max_len] + '...', 0\n\n # Otherwise, if the column is not too close to the end, trim both sides.\n if col < len(line) - min_width - 1:\n offset = col - max_len + min_width + 1\n return '...' + line[offset:col + min_width + 1] + '...', offset - 3\n\n # Finally, if the column is near the end, just trim the start.\n return '...' 
+ line[-max_len:], len(line) - max_len - 3\n\n\ndef get_mypy_comments(source: str) -> List[Tuple[int, str]]:\n PREFIX = '# mypy: '\n # Don't bother splitting up the lines unless we know it is useful\n if PREFIX not in source:\n return []\n lines = source.split('\\n')\n results = []\n for i, line in enumerate(lines):\n if line.startswith(PREFIX):\n results.append((i + 1, line[len(PREFIX):]))\n\n return results\n\n\n_python2_interpreter = None # type: Optional[str]\n\n\ndef try_find_python2_interpreter() -> Optional[str]:\n global _python2_interpreter\n if _python2_interpreter:\n return _python2_interpreter\n for interpreter in default_python2_interpreter:\n try:\n retcode = subprocess.Popen([\n interpreter, '-c',\n 'import sys, typing; assert sys.version_info[:2] == (2, 7)'\n ]).wait()\n if not retcode:\n _python2_interpreter = interpreter\n return interpreter\n except OSError:\n pass\n return None\n\n\nPASS_TEMPLATE = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<testsuite errors=\"0\" failures=\"0\" name=\"mypy\" skips=\"0\" tests=\"1\" time=\"{time:.3f}\">\n <testcase classname=\"mypy\" file=\"mypy\" line=\"1\" name=\"mypy-py{ver}-{platform}\" time=\"{time:.3f}\">\n </testcase>\n</testsuite>\n\"\"\" # type: Final\n\nFAIL_TEMPLATE = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<testsuite errors=\"0\" failures=\"1\" name=\"mypy\" skips=\"0\" tests=\"1\" time=\"{time:.3f}\">\n <testcase classname=\"mypy\" file=\"mypy\" line=\"1\" name=\"mypy-py{ver}-{platform}\" time=\"{time:.3f}\">\n <failure message=\"mypy produced messages\">{text}</failure>\n </testcase>\n</testsuite>\n\"\"\" # type: Final\n\nERROR_TEMPLATE = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<testsuite errors=\"1\" failures=\"0\" name=\"mypy\" skips=\"0\" tests=\"1\" time=\"{time:.3f}\">\n <testcase classname=\"mypy\" file=\"mypy\" line=\"1\" name=\"mypy-py{ver}-{platform}\" time=\"{time:.3f}\">\n <error message=\"mypy produced errors\">{text}</error>\n </testcase>\n</testsuite>\n\"\"\" # type: Final\n\n\ndef write_junit_xml(dt: float, serious: bool, messages: List[str], path: str,\n version: str, platform: str) -> None:\n from xml.sax.saxutils import escape\n if not messages and not serious:\n xml = PASS_TEMPLATE.format(time=dt, ver=version, platform=platform)\n elif not serious:\n xml = FAIL_TEMPLATE.format(text=escape('\\n'.join(messages)), time=dt,\n ver=version, platform=platform)\n else:\n xml = ERROR_TEMPLATE.format(text=escape('\\n'.join(messages)), time=dt,\n ver=version, platform=platform)\n\n # checks for a directory structure in path and creates folders if needed\n xml_dirs = os.path.dirname(os.path.abspath(path))\n if not os.path.isdir(xml_dirs):\n os.makedirs(xml_dirs)\n\n with open(path, 'wb') as f:\n f.write(xml.encode('utf-8'))\n\n\nclass IdMapper:\n \"\"\"Generate integer ids for objects.\n\n Unlike id(), these start from 0 and increment by 1, and ids won't\n get reused across the life-time of IdMapper.\n\n Assume objects don't redefine __eq__ or __hash__.\n \"\"\"\n\n def __init__(self) -> None:\n self.id_map = {} # type: Dict[object, int]\n self.next_id = 0\n\n def id(self, o: object) -> int:\n if o not in self.id_map:\n self.id_map[o] = self.next_id\n self.next_id += 1\n return self.id_map[o]\n\n\ndef get_prefix(fullname: str) -> str:\n \"\"\"Drop the final component of a qualified name (e.g. 
('x.y' -> 'x').\"\"\"\n return fullname.rsplit('.', 1)[0]\n\n\ndef correct_relative_import(cur_mod_id: str,\n relative: int,\n target: str,\n is_cur_package_init_file: bool) -> Tuple[str, bool]:\n if relative == 0:\n return target, True\n parts = cur_mod_id.split(\".\")\n rel = relative\n if is_cur_package_init_file:\n rel -= 1\n ok = len(parts) >= rel\n if rel != 0:\n cur_mod_id = \".\".join(parts[:-rel])\n return cur_mod_id + ((\".\" + target) if target else \"\"), ok\n\n\nfields_cache = {} # type: Final[Dict[Type[object], List[str]]]\n\n\ndef get_class_descriptors(cls: 'Type[object]') -> Sequence[str]:\n import inspect # Lazy import for minor startup speed win\n # Maintain a cache of type -> attributes defined by descriptors in the class\n # (that is, attributes from __slots__ and C extension classes)\n if cls not in fields_cache:\n members = inspect.getmembers(\n cls,\n lambda o: inspect.isgetsetdescriptor(o) or inspect.ismemberdescriptor(o))\n fields_cache[cls] = [x for x, y in members if x != '__weakref__' and x != '__dict__']\n return fields_cache[cls]\n\n\ndef replace_object_state(new: object, old: object, copy_dict: bool = False) -> None:\n \"\"\"Copy state of old node to the new node.\n\n This handles cases where there is __dict__ and/or attribute descriptors\n (either from slots or because the type is defined in a C extension module).\n\n Assume that both objects have the same __class__.\n \"\"\"\n if hasattr(old, '__dict__'):\n if copy_dict:\n new.__dict__ = dict(old.__dict__)\n else:\n new.__dict__ = old.__dict__\n\n for attr in get_class_descriptors(old.__class__):\n try:\n if hasattr(old, attr):\n setattr(new, attr, getattr(old, attr))\n elif hasattr(new, attr):\n delattr(new, attr)\n # There is no way to distinguish getsetdescriptors that allow\n # writes from ones that don't (I think?), so we just ignore\n # AttributeErrors if we need to.\n # TODO: What about getsetdescriptors that act like properties???\n except AttributeError:\n pass\n\n\ndef is_sub_path(path1: str, path2: str) -> bool:\n \"\"\"Given two paths, return if path1 is a sub-path of path2.\"\"\"\n return pathlib.Path(path2) in pathlib.Path(path1).parents\n\n\ndef hard_exit(status: int = 0) -> None:\n \"\"\"Kill the current process without fully cleaning up.\n\n This can be quite a bit faster than a normal exit() since objects are not freed.\n \"\"\"\n sys.stdout.flush()\n sys.stderr.flush()\n os._exit(status)\n\n\ndef unmangle(name: str) -> str:\n \"\"\"Remove internal suffixes from a short name.\"\"\"\n return name.rstrip(\"'\")\n\n\ndef get_unique_redefinition_name(name: str, existing: Container[str]) -> str:\n \"\"\"Get a simple redefinition name not present among existing.\n\n For example, for name 'foo' we try 'foo-redefinition', 'foo-redefinition2',\n 'foo-redefinition3', etc. 
until we find one that is not in existing.\n \"\"\"\n r_name = name + '-redefinition'\n if r_name not in existing:\n return r_name\n\n i = 2\n while r_name + str(i) in existing:\n i += 1\n return r_name + str(i)\n\n\ndef check_python_version(program: str) -> None:\n \"\"\"Report issues with the Python used to run mypy, dmypy, or stubgen\"\"\"\n # Check for known bad Python versions.\n if sys.version_info[:2] < (3, 5):\n sys.exit(\"Running {name} with Python 3.4 or lower is not supported; \"\n \"please upgrade to 3.5 or newer\".format(name=program))\n # this can be deleted once we drop support for 3.5\n if sys.version_info[:3] == (3, 5, 0):\n sys.exit(\"Running {name} with Python 3.5.0 is not supported; \"\n \"please upgrade to 3.5.1 or newer\".format(name=program))\n\n\ndef count_stats(errors: List[str]) -> Tuple[int, int]:\n \"\"\"Count total number of errors and files in error list.\"\"\"\n errors = [e for e in errors if ': error:' in e]\n files = {e.split(':')[0] for e in errors}\n return len(errors), len(files)\n\n\ndef split_words(msg: str) -> List[str]:\n \"\"\"Split line of text into words (but not within quoted groups).\"\"\"\n next_word = ''\n res = [] # type: List[str]\n allow_break = True\n for c in msg:\n if c == ' ' and allow_break:\n res.append(next_word)\n next_word = ''\n continue\n if c == '\"':\n allow_break = not allow_break\n next_word += c\n res.append(next_word)\n return res\n\n\ndef get_terminal_width() -> int:\n \"\"\"Get current terminal width if possible, otherwise return the default one.\"\"\"\n try:\n cols, _ = os.get_terminal_size()\n except OSError:\n return DEFAULT_COLUMNS\n else:\n if cols == 0:\n return DEFAULT_COLUMNS\n return cols\n\n\ndef soft_wrap(msg: str, max_len: int, first_offset: int,\n num_indent: int = 0) -> str:\n \"\"\"Wrap a long error message into few lines.\n\n Breaks will only happen between words, and never inside a quoted group\n (to avoid breaking types such as \"Union[int, str]\"). The 'first_offset' is\n the width before the start of first line.\n\n Pad every next line with 'num_indent' spaces. 
Every line will be at most 'max_len'\n characters, except if it is a single word or quoted group.\n\n For example:\n first_offset\n ------------------------\n path/to/file: error: 58: Some very long error message\n that needs to be split in separate lines.\n \"Long[Type, Names]\" are never split.\n ^^^^--------------------------------------------------\n num_indent max_len\n \"\"\"\n words = split_words(msg)\n next_line = words.pop(0)\n lines = [] # type: List[str]\n while words:\n next_word = words.pop(0)\n max_line_len = max_len - num_indent if lines else max_len - first_offset\n # Add 1 to account for space between words.\n if len(next_line) + len(next_word) + 1 <= max_line_len:\n next_line += ' ' + next_word\n else:\n lines.append(next_line)\n next_line = next_word\n lines.append(next_line)\n padding = '\\n' + ' ' * num_indent\n return padding.join(lines)\n\n\ndef hash_digest(data: bytes) -> str:\n \"\"\"Compute a hash digest of some data.\n\n We use a cryptographic hash because we want a low probability of\n accidental collision, but we don't really care about any of the\n cryptographic properties.\n \"\"\"\n return hashlib.md5(data).hexdigest()\n\n\nclass FancyFormatter:\n \"\"\"Apply color and bold font to terminal output.\n\n This currently only works on Linux and Mac.\n \"\"\"\n def __init__(self, f_out: IO[str], f_err: IO[str], show_error_codes: bool) -> None:\n self.show_error_codes = show_error_codes\n # Check if we are in a human-facing terminal on a supported platform.\n if sys.platform not in ('linux', 'darwin', 'win32'):\n self.dummy_term = True\n return\n force_color = int(os.getenv('MYPY_FORCE_COLOR', '0'))\n if not force_color and (not f_out.isatty() or not f_err.isatty()):\n self.dummy_term = True\n return\n if sys.platform == 'win32':\n self.dummy_term = not self.initialize_win_colors()\n else:\n self.dummy_term = not self.initialize_unix_colors()\n if not self.dummy_term:\n self.colors = {'red': self.RED, 'green': self.GREEN,\n 'blue': self.BLUE, 'yellow': self.YELLOW,\n 'none': ''}\n\n def initialize_win_colors(self) -> bool:\n \"\"\"Return True if initialization was successful and we can use colors, False otherwise\"\"\"\n # Windows ANSI escape sequences are only supported on Threshold 2 and above.\n # we check with an assert at runtime and an if check for mypy, as asserts do not\n # yet narrow platform\n assert sys.platform == 'win32'\n if sys.platform == 'win32':\n winver = sys.getwindowsversion()\n if (winver.major < MINIMUM_WINDOWS_MAJOR_VT100\n or winver.build < MINIMUM_WINDOWS_BUILD_VT100):\n return False\n import ctypes\n kernel32 = ctypes.windll.kernel32\n ENABLE_PROCESSED_OUTPUT = 0x1\n ENABLE_WRAP_AT_EOL_OUTPUT = 0x2\n ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4\n STD_OUTPUT_HANDLE = -11\n kernel32.SetConsoleMode(kernel32.GetStdHandle(STD_OUTPUT_HANDLE),\n ENABLE_PROCESSED_OUTPUT\n | ENABLE_WRAP_AT_EOL_OUTPUT\n | ENABLE_VIRTUAL_TERMINAL_PROCESSING)\n self.BOLD = '\\033[1m'\n self.UNDER = '\\033[4m'\n self.BLUE = '\\033[94m'\n self.GREEN = '\\033[92m'\n self.RED = '\\033[91m'\n self.YELLOW = '\\033[93m'\n self.NORMAL = '\\033[0m'\n self.DIM = '\\033[2m'\n return True\n return False\n\n def initialize_unix_colors(self) -> bool:\n \"\"\"Return True if initialization was successful and we can use colors, False otherwise\"\"\"\n if not CURSES_ENABLED:\n return False\n try:\n curses.setupterm()\n except curses.error:\n # Most likely terminfo not found.\n return False\n bold = curses.tigetstr('bold')\n under = curses.tigetstr('smul')\n set_color = 
curses.tigetstr('setaf')\n if not (bold and under and set_color):\n return False\n\n self.NORMAL = curses.tigetstr('sgr0').decode()\n self.BOLD = bold.decode()\n self.UNDER = under.decode()\n dim = curses.tigetstr('dim')\n # TODO: more reliable way to get gray color good for both dark and light schemes.\n self.DIM = dim.decode() if dim else PLAIN_ANSI_DIM\n\n self.BLUE = curses.tparm(set_color, curses.COLOR_BLUE).decode()\n self.GREEN = curses.tparm(set_color, curses.COLOR_GREEN).decode()\n self.RED = curses.tparm(set_color, curses.COLOR_RED).decode()\n self.YELLOW = curses.tparm(set_color, curses.COLOR_YELLOW).decode()\n return True\n\n def style(self, text: str, color: Literal['red', 'green', 'blue', 'yellow', 'none'],\n bold: bool = False, underline: bool = False, dim: bool = False) -> str:\n \"\"\"Apply simple color and style (underlined or bold).\"\"\"\n if self.dummy_term:\n return text\n if bold:\n start = self.BOLD\n else:\n start = ''\n if underline:\n start += self.UNDER\n if dim:\n start += self.DIM\n return start + self.colors[color] + text + self.NORMAL\n\n def fit_in_terminal(self, messages: List[str],\n fixed_terminal_width: Optional[int] = None) -> List[str]:\n \"\"\"Improve readability by wrapping error messages and trimming source code.\"\"\"\n width = (fixed_terminal_width or int(os.getenv('MYPY_FORCE_TERMINAL_WIDTH', '0')) or\n get_terminal_width())\n new_messages = messages.copy()\n for i, error in enumerate(messages):\n if ': error:' in error:\n loc, msg = error.split('error:', maxsplit=1)\n msg = soft_wrap(msg, width, first_offset=len(loc) + len('error: '))\n new_messages[i] = loc + 'error:' + msg\n if error.startswith(' ' * DEFAULT_SOURCE_OFFSET) and '^' not in error:\n # TODO: detecting source code highlights through an indent can be surprising.\n # Restore original error message and error location.\n error = error[DEFAULT_SOURCE_OFFSET:]\n column = messages[i+1].index('^') - DEFAULT_SOURCE_OFFSET\n\n # Let source have some space also on the right side, plus 6\n # to accommodate ... 
on each side.\n max_len = width - DEFAULT_SOURCE_OFFSET - 6\n source_line, offset = trim_source_line(error, max_len, column, MINIMUM_WIDTH)\n\n new_messages[i] = ' ' * DEFAULT_SOURCE_OFFSET + source_line\n # Also adjust the error marker position.\n new_messages[i+1] = ' ' * (DEFAULT_SOURCE_OFFSET + column - offset) + '^'\n return new_messages\n\n def colorize(self, error: str) -> str:\n \"\"\"Colorize an output line by highlighting the status and error code.\n\n If fixed_terminal_width is given, use it instead of calling get_terminal_width()\n (used by the daemon).\n \"\"\"\n if ': error:' in error:\n loc, msg = error.split('error:', maxsplit=1)\n if not self.show_error_codes:\n return (loc + self.style('error:', 'red', bold=True) +\n self.highlight_quote_groups(msg))\n codepos = msg.rfind('[')\n if codepos != -1:\n code = msg[codepos:]\n msg = msg[:codepos]\n else:\n code = \"\" # no error code specified\n return (loc + self.style('error:', 'red', bold=True) +\n self.highlight_quote_groups(msg) + self.style(code, 'yellow'))\n elif ': note:' in error:\n loc, msg = error.split('note:', maxsplit=1)\n return loc + self.style('note:', 'blue') + self.underline_link(msg)\n elif error.startswith(' ' * DEFAULT_SOURCE_OFFSET):\n # TODO: detecting source code highlights through an indent can be surprising.\n if '^' not in error:\n return self.style(error, 'none', dim=True)\n return self.style(error, 'red')\n else:\n return error\n\n def highlight_quote_groups(self, msg: str) -> str:\n \"\"\"Make groups quoted with double quotes bold (including quotes).\n\n This is used to highlight types, attribute names etc.\n \"\"\"\n if msg.count('\"') % 2:\n # Broken error message, don't do any formatting.\n return msg\n parts = msg.split('\"')\n out = ''\n for i, part in enumerate(parts):\n if i % 2 == 0:\n out += self.style(part, 'none')\n else:\n out += self.style('\"' + part + '\"', 'none', bold=True)\n return out\n\n def underline_link(self, note: str) -> str:\n \"\"\"Underline a link in a note message (if any).\n\n This assumes there is at most one link in the message.\n \"\"\"\n match = re.search(r'https?://\\S*', note)\n if not match:\n return note\n start = match.start()\n end = match.end()\n return (note[:start] +\n self.style(note[start:end], 'none', underline=True) +\n note[end:])\n\n def format_success(self, n_sources: int, use_color: bool = True) -> str:\n \"\"\"Format short summary in case of success.\n\n n_sources is total number of files passed directly on command line,\n i.e. excluding stubs and followed imports.\n \"\"\"\n msg = 'Success: no issues found in {}' \\\n ' source file{}'.format(n_sources, 's' if n_sources != 1 else '')\n if not use_color:\n return msg\n return self.style(msg, 'green', bold=True)\n\n def format_error(self, n_errors: int, n_files: int, n_sources: int,\n use_color: bool = True) -> str:\n \"\"\"Format a short summary in case of errors.\"\"\"\n msg = 'Found {} error{} in {} file{}' \\\n ' (checked {} source file{})'.format(n_errors, 's' if n_errors != 1 else '',\n n_files, 's' if n_files != 1 else '',\n n_sources, 's' if n_sources != 1 else '')\n if not use_color:\n return msg\n return self.style(msg, 'red', bold=True)\n", "path": "mypy/util.py" } ]
[ { "content": "\"\"\"Utility functions with no non-trivial dependencies.\"\"\"\nimport os\nimport pathlib\nimport re\nimport subprocess\nimport sys\nimport hashlib\n\nfrom typing import (\n TypeVar, List, Tuple, Optional, Dict, Sequence, Iterable, Container, IO, Callable\n)\nfrom typing_extensions import Final, Type, Literal\n\ntry:\n import curses\n import _curses # noqa\n CURSES_ENABLED = True\nexcept ImportError:\n CURSES_ENABLED = False\n\nT = TypeVar('T')\n\nENCODING_RE = \\\n re.compile(br'([ \\t\\v]*#.*(\\r\\n?|\\n))??[ \\t\\v]*#.*coding[:=][ \\t]*([-\\w.]+)') # type: Final\n\n# This works in most default terminals works (because it is ANSI standard). The problem\n# this tries to solve is that although it is a basic ANSI \"feature\", terminfo files\n# for most default terminals don't have dim termcap entry, so curses doesn't report it.\n# Potentially, we can choose a grey color that would look good on both white and black\n# background, but it is not easy, and again most default terminals are 8-color, not 256-color,\n# so we can't get the color code from curses.\nPLAIN_ANSI_DIM = '\\x1b[2m' # type: Final\n\nDEFAULT_SOURCE_OFFSET = 4 # type: Final\nDEFAULT_COLUMNS = 80 # type: Final\n\n# At least this number of columns will be shown on each side of\n# error location when printing source code snippet.\nMINIMUM_WIDTH = 20\n\n# VT100 color code processing was added in Windows 10, but only the second major update,\n# Threshold 2. Fortunately, everyone (even on LTSB, Long Term Support Branch) should\n# have a version of Windows 10 newer than this. Note that Windows 8 and below are not\n# supported, but are either going out of support, or make up only a few % of the market.\nMINIMUM_WINDOWS_MAJOR_VT100 = 10\nMINIMUM_WINDOWS_BUILD_VT100 = 10586\n\ndefault_python2_interpreter = \\\n ['python2', 'python', '/usr/bin/python', 'C:\\\\Python27\\\\python.exe'] # type: Final\n\n\ndef split_module_names(mod_name: str) -> List[str]:\n \"\"\"Return the module and all parent module names.\n\n So, if `mod_name` is 'a.b.c', this function will return\n ['a.b.c', 'a.b', and 'a'].\n \"\"\"\n out = [mod_name]\n while '.' in mod_name:\n mod_name = mod_name.rsplit('.', 1)[0]\n out.append(mod_name)\n return out\n\n\ndef module_prefix(modules: Iterable[str], target: str) -> Optional[str]:\n result = split_target(modules, target)\n if result is None:\n return None\n return result[0]\n\n\ndef split_target(modules: Iterable[str], target: str) -> Optional[Tuple[str, str]]:\n remaining = [] # type: List[str]\n while True:\n if target in modules:\n return target, '.'.join(remaining)\n components = target.rsplit('.', 1)\n if len(components) == 1:\n return None\n target = components[0]\n remaining.insert(0, components[1])\n\n\ndef short_type(obj: object) -> str:\n \"\"\"Return the last component of the type name of an object.\n\n If obj is None, return 'nil'. 
For example, if obj is 1, return 'int'.\n \"\"\"\n if obj is None:\n return 'nil'\n t = str(type(obj))\n return t.split('.')[-1].rstrip(\"'>\")\n\n\ndef find_python_encoding(text: bytes, pyversion: Tuple[int, int]) -> Tuple[str, int]:\n \"\"\"PEP-263 for detecting Python file encoding\"\"\"\n result = ENCODING_RE.match(text)\n if result:\n line = 2 if result.group(1) else 1\n encoding = result.group(3).decode('ascii')\n # Handle some aliases that Python is happy to accept and that are used in the wild.\n if encoding.startswith(('iso-latin-1-', 'latin-1-')) or encoding == 'iso-latin-1':\n encoding = 'latin-1'\n return encoding, line\n else:\n default_encoding = 'utf8' if pyversion[0] >= 3 else 'ascii'\n return default_encoding, -1\n\n\nclass DecodeError(Exception):\n \"\"\"Exception raised when a file cannot be decoded due to an unknown encoding type.\n\n Essentially a wrapper for the LookupError raised by `bytearray.decode`\n \"\"\"\n\n\ndef decode_python_encoding(source: bytes, pyversion: Tuple[int, int]) -> str:\n \"\"\"Read the Python file with while obeying PEP-263 encoding detection.\n\n Returns the source as a string.\n \"\"\"\n # check for BOM UTF-8 encoding and strip it out if present\n if source.startswith(b'\\xef\\xbb\\xbf'):\n encoding = 'utf8'\n source = source[3:]\n else:\n # look at first two lines and check if PEP-263 coding is present\n encoding, _ = find_python_encoding(source, pyversion)\n\n try:\n source_text = source.decode(encoding)\n except LookupError as lookuperr:\n raise DecodeError(str(lookuperr))\n return source_text\n\n\ndef read_py_file(path: str, read: Callable[[str], bytes],\n pyversion: Tuple[int, int]) -> Optional[List[str]]:\n \"\"\"Try reading a Python file as list of source lines.\n\n Return None if something goes wrong.\n \"\"\"\n try:\n source = read(path)\n except OSError:\n return None\n else:\n try:\n source_lines = decode_python_encoding(source, pyversion).splitlines()\n except DecodeError:\n return None\n return source_lines\n\n\ndef trim_source_line(line: str, max_len: int, col: int, min_width: int) -> Tuple[str, int]:\n \"\"\"Trim a line of source code to fit into max_len.\n\n Show 'min_width' characters on each side of 'col' (an error location). If either\n start or end is trimmed, this is indicated by adding '...' there.\n A typical result looks like this:\n ...some_variable = function_to_call(one_arg, other_arg) or...\n\n Return the trimmed string and the column offset to to adjust error location.\n \"\"\"\n if max_len < 2 * min_width + 1:\n # In case the window is too tiny it is better to still show something.\n max_len = 2 * min_width + 1\n\n # Trivial case: line already fits in.\n if len(line) <= max_len:\n return line, 0\n\n # If column is not too large so that there is still min_width after it,\n # the line doesn't need to be trimmed at the start.\n if col + min_width < max_len:\n return line[:max_len] + '...', 0\n\n # Otherwise, if the column is not too close to the end, trim both sides.\n if col < len(line) - min_width - 1:\n offset = col - max_len + min_width + 1\n return '...' + line[offset:col + min_width + 1] + '...', offset - 3\n\n # Finally, if the column is near the end, just trim the start.\n return '...' 
+ line[-max_len:], len(line) - max_len - 3\n\n\ndef get_mypy_comments(source: str) -> List[Tuple[int, str]]:\n PREFIX = '# mypy: '\n # Don't bother splitting up the lines unless we know it is useful\n if PREFIX not in source:\n return []\n lines = source.split('\\n')\n results = []\n for i, line in enumerate(lines):\n if line.startswith(PREFIX):\n results.append((i + 1, line[len(PREFIX):]))\n\n return results\n\n\n_python2_interpreter = None # type: Optional[str]\n\n\ndef try_find_python2_interpreter() -> Optional[str]:\n global _python2_interpreter\n if _python2_interpreter:\n return _python2_interpreter\n for interpreter in default_python2_interpreter:\n try:\n retcode = subprocess.Popen([\n interpreter, '-c',\n 'import sys, typing; assert sys.version_info[:2] == (2, 7)'\n ]).wait()\n if not retcode:\n _python2_interpreter = interpreter\n return interpreter\n except OSError:\n pass\n return None\n\n\nPASS_TEMPLATE = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<testsuite errors=\"0\" failures=\"0\" name=\"mypy\" skips=\"0\" tests=\"1\" time=\"{time:.3f}\">\n <testcase classname=\"mypy\" file=\"mypy\" line=\"1\" name=\"mypy-py{ver}-{platform}\" time=\"{time:.3f}\">\n </testcase>\n</testsuite>\n\"\"\" # type: Final\n\nFAIL_TEMPLATE = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<testsuite errors=\"0\" failures=\"1\" name=\"mypy\" skips=\"0\" tests=\"1\" time=\"{time:.3f}\">\n <testcase classname=\"mypy\" file=\"mypy\" line=\"1\" name=\"mypy-py{ver}-{platform}\" time=\"{time:.3f}\">\n <failure message=\"mypy produced messages\">{text}</failure>\n </testcase>\n</testsuite>\n\"\"\" # type: Final\n\nERROR_TEMPLATE = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<testsuite errors=\"1\" failures=\"0\" name=\"mypy\" skips=\"0\" tests=\"1\" time=\"{time:.3f}\">\n <testcase classname=\"mypy\" file=\"mypy\" line=\"1\" name=\"mypy-py{ver}-{platform}\" time=\"{time:.3f}\">\n <error message=\"mypy produced errors\">{text}</error>\n </testcase>\n</testsuite>\n\"\"\" # type: Final\n\n\ndef write_junit_xml(dt: float, serious: bool, messages: List[str], path: str,\n version: str, platform: str) -> None:\n from xml.sax.saxutils import escape\n if not messages and not serious:\n xml = PASS_TEMPLATE.format(time=dt, ver=version, platform=platform)\n elif not serious:\n xml = FAIL_TEMPLATE.format(text=escape('\\n'.join(messages)), time=dt,\n ver=version, platform=platform)\n else:\n xml = ERROR_TEMPLATE.format(text=escape('\\n'.join(messages)), time=dt,\n ver=version, platform=platform)\n\n # checks for a directory structure in path and creates folders if needed\n xml_dirs = os.path.dirname(os.path.abspath(path))\n if not os.path.isdir(xml_dirs):\n os.makedirs(xml_dirs)\n\n with open(path, 'wb') as f:\n f.write(xml.encode('utf-8'))\n\n\nclass IdMapper:\n \"\"\"Generate integer ids for objects.\n\n Unlike id(), these start from 0 and increment by 1, and ids won't\n get reused across the life-time of IdMapper.\n\n Assume objects don't redefine __eq__ or __hash__.\n \"\"\"\n\n def __init__(self) -> None:\n self.id_map = {} # type: Dict[object, int]\n self.next_id = 0\n\n def id(self, o: object) -> int:\n if o not in self.id_map:\n self.id_map[o] = self.next_id\n self.next_id += 1\n return self.id_map[o]\n\n\ndef get_prefix(fullname: str) -> str:\n \"\"\"Drop the final component of a qualified name (e.g. 
('x.y' -> 'x').\"\"\"\n return fullname.rsplit('.', 1)[0]\n\n\ndef correct_relative_import(cur_mod_id: str,\n relative: int,\n target: str,\n is_cur_package_init_file: bool) -> Tuple[str, bool]:\n if relative == 0:\n return target, True\n parts = cur_mod_id.split(\".\")\n rel = relative\n if is_cur_package_init_file:\n rel -= 1\n ok = len(parts) >= rel\n if rel != 0:\n cur_mod_id = \".\".join(parts[:-rel])\n return cur_mod_id + ((\".\" + target) if target else \"\"), ok\n\n\nfields_cache = {} # type: Final[Dict[Type[object], List[str]]]\n\n\ndef get_class_descriptors(cls: 'Type[object]') -> Sequence[str]:\n import inspect # Lazy import for minor startup speed win\n # Maintain a cache of type -> attributes defined by descriptors in the class\n # (that is, attributes from __slots__ and C extension classes)\n if cls not in fields_cache:\n members = inspect.getmembers(\n cls,\n lambda o: inspect.isgetsetdescriptor(o) or inspect.ismemberdescriptor(o))\n fields_cache[cls] = [x for x, y in members if x != '__weakref__' and x != '__dict__']\n return fields_cache[cls]\n\n\ndef replace_object_state(new: object, old: object, copy_dict: bool = False) -> None:\n \"\"\"Copy state of old node to the new node.\n\n This handles cases where there is __dict__ and/or attribute descriptors\n (either from slots or because the type is defined in a C extension module).\n\n Assume that both objects have the same __class__.\n \"\"\"\n if hasattr(old, '__dict__'):\n if copy_dict:\n new.__dict__ = dict(old.__dict__)\n else:\n new.__dict__ = old.__dict__\n\n for attr in get_class_descriptors(old.__class__):\n try:\n if hasattr(old, attr):\n setattr(new, attr, getattr(old, attr))\n elif hasattr(new, attr):\n delattr(new, attr)\n # There is no way to distinguish getsetdescriptors that allow\n # writes from ones that don't (I think?), so we just ignore\n # AttributeErrors if we need to.\n # TODO: What about getsetdescriptors that act like properties???\n except AttributeError:\n pass\n\n\ndef is_sub_path(path1: str, path2: str) -> bool:\n \"\"\"Given two paths, return if path1 is a sub-path of path2.\"\"\"\n return pathlib.Path(path2) in pathlib.Path(path1).parents\n\n\ndef hard_exit(status: int = 0) -> None:\n \"\"\"Kill the current process without fully cleaning up.\n\n This can be quite a bit faster than a normal exit() since objects are not freed.\n \"\"\"\n sys.stdout.flush()\n sys.stderr.flush()\n os._exit(status)\n\n\ndef unmangle(name: str) -> str:\n \"\"\"Remove internal suffixes from a short name.\"\"\"\n return name.rstrip(\"'\")\n\n\ndef get_unique_redefinition_name(name: str, existing: Container[str]) -> str:\n \"\"\"Get a simple redefinition name not present among existing.\n\n For example, for name 'foo' we try 'foo-redefinition', 'foo-redefinition2',\n 'foo-redefinition3', etc. 
until we find one that is not in existing.\n \"\"\"\n r_name = name + '-redefinition'\n if r_name not in existing:\n return r_name\n\n i = 2\n while r_name + str(i) in existing:\n i += 1\n return r_name + str(i)\n\n\ndef check_python_version(program: str) -> None:\n \"\"\"Report issues with the Python used to run mypy, dmypy, or stubgen\"\"\"\n # Check for known bad Python versions.\n if sys.version_info[:2] < (3, 5):\n sys.exit(\"Running {name} with Python 3.4 or lower is not supported; \"\n \"please upgrade to 3.5 or newer\".format(name=program))\n # this can be deleted once we drop support for 3.5\n if sys.version_info[:3] == (3, 5, 0):\n sys.exit(\"Running {name} with Python 3.5.0 is not supported; \"\n \"please upgrade to 3.5.1 or newer\".format(name=program))\n\n\ndef count_stats(errors: List[str]) -> Tuple[int, int]:\n \"\"\"Count total number of errors and files in error list.\"\"\"\n errors = [e for e in errors if ': error:' in e]\n files = {e.split(':')[0] for e in errors}\n return len(errors), len(files)\n\n\ndef split_words(msg: str) -> List[str]:\n \"\"\"Split line of text into words (but not within quoted groups).\"\"\"\n next_word = ''\n res = [] # type: List[str]\n allow_break = True\n for c in msg:\n if c == ' ' and allow_break:\n res.append(next_word)\n next_word = ''\n continue\n if c == '\"':\n allow_break = not allow_break\n next_word += c\n res.append(next_word)\n return res\n\n\ndef get_terminal_width() -> int:\n \"\"\"Get current terminal width if possible, otherwise return the default one.\"\"\"\n try:\n cols, _ = os.get_terminal_size()\n except OSError:\n return DEFAULT_COLUMNS\n else:\n if cols == 0:\n return DEFAULT_COLUMNS\n return cols\n\n\ndef soft_wrap(msg: str, max_len: int, first_offset: int,\n num_indent: int = 0) -> str:\n \"\"\"Wrap a long error message into few lines.\n\n Breaks will only happen between words, and never inside a quoted group\n (to avoid breaking types such as \"Union[int, str]\"). The 'first_offset' is\n the width before the start of first line.\n\n Pad every next line with 'num_indent' spaces. 
Every line will be at most 'max_len'\n characters, except if it is a single word or quoted group.\n\n For example:\n first_offset\n ------------------------\n path/to/file: error: 58: Some very long error message\n that needs to be split in separate lines.\n \"Long[Type, Names]\" are never split.\n ^^^^--------------------------------------------------\n num_indent max_len\n \"\"\"\n words = split_words(msg)\n next_line = words.pop(0)\n lines = [] # type: List[str]\n while words:\n next_word = words.pop(0)\n max_line_len = max_len - num_indent if lines else max_len - first_offset\n # Add 1 to account for space between words.\n if len(next_line) + len(next_word) + 1 <= max_line_len:\n next_line += ' ' + next_word\n else:\n lines.append(next_line)\n next_line = next_word\n lines.append(next_line)\n padding = '\\n' + ' ' * num_indent\n return padding.join(lines)\n\n\ndef hash_digest(data: bytes) -> str:\n \"\"\"Compute a hash digest of some data.\n\n We use a cryptographic hash because we want a low probability of\n accidental collision, but we don't really care about any of the\n cryptographic properties.\n \"\"\"\n # Once we drop Python 3.5 support, we should consider using\n # blake2b, which is faster.\n return hashlib.sha256(data).hexdigest()\n\n\nclass FancyFormatter:\n \"\"\"Apply color and bold font to terminal output.\n\n This currently only works on Linux and Mac.\n \"\"\"\n def __init__(self, f_out: IO[str], f_err: IO[str], show_error_codes: bool) -> None:\n self.show_error_codes = show_error_codes\n # Check if we are in a human-facing terminal on a supported platform.\n if sys.platform not in ('linux', 'darwin', 'win32'):\n self.dummy_term = True\n return\n force_color = int(os.getenv('MYPY_FORCE_COLOR', '0'))\n if not force_color and (not f_out.isatty() or not f_err.isatty()):\n self.dummy_term = True\n return\n if sys.platform == 'win32':\n self.dummy_term = not self.initialize_win_colors()\n else:\n self.dummy_term = not self.initialize_unix_colors()\n if not self.dummy_term:\n self.colors = {'red': self.RED, 'green': self.GREEN,\n 'blue': self.BLUE, 'yellow': self.YELLOW,\n 'none': ''}\n\n def initialize_win_colors(self) -> bool:\n \"\"\"Return True if initialization was successful and we can use colors, False otherwise\"\"\"\n # Windows ANSI escape sequences are only supported on Threshold 2 and above.\n # we check with an assert at runtime and an if check for mypy, as asserts do not\n # yet narrow platform\n assert sys.platform == 'win32'\n if sys.platform == 'win32':\n winver = sys.getwindowsversion()\n if (winver.major < MINIMUM_WINDOWS_MAJOR_VT100\n or winver.build < MINIMUM_WINDOWS_BUILD_VT100):\n return False\n import ctypes\n kernel32 = ctypes.windll.kernel32\n ENABLE_PROCESSED_OUTPUT = 0x1\n ENABLE_WRAP_AT_EOL_OUTPUT = 0x2\n ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4\n STD_OUTPUT_HANDLE = -11\n kernel32.SetConsoleMode(kernel32.GetStdHandle(STD_OUTPUT_HANDLE),\n ENABLE_PROCESSED_OUTPUT\n | ENABLE_WRAP_AT_EOL_OUTPUT\n | ENABLE_VIRTUAL_TERMINAL_PROCESSING)\n self.BOLD = '\\033[1m'\n self.UNDER = '\\033[4m'\n self.BLUE = '\\033[94m'\n self.GREEN = '\\033[92m'\n self.RED = '\\033[91m'\n self.YELLOW = '\\033[93m'\n self.NORMAL = '\\033[0m'\n self.DIM = '\\033[2m'\n return True\n return False\n\n def initialize_unix_colors(self) -> bool:\n \"\"\"Return True if initialization was successful and we can use colors, False otherwise\"\"\"\n if not CURSES_ENABLED:\n return False\n try:\n curses.setupterm()\n except curses.error:\n # Most likely terminfo not found.\n return False\n 
bold = curses.tigetstr('bold')\n under = curses.tigetstr('smul')\n set_color = curses.tigetstr('setaf')\n if not (bold and under and set_color):\n return False\n\n self.NORMAL = curses.tigetstr('sgr0').decode()\n self.BOLD = bold.decode()\n self.UNDER = under.decode()\n dim = curses.tigetstr('dim')\n # TODO: more reliable way to get gray color good for both dark and light schemes.\n self.DIM = dim.decode() if dim else PLAIN_ANSI_DIM\n\n self.BLUE = curses.tparm(set_color, curses.COLOR_BLUE).decode()\n self.GREEN = curses.tparm(set_color, curses.COLOR_GREEN).decode()\n self.RED = curses.tparm(set_color, curses.COLOR_RED).decode()\n self.YELLOW = curses.tparm(set_color, curses.COLOR_YELLOW).decode()\n return True\n\n def style(self, text: str, color: Literal['red', 'green', 'blue', 'yellow', 'none'],\n bold: bool = False, underline: bool = False, dim: bool = False) -> str:\n \"\"\"Apply simple color and style (underlined or bold).\"\"\"\n if self.dummy_term:\n return text\n if bold:\n start = self.BOLD\n else:\n start = ''\n if underline:\n start += self.UNDER\n if dim:\n start += self.DIM\n return start + self.colors[color] + text + self.NORMAL\n\n def fit_in_terminal(self, messages: List[str],\n fixed_terminal_width: Optional[int] = None) -> List[str]:\n \"\"\"Improve readability by wrapping error messages and trimming source code.\"\"\"\n width = (fixed_terminal_width or int(os.getenv('MYPY_FORCE_TERMINAL_WIDTH', '0')) or\n get_terminal_width())\n new_messages = messages.copy()\n for i, error in enumerate(messages):\n if ': error:' in error:\n loc, msg = error.split('error:', maxsplit=1)\n msg = soft_wrap(msg, width, first_offset=len(loc) + len('error: '))\n new_messages[i] = loc + 'error:' + msg\n if error.startswith(' ' * DEFAULT_SOURCE_OFFSET) and '^' not in error:\n # TODO: detecting source code highlights through an indent can be surprising.\n # Restore original error message and error location.\n error = error[DEFAULT_SOURCE_OFFSET:]\n column = messages[i+1].index('^') - DEFAULT_SOURCE_OFFSET\n\n # Let source have some space also on the right side, plus 6\n # to accommodate ... 
on each side.\n max_len = width - DEFAULT_SOURCE_OFFSET - 6\n source_line, offset = trim_source_line(error, max_len, column, MINIMUM_WIDTH)\n\n new_messages[i] = ' ' * DEFAULT_SOURCE_OFFSET + source_line\n # Also adjust the error marker position.\n new_messages[i+1] = ' ' * (DEFAULT_SOURCE_OFFSET + column - offset) + '^'\n return new_messages\n\n def colorize(self, error: str) -> str:\n \"\"\"Colorize an output line by highlighting the status and error code.\n\n If fixed_terminal_width is given, use it instead of calling get_terminal_width()\n (used by the daemon).\n \"\"\"\n if ': error:' in error:\n loc, msg = error.split('error:', maxsplit=1)\n if not self.show_error_codes:\n return (loc + self.style('error:', 'red', bold=True) +\n self.highlight_quote_groups(msg))\n codepos = msg.rfind('[')\n if codepos != -1:\n code = msg[codepos:]\n msg = msg[:codepos]\n else:\n code = \"\" # no error code specified\n return (loc + self.style('error:', 'red', bold=True) +\n self.highlight_quote_groups(msg) + self.style(code, 'yellow'))\n elif ': note:' in error:\n loc, msg = error.split('note:', maxsplit=1)\n return loc + self.style('note:', 'blue') + self.underline_link(msg)\n elif error.startswith(' ' * DEFAULT_SOURCE_OFFSET):\n # TODO: detecting source code highlights through an indent can be surprising.\n if '^' not in error:\n return self.style(error, 'none', dim=True)\n return self.style(error, 'red')\n else:\n return error\n\n def highlight_quote_groups(self, msg: str) -> str:\n \"\"\"Make groups quoted with double quotes bold (including quotes).\n\n This is used to highlight types, attribute names etc.\n \"\"\"\n if msg.count('\"') % 2:\n # Broken error message, don't do any formatting.\n return msg\n parts = msg.split('\"')\n out = ''\n for i, part in enumerate(parts):\n if i % 2 == 0:\n out += self.style(part, 'none')\n else:\n out += self.style('\"' + part + '\"', 'none', bold=True)\n return out\n\n def underline_link(self, note: str) -> str:\n \"\"\"Underline a link in a note message (if any).\n\n This assumes there is at most one link in the message.\n \"\"\"\n match = re.search(r'https?://\\S*', note)\n if not match:\n return note\n start = match.start()\n end = match.end()\n return (note[:start] +\n self.style(note[start:end], 'none', underline=True) +\n note[end:])\n\n def format_success(self, n_sources: int, use_color: bool = True) -> str:\n \"\"\"Format short summary in case of success.\n\n n_sources is total number of files passed directly on command line,\n i.e. excluding stubs and followed imports.\n \"\"\"\n msg = 'Success: no issues found in {}' \\\n ' source file{}'.format(n_sources, 's' if n_sources != 1 else '')\n if not use_color:\n return msg\n return self.style(msg, 'green', bold=True)\n\n def format_error(self, n_errors: int, n_files: int, n_sources: int,\n use_color: bool = True) -> str:\n \"\"\"Format a short summary in case of errors.\"\"\"\n msg = 'Found {} error{} in {} file{}' \\\n ' (checked {} source file{})'.format(n_errors, 's' if n_errors != 1 else '',\n n_files, 's' if n_files != 1 else '',\n n_sources, 's' if n_sources != 1 else '')\n if not use_color:\n return msg\n return self.style(msg, 'red', bold=True)\n", "path": "mypy/util.py" } ]
diff --git a/mypy/util.py b/mypy/util.py index d44e58da8fc3..f7c96e520f5c 100644 --- a/mypy/util.py +++ b/mypy/util.py @@ -476,7 +476,9 @@ def hash_digest(data: bytes) -> str: accidental collision, but we don't really care about any of the cryptographic properties. """ - return hashlib.md5(data).hexdigest() + # Once we drop Python 3.5 support, we should consider using + # blake2b, which is faster. + return hashlib.sha256(data).hexdigest() class FancyFormatter:
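The patched helper keeps the same bytes-in, hex-string-out interface, so call sites such as the `fscache.read()` path from the traceback only need to route through `hash_digest` instead of calling `hashlib.md5` directly. A hedged sketch of such a call site (the file path is taken from the report; whether `fscache` goes through this helper is an assumption here, not something shown in the patch):

```
from mypy.util import hash_digest

with open("pcs/__init__.py", "rb") as f:
    data = f.read()

# Works under FIPS because hash_digest no longer touches hashlib.md5.
# Note the digest is now 64 hex characters (SHA-256) rather than 32 (MD5).
print(hash_digest(data))
```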
Cloud-CV__EvalAI-2012
Incorrect Fields in Jobs serializer *Observed code:* [here](https://github.com/Cloud-CV/EvalAI/blob/master/apps/jobs/serializers.py/#L54) ``` class Meta: model = LeaderboardData fields = "__all__" fields = ('id', 'participant_team_name', 'challenge_phase_split', 'leaderboard_schema', 'result') ``` *Expected Code:* ``` class Meta: model = LeaderboardData fields = ('id', 'participant_team_name', 'challenge_phase_split', 'leaderboard_schema', 'result') ```
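For context on why the duplicated assignment is dead code rather than a runtime error: a class body executes top to bottom, so the second `fields` binding silently replaces the first and DRF only ever sees the tuple. A minimal sketch (plain Python, no Django required; names borrowed from the snippet above):

```
class Meta:
    fields = "__all__"
    fields = ('id', 'participant_team_name', 'challenge_phase_split',
              'leaderboard_schema', 'result')   # rebinds the name; only this value survives

print(Meta.fields)
# ('id', 'participant_team_name', 'challenge_phase_split', 'leaderboard_schema', 'result')
```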
[ { "content": "from django.contrib.auth.models import User\n\nfrom rest_framework import serializers\n\nfrom challenges.models import LeaderboardData\nfrom participants.models import Participant, ParticipantTeam\n\nfrom .models import Submission\n\n\nclass SubmissionSerializer(serializers.ModelSerializer):\n\n participant_team_name = serializers.SerializerMethodField()\n execution_time = serializers.SerializerMethodField()\n\n def __init__(self, *args, **kwargs):\n context = kwargs.get('context')\n if context and context.get('request').method == 'POST':\n created_by = context.get('request').user\n kwargs['data']['created_by'] = created_by.pk\n\n participant_team = context.get('participant_team').pk\n kwargs['data']['participant_team'] = participant_team\n\n challenge_phase = context.get('challenge_phase').pk\n kwargs['data']['challenge_phase'] = challenge_phase\n\n super(SubmissionSerializer, self).__init__(*args, **kwargs)\n\n class Meta:\n model = Submission\n fields = ('id', 'participant_team', 'participant_team_name', 'execution_time', 'challenge_phase',\n 'created_by', 'status', 'input_file', 'stdout_file', 'stderr_file', 'submitted_at',\n 'method_name', 'method_description', 'project_url', 'publication_url', 'is_public',\n 'submission_result_file', 'when_made_public',)\n\n def get_participant_team_name(self, obj):\n return obj.participant_team.team_name\n\n def get_execution_time(self, obj):\n return obj.execution_time\n\n\nclass LeaderboardDataSerializer(serializers.ModelSerializer):\n\n participant_team_name = serializers.SerializerMethodField()\n leaderboard_schema = serializers.SerializerMethodField()\n\n def __init__(self, *args, **kwargs):\n super(LeaderboardDataSerializer, self).__init__(*args, **kwargs)\n\n class Meta:\n model = LeaderboardData\n fields = \"__all__\"\n fields = ('id', 'participant_team_name', 'challenge_phase_split', 'leaderboard_schema', 'result')\n\n def get_participant_team_name(self, obj):\n return obj.submission.participant_team.team_name\n\n def get_leaderboard_schema(self, obj):\n return obj.leaderboard.schema\n\n\nclass ChallengeSubmissionManagementSerializer(serializers.ModelSerializer):\n\n participant_team = serializers.SerializerMethodField()\n challenge_phase = serializers.SerializerMethodField()\n created_by = serializers.SerializerMethodField()\n participant_team_members_email_ids = serializers.SerializerMethodField()\n created_at = serializers.SerializerMethodField()\n participant_team_members = serializers.SerializerMethodField()\n\n class Meta:\n model = Submission\n fields = ('id', 'participant_team', 'challenge_phase', 'created_by', 'status', 'is_public',\n 'submission_number', 'submitted_at', 'execution_time', 'input_file', 'stdout_file',\n 'stderr_file', 'submission_result_file', 'submission_metadata_file',\n 'participant_team_members_email_ids', 'created_at', 'method_name', 'participant_team_members',)\n\n def get_participant_team(self, obj):\n return obj.participant_team.team_name\n\n def get_challenge_phase(self, obj):\n return obj.challenge_phase.name\n\n def get_created_by(self, obj):\n return obj.created_by.username\n\n def get_participant_team_members_email_ids(self, obj):\n try:\n participant_team = ParticipantTeam.objects.get(team_name=obj.participant_team.team_name)\n except ParticipantTeam.DoesNotExist:\n return 'Participant team does not exist'\n\n participant_ids = Participant.objects.filter(team=participant_team).values_list('user_id', flat=True)\n return 
list(User.objects.filter(id__in=participant_ids).values_list('email', flat=True))\n\n def get_created_at(self, obj):\n return obj.created_at\n\n def get_participant_team_members(self, obj):\n try:\n participant_team = ParticipantTeam.objects.get(team_name=obj.participant_team.team_name)\n except ParticipantTeam.DoesNotExist:\n return 'Participant team does not exist'\n\n participant_ids = Participant.objects.filter(team=participant_team).values_list('user_id', flat=True)\n return list(User.objects.filter(id__in=participant_ids).values('username', 'email'))\n\n\nclass SubmissionCount(object):\n def __init__(self, submission_count):\n self.submission_count = submission_count\n\n\nclass SubmissionCountSerializer(serializers.Serializer):\n submission_count = serializers.IntegerField()\n\n\nclass LastSubmissionDateTime(object):\n def __init__(self, last_submission_datetime):\n self.last_submission_datetime = last_submission_datetime\n\n\nclass LastSubmissionDateTimeSerializer(serializers.Serializer):\n last_submission_datetime = serializers.DateTimeField()\n\n\nclass CreateLeaderboardDataSerializer(serializers.ModelSerializer):\n\n def __init__(self, *args, **kwargs):\n context = kwargs.get('context')\n if context and context.get('request').method == 'PUT':\n challenge_phase_split = context.get('challenge_phase_split')\n kwargs['data']['challenge_phase_split'] = challenge_phase_split.pk\n\n submission = context.get('submission').pk\n kwargs['data']['submission'] = submission\n\n kwargs['data']['leaderboard'] = challenge_phase_split.leaderboard.pk\n\n super(CreateLeaderboardDataSerializer, self).__init__(*args, **kwargs)\n\n class Meta:\n model = LeaderboardData\n fields = ('challenge_phase_split', 'submission', 'result', 'leaderboard')\n", "path": "apps/jobs/serializers.py" } ]
[ { "content": "from django.contrib.auth.models import User\n\nfrom rest_framework import serializers\n\nfrom challenges.models import LeaderboardData\nfrom participants.models import Participant, ParticipantTeam\n\nfrom .models import Submission\n\n\nclass SubmissionSerializer(serializers.ModelSerializer):\n\n participant_team_name = serializers.SerializerMethodField()\n execution_time = serializers.SerializerMethodField()\n\n def __init__(self, *args, **kwargs):\n context = kwargs.get('context')\n if context and context.get('request').method == 'POST':\n created_by = context.get('request').user\n kwargs['data']['created_by'] = created_by.pk\n\n participant_team = context.get('participant_team').pk\n kwargs['data']['participant_team'] = participant_team\n\n challenge_phase = context.get('challenge_phase').pk\n kwargs['data']['challenge_phase'] = challenge_phase\n\n super(SubmissionSerializer, self).__init__(*args, **kwargs)\n\n class Meta:\n model = Submission\n fields = ('id', 'participant_team', 'participant_team_name', 'execution_time', 'challenge_phase',\n 'created_by', 'status', 'input_file', 'stdout_file', 'stderr_file', 'submitted_at',\n 'method_name', 'method_description', 'project_url', 'publication_url', 'is_public',\n 'submission_result_file', 'when_made_public',)\n\n def get_participant_team_name(self, obj):\n return obj.participant_team.team_name\n\n def get_execution_time(self, obj):\n return obj.execution_time\n\n\nclass LeaderboardDataSerializer(serializers.ModelSerializer):\n\n participant_team_name = serializers.SerializerMethodField()\n leaderboard_schema = serializers.SerializerMethodField()\n\n def __init__(self, *args, **kwargs):\n super(LeaderboardDataSerializer, self).__init__(*args, **kwargs)\n\n class Meta:\n model = LeaderboardData\n fields = ('id', 'participant_team_name', 'challenge_phase_split', 'leaderboard_schema', 'result')\n\n def get_participant_team_name(self, obj):\n return obj.submission.participant_team.team_name\n\n def get_leaderboard_schema(self, obj):\n return obj.leaderboard.schema\n\n\nclass ChallengeSubmissionManagementSerializer(serializers.ModelSerializer):\n\n participant_team = serializers.SerializerMethodField()\n challenge_phase = serializers.SerializerMethodField()\n created_by = serializers.SerializerMethodField()\n participant_team_members_email_ids = serializers.SerializerMethodField()\n created_at = serializers.SerializerMethodField()\n participant_team_members = serializers.SerializerMethodField()\n\n class Meta:\n model = Submission\n fields = ('id', 'participant_team', 'challenge_phase', 'created_by', 'status', 'is_public',\n 'submission_number', 'submitted_at', 'execution_time', 'input_file', 'stdout_file',\n 'stderr_file', 'submission_result_file', 'submission_metadata_file',\n 'participant_team_members_email_ids', 'created_at', 'method_name', 'participant_team_members',)\n\n def get_participant_team(self, obj):\n return obj.participant_team.team_name\n\n def get_challenge_phase(self, obj):\n return obj.challenge_phase.name\n\n def get_created_by(self, obj):\n return obj.created_by.username\n\n def get_participant_team_members_email_ids(self, obj):\n try:\n participant_team = ParticipantTeam.objects.get(team_name=obj.participant_team.team_name)\n except ParticipantTeam.DoesNotExist:\n return 'Participant team does not exist'\n\n participant_ids = Participant.objects.filter(team=participant_team).values_list('user_id', flat=True)\n return list(User.objects.filter(id__in=participant_ids).values_list('email', flat=True))\n\n def 
get_created_at(self, obj):\n return obj.created_at\n\n def get_participant_team_members(self, obj):\n try:\n participant_team = ParticipantTeam.objects.get(team_name=obj.participant_team.team_name)\n except ParticipantTeam.DoesNotExist:\n return 'Participant team does not exist'\n\n participant_ids = Participant.objects.filter(team=participant_team).values_list('user_id', flat=True)\n return list(User.objects.filter(id__in=participant_ids).values('username', 'email'))\n\n\nclass SubmissionCount(object):\n def __init__(self, submission_count):\n self.submission_count = submission_count\n\n\nclass SubmissionCountSerializer(serializers.Serializer):\n submission_count = serializers.IntegerField()\n\n\nclass LastSubmissionDateTime(object):\n def __init__(self, last_submission_datetime):\n self.last_submission_datetime = last_submission_datetime\n\n\nclass LastSubmissionDateTimeSerializer(serializers.Serializer):\n last_submission_datetime = serializers.DateTimeField()\n\n\nclass CreateLeaderboardDataSerializer(serializers.ModelSerializer):\n\n def __init__(self, *args, **kwargs):\n context = kwargs.get('context')\n if context and context.get('request').method == 'PUT':\n challenge_phase_split = context.get('challenge_phase_split')\n kwargs['data']['challenge_phase_split'] = challenge_phase_split.pk\n\n submission = context.get('submission').pk\n kwargs['data']['submission'] = submission\n\n kwargs['data']['leaderboard'] = challenge_phase_split.leaderboard.pk\n\n super(CreateLeaderboardDataSerializer, self).__init__(*args, **kwargs)\n\n class Meta:\n model = LeaderboardData\n fields = ('challenge_phase_split', 'submission', 'result', 'leaderboard')\n", "path": "apps/jobs/serializers.py" } ]
diff --git a/apps/jobs/serializers.py b/apps/jobs/serializers.py index b411ec5b54..f95054c50d 100644 --- a/apps/jobs/serializers.py +++ b/apps/jobs/serializers.py @@ -51,7 +51,6 @@ def __init__(self, *args, **kwargs): class Meta: model = LeaderboardData - fields = "__all__" fields = ('id', 'participant_team_name', 'challenge_phase_split', 'leaderboard_schema', 'result') def get_participant_team_name(self, obj):
interlegis__sapl-2645
Total number of records found in a report is shown per page, not as the overall total
In Comissões, Matérias em Tramitação (Committees, Matters in Progress), the count reflects the number of matters per page rather than the total for the unit. E.g.: https://sapl.divinopolis.mg.leg.br/comissao/101/materias-em-tramitacao
## Expected Behavior
The screen should show the total number of records retrieved, not the total per page.
## Current Behavior
The page shows the number of records per page.
## Steps to Reproduce (for bugs)
1. Open the link https://sapl.divinopolis.mg.leg.br/comissao/101/materias-em-tramitacao
## Your Environment
<!--- Include relevant details about the environment in which you observed/experienced the bug. -->
* Version used (_Release_): 3.1.147
* Browser name and version: Chrome
* Operating system name and version (desktop or mobile): Linux
* Link to your project (if this is a fork of this project):
[ { "content": "import logging\n\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import F\nfrom django.http.response import HttpResponseRedirect\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.generic import ListView\nfrom django.views.generic.base import RedirectView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import FormMixin\n\nfrom sapl.base.models import AppConfig as AppsAppConfig\nfrom sapl.comissoes.apps import AppConfig\nfrom sapl.comissoes.forms import (ComissaoForm, ComposicaoForm,\n DocumentoAcessorioCreateForm,\n DocumentoAcessorioEditForm,\n ParticipacaoCreateForm, ParticipacaoEditForm,\n PeriodoForm, ReuniaoForm)\nfrom sapl.crud.base import (RP_DETAIL, RP_LIST, Crud, CrudAux,\n MasterDetailCrud,\n PermissionRequiredForAppCrudMixin)\nfrom sapl.materia.models import MateriaLegislativa, Tramitacao\n\nfrom .models import (CargoComissao, Comissao, Composicao, DocumentoAcessorio,\n Participacao, Periodo, Reuniao, TipoComissao)\n\n\ndef pegar_url_composicao(pk):\n participacao = Participacao.objects.get(id=pk)\n comp_pk = participacao.composicao.pk\n url = reverse('sapl.comissoes:composicao_detail', kwargs={'pk': comp_pk})\n return url\n\n\ndef pegar_url_reuniao(pk):\n documentoacessorio = DocumentoAcessorio.objects.get(id=pk)\n r_pk = documentoacessorio.reuniao.pk\n url = reverse('sapl.comissoes:reuniao_detail', kwargs={'pk': r_pk})\n return url\n\nCargoCrud = CrudAux.build(CargoComissao, 'cargo_comissao')\n\nTipoComissaoCrud = CrudAux.build(\n TipoComissao, 'tipo_comissao', list_field_names=[\n 'sigla', 'nome', 'natureza', 'dispositivo_regimental'])\n\n\nclass PeriodoComposicaoCrud(CrudAux):\n model = Periodo\n\n class CreateView(CrudAux.CreateView):\n form_class = PeriodoForm\n\n class UpdateView(CrudAux.UpdateView):\n form_class = PeriodoForm\n\n # class ListView(CrudAux.ListView):\n\n\nclass ParticipacaoCrud(MasterDetailCrud):\n model = Participacao\n parent_field = 'composicao__comissao'\n public = [RP_DETAIL, ]\n ListView = None\n link_return_to_parent_field = True\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['composicao', 'parlamentar', 'cargo']\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = ParticipacaoCreateForm\n\n def get_initial(self):\n initial = super().get_initial()\n initial['parent_pk'] = self.kwargs['pk']\n return initial\n\n class UpdateView(MasterDetailCrud.UpdateView):\n layout_key = 'ParticipacaoEdit'\n form_class = ParticipacaoEditForm\n\n class DeleteView(MasterDetailCrud.DeleteView):\n\n def get_success_url(self):\n composicao_comissao_pk = self.object.composicao.comissao.pk\n composicao_pk = self.object.composicao.pk\n return '{}?pk={}'.format(reverse('sapl.comissoes:composicao_list',\n args=[composicao_comissao_pk]),\n composicao_pk)\n\n\nclass ComposicaoCrud(MasterDetailCrud):\n model = Composicao\n parent_field = 'comissao'\n model_set = 'participacao_set'\n public = [RP_LIST, RP_DETAIL, ]\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = ComposicaoForm\n\n def get_initial(self):\n comissao = Comissao.objects.get(id=self.kwargs['pk'])\n return {'comissao': comissao}\n\n class ListView(MasterDetailCrud.ListView):\n logger = logging.getLogger(__name__)\n template_name = \"comissoes/composicao_list.html\"\n paginate_by = None\n\n def take_composicao_pk(self):\n \n username = self.request.user.username\n try:\n self.logger.debug('user=' + username + '. 
Tentando obter pk da composição.')\n return int(self.request.GET['pk'])\n except Exception as e:\n self.logger.error('user=' + username + '. Erro ao obter pk da composição. Retornado 0. ' + str(e))\n return 0\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n composicao_pk = self.take_composicao_pk()\n\n if composicao_pk == 0:\n # Composicao eh ordenada por Periodo, que por sua vez esta em\n # ordem descrescente de data de inicio (issue #1920)\n ultima_composicao = context['composicao_list'].first()\n if ultima_composicao:\n context['composicao_pk'] = ultima_composicao.pk\n else:\n context['composicao_pk'] = 0\n else:\n context['composicao_pk'] = composicao_pk\n\n context['participacao_set'] = Participacao.objects.filter(\n composicao__pk=context['composicao_pk']\n ).order_by('id')\n return context\n\n\nclass ComissaoCrud(Crud):\n model = Comissao\n help_topic = 'modulo_comissoes'\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['nome', 'sigla', 'tipo',\n 'data_criacao', 'data_extincao', 'ativa']\n ordering = '-ativa', 'sigla'\n\n class CreateView(Crud.CreateView):\n form_class = ComissaoForm\n\n def form_valid(self, form):\n return super(Crud.CreateView, self).form_valid(form)\n\n class UpdateView(Crud.UpdateView):\n form_class = ComissaoForm\n\n def form_valid(self, form):\n return super(Crud.UpdateView, self).form_valid(form)\n\n\nclass MateriasTramitacaoListView(ListView):\n template_name = \"comissoes/materias_em_tramitacao.html\"\n paginate_by = 10\n\n def get_queryset(self):\n # FIXME: Otimizar consulta\n ts = Tramitacao.objects.order_by(\n 'materia', '-data_tramitacao', '-id').annotate(\n comissao=F('unidade_tramitacao_destino__comissao')).distinct(\n 'materia').values_list('materia', 'comissao')\n\n ts = list(filter(lambda x: x[1] == int(self.kwargs['pk']), ts))\n ts = list(zip(*ts))\n ts = ts[0] if ts else []\n\n materias = MateriaLegislativa.objects.filter(\n pk__in=ts).order_by('tipo', '-ano', '-numero')\n\n return materias\n\n def get_context_data(self, **kwargs):\n context = super(\n MateriasTramitacaoListView, self).get_context_data(**kwargs)\n context['object'] = Comissao.objects.get(id=self.kwargs['pk'])\n return context\n\n\nclass ReuniaoCrud(MasterDetailCrud):\n model = Reuniao\n parent_field = 'comissao'\n model_set = 'documentoacessorio_set'\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['data', 'nome', 'tema']\n\n class ListView(MasterDetailCrud.ListView):\n logger = logging.getLogger(__name__)\n paginate_by = 10\n\n def take_reuniao_pk(self):\n\n username = self.request.user.username\n try:\n self.logger.debug('user=' + username + '. Tentando obter pk da reunião.')\n return int(self.request.GET['pk'])\n except Exception as e:\n self.logger.error('user=' + username + '. Erro ao obter pk da reunião. Retornado 0. 
' + str(e))\n return 0\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n reuniao_pk = self.take_reuniao_pk()\n\n if reuniao_pk == 0:\n ultima_reuniao = list(context['reuniao_list'])\n if len(ultima_reuniao) > 0:\n ultimo = ultima_reuniao[-1]\n context['reuniao_pk'] = ultimo.pk\n else:\n context['reuniao_pk'] = 0\n else:\n context['reuniao_pk'] = reuniao_pk\n\n context['documentoacessorio_set'] = DocumentoAcessorio.objects.filter(\n reuniao__pk=context['reuniao_pk']\n ).order_by('id')\n return context\n\n class UpdateView(MasterDetailCrud.UpdateView):\n form_class = ReuniaoForm\n\n def get_initial(self):\n return {'comissao': self.object.comissao}\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = ReuniaoForm\n\n def get_initial(self):\n comissao = Comissao.objects.get(id=self.kwargs['pk'])\n\n return {'comissao': comissao}\n\n\nclass DocumentoAcessorioCrud(MasterDetailCrud):\n model = DocumentoAcessorio\n parent_field = 'reuniao__comissao'\n public = [RP_DETAIL, ]\n ListView = None\n link_return_to_parent_field = True\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['nome', 'tipo', 'data', 'autor', 'arquivo']\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = DocumentoAcessorioCreateForm\n\n def get_initial(self):\n initial = super().get_initial()\n initial['parent_pk'] = self.kwargs['pk']\n return initial\n\n class UpdateView(MasterDetailCrud.UpdateView):\n layout_key = 'DocumentoAcessorioEdit'\n form_class = DocumentoAcessorioEditForm\n\n class DeleteView(MasterDetailCrud.DeleteView):\n\n def delete(self, *args, **kwargs):\n obj = self.get_object()\n obj.delete()\n return HttpResponseRedirect(\n reverse('sapl.comissoes:reuniao_detail',\n kwargs={'pk': obj.reuniao.pk}))\n", "path": "sapl/comissoes/views.py" } ]
[ { "content": "import logging\n\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import F\nfrom django.http.response import HttpResponseRedirect\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.generic import ListView\nfrom django.views.generic.base import RedirectView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import FormMixin\n\nfrom sapl.base.models import AppConfig as AppsAppConfig\nfrom sapl.comissoes.apps import AppConfig\nfrom sapl.comissoes.forms import (ComissaoForm, ComposicaoForm,\n DocumentoAcessorioCreateForm,\n DocumentoAcessorioEditForm,\n ParticipacaoCreateForm, ParticipacaoEditForm,\n PeriodoForm, ReuniaoForm)\nfrom sapl.crud.base import (RP_DETAIL, RP_LIST, Crud, CrudAux,\n MasterDetailCrud,\n PermissionRequiredForAppCrudMixin)\nfrom sapl.materia.models import MateriaLegislativa, Tramitacao\n\nfrom .models import (CargoComissao, Comissao, Composicao, DocumentoAcessorio,\n Participacao, Periodo, Reuniao, TipoComissao)\n\n\ndef pegar_url_composicao(pk):\n participacao = Participacao.objects.get(id=pk)\n comp_pk = participacao.composicao.pk\n url = reverse('sapl.comissoes:composicao_detail', kwargs={'pk': comp_pk})\n return url\n\n\ndef pegar_url_reuniao(pk):\n documentoacessorio = DocumentoAcessorio.objects.get(id=pk)\n r_pk = documentoacessorio.reuniao.pk\n url = reverse('sapl.comissoes:reuniao_detail', kwargs={'pk': r_pk})\n return url\n\nCargoCrud = CrudAux.build(CargoComissao, 'cargo_comissao')\n\nTipoComissaoCrud = CrudAux.build(\n TipoComissao, 'tipo_comissao', list_field_names=[\n 'sigla', 'nome', 'natureza', 'dispositivo_regimental'])\n\n\nclass PeriodoComposicaoCrud(CrudAux):\n model = Periodo\n\n class CreateView(CrudAux.CreateView):\n form_class = PeriodoForm\n\n class UpdateView(CrudAux.UpdateView):\n form_class = PeriodoForm\n\n # class ListView(CrudAux.ListView):\n\n\nclass ParticipacaoCrud(MasterDetailCrud):\n model = Participacao\n parent_field = 'composicao__comissao'\n public = [RP_DETAIL, ]\n ListView = None\n link_return_to_parent_field = True\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['composicao', 'parlamentar', 'cargo']\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = ParticipacaoCreateForm\n\n def get_initial(self):\n initial = super().get_initial()\n initial['parent_pk'] = self.kwargs['pk']\n return initial\n\n class UpdateView(MasterDetailCrud.UpdateView):\n layout_key = 'ParticipacaoEdit'\n form_class = ParticipacaoEditForm\n\n class DeleteView(MasterDetailCrud.DeleteView):\n\n def get_success_url(self):\n composicao_comissao_pk = self.object.composicao.comissao.pk\n composicao_pk = self.object.composicao.pk\n return '{}?pk={}'.format(reverse('sapl.comissoes:composicao_list',\n args=[composicao_comissao_pk]),\n composicao_pk)\n\n\nclass ComposicaoCrud(MasterDetailCrud):\n model = Composicao\n parent_field = 'comissao'\n model_set = 'participacao_set'\n public = [RP_LIST, RP_DETAIL, ]\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = ComposicaoForm\n\n def get_initial(self):\n comissao = Comissao.objects.get(id=self.kwargs['pk'])\n return {'comissao': comissao}\n\n class ListView(MasterDetailCrud.ListView):\n logger = logging.getLogger(__name__)\n template_name = \"comissoes/composicao_list.html\"\n paginate_by = None\n\n def take_composicao_pk(self):\n \n username = self.request.user.username\n try:\n self.logger.debug('user=' + username + '. 
Tentando obter pk da composição.')\n return int(self.request.GET['pk'])\n except Exception as e:\n self.logger.error('user=' + username + '. Erro ao obter pk da composição. Retornado 0. ' + str(e))\n return 0\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n composicao_pk = self.take_composicao_pk()\n\n if composicao_pk == 0:\n # Composicao eh ordenada por Periodo, que por sua vez esta em\n # ordem descrescente de data de inicio (issue #1920)\n ultima_composicao = context['composicao_list'].first()\n if ultima_composicao:\n context['composicao_pk'] = ultima_composicao.pk\n else:\n context['composicao_pk'] = 0\n else:\n context['composicao_pk'] = composicao_pk\n\n context['participacao_set'] = Participacao.objects.filter(\n composicao__pk=context['composicao_pk']\n ).order_by('id')\n return context\n\n\nclass ComissaoCrud(Crud):\n model = Comissao\n help_topic = 'modulo_comissoes'\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['nome', 'sigla', 'tipo',\n 'data_criacao', 'data_extincao', 'ativa']\n ordering = '-ativa', 'sigla'\n\n class CreateView(Crud.CreateView):\n form_class = ComissaoForm\n\n def form_valid(self, form):\n return super(Crud.CreateView, self).form_valid(form)\n\n class UpdateView(Crud.UpdateView):\n form_class = ComissaoForm\n\n def form_valid(self, form):\n return super(Crud.UpdateView, self).form_valid(form)\n\n\nclass MateriasTramitacaoListView(ListView):\n template_name = \"comissoes/materias_em_tramitacao.html\"\n paginate_by = 10\n\n def get_queryset(self):\n # FIXME: Otimizar consulta\n ts = Tramitacao.objects.order_by(\n 'materia', '-data_tramitacao', '-id').annotate(\n comissao=F('unidade_tramitacao_destino__comissao')).distinct(\n 'materia').values_list('materia', 'comissao')\n\n ts = list(filter(lambda x: x[1] == int(self.kwargs['pk']), ts))\n ts = list(zip(*ts))\n ts = ts[0] if ts else []\n\n materias = MateriaLegislativa.objects.filter(\n pk__in=ts).order_by('tipo', '-ano', '-numero')\n\n return materias\n\n def get_context_data(self, **kwargs):\n context = super(\n MateriasTramitacaoListView, self).get_context_data(**kwargs)\n context['object'] = Comissao.objects.get(id=self.kwargs['pk'])\n context['qtde'] = self.object_list.count()\n return context\n\n\nclass ReuniaoCrud(MasterDetailCrud):\n model = Reuniao\n parent_field = 'comissao'\n model_set = 'documentoacessorio_set'\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['data', 'nome', 'tema']\n\n class ListView(MasterDetailCrud.ListView):\n logger = logging.getLogger(__name__)\n paginate_by = 10\n\n def take_reuniao_pk(self):\n\n username = self.request.user.username\n try:\n self.logger.debug('user=' + username + '. Tentando obter pk da reunião.')\n return int(self.request.GET['pk'])\n except Exception as e:\n self.logger.error('user=' + username + '. Erro ao obter pk da reunião. Retornado 0. 
' + str(e))\n return 0\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n reuniao_pk = self.take_reuniao_pk()\n\n if reuniao_pk == 0:\n ultima_reuniao = list(context['reuniao_list'])\n if len(ultima_reuniao) > 0:\n ultimo = ultima_reuniao[-1]\n context['reuniao_pk'] = ultimo.pk\n else:\n context['reuniao_pk'] = 0\n else:\n context['reuniao_pk'] = reuniao_pk\n\n context['documentoacessorio_set'] = DocumentoAcessorio.objects.filter(\n reuniao__pk=context['reuniao_pk']\n ).order_by('id')\n return context\n\n class UpdateView(MasterDetailCrud.UpdateView):\n form_class = ReuniaoForm\n\n def get_initial(self):\n return {'comissao': self.object.comissao}\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = ReuniaoForm\n\n def get_initial(self):\n comissao = Comissao.objects.get(id=self.kwargs['pk'])\n\n return {'comissao': comissao}\n\n\nclass DocumentoAcessorioCrud(MasterDetailCrud):\n model = DocumentoAcessorio\n parent_field = 'reuniao__comissao'\n public = [RP_DETAIL, ]\n ListView = None\n link_return_to_parent_field = True\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['nome', 'tipo', 'data', 'autor', 'arquivo']\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = DocumentoAcessorioCreateForm\n\n def get_initial(self):\n initial = super().get_initial()\n initial['parent_pk'] = self.kwargs['pk']\n return initial\n\n class UpdateView(MasterDetailCrud.UpdateView):\n layout_key = 'DocumentoAcessorioEdit'\n form_class = DocumentoAcessorioEditForm\n\n class DeleteView(MasterDetailCrud.DeleteView):\n\n def delete(self, *args, **kwargs):\n obj = self.get_object()\n obj.delete()\n return HttpResponseRedirect(\n reverse('sapl.comissoes:reuniao_detail',\n kwargs={'pk': obj.reuniao.pk}))\n", "path": "sapl/comissoes/views.py" } ]
diff --git a/sapl/comissoes/views.py b/sapl/comissoes/views.py index 743eaa58f..d6d129b11 100644 --- a/sapl/comissoes/views.py +++ b/sapl/comissoes/views.py @@ -186,6 +186,7 @@ def get_context_data(self, **kwargs): context = super( MateriasTramitacaoListView, self).get_context_data(**kwargs) context['object'] = Comissao.objects.get(id=self.kwargs['pk']) + context['qtde'] = self.object_list.count() return context diff --git a/sapl/templates/comissoes/materias_em_tramitacao.html b/sapl/templates/comissoes/materias_em_tramitacao.html index 9f197d1ce..32860df54 100644 --- a/sapl/templates/comissoes/materias_em_tramitacao.html +++ b/sapl/templates/comissoes/materias_em_tramitacao.html @@ -12,7 +12,7 @@ <h1 class="page-header"> {% block detail_content %} <fieldset> <legend>{{comissao}}</legend> - <b>Há {{page_obj|length}} matéria(s) em tramitação nesta unidade.</b> <br><br> + <b>Há {{qtde}} matéria(s) em tramitação nesta unidade.</b> <br><br> {% for materia in page_obj %} <b><a href="{% url 'sapl.materia:materialegislativa_detail' materia.id %}"> {{materia.tipo.sigla}} {{materia.numero}} {{materia.ano}} - {{materia.tipo}}
instadeepai__Mava-654
[TEST] Jax MAPPO System Integration
### What do you want to test?
Jax MAPPO full integration test
### Outline of test structure
* Unit tests (if possible)
* Test component interactions
* Ensure the system does not crash during standard executor and trainer steps
### Definition of done
Passing checks, coverage of all basic component interactions, and consideration of edge cases
### Mandatory checklist before making a PR
* [ ] The success criteria laid down in “Definition of done” are met.
* [ ] Test code is documented - docstrings for methods and classes, static types for arguments.
* [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.
[ { "content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"General launcher for systems\"\"\"\nfrom typing import Any, Dict, List, Optional, Union\n\nimport launchpad as lp\nimport reverb\n\nfrom mava.utils import lp_utils\nfrom mava.utils.builder_utils import copy_node_fn\n\n\nclass NodeType:\n \"\"\"Specify launchpad node types that systems can use.\"\"\"\n\n reverb = lp.ReverbNode\n courier = lp.CourierNode\n\n\nclass Launcher:\n \"\"\"This mava launcher can be used to launch multi-node systems using either single \\\n or distributed computation.\"\"\"\n\n def __init__(\n self,\n multi_process: bool,\n nodes_on_gpu: List = [],\n single_process_trainer_period: int = 1,\n single_process_evaluator_period: int = 10,\n single_process_max_episodes: Optional[int] = None,\n name: str = \"System\",\n terminal: str = \"current_terminal\",\n lp_launch_type: Union[\n str, lp.LaunchType\n ] = lp.LaunchType.LOCAL_MULTI_PROCESSING,\n ) -> None:\n \"\"\"Initialise the launcher.\n\n If multi-process, set up the launchpad program.\n Otherwise, create a dictionary for the nodes in the system.\n\n Args:\n multi_process : whether to use launchpad to run nodes on separate processes.\n nodes_on_gpu : which nodes should be run on the GPU.\n single_process_trainer_period : number of episodes between single process\n trainer steps.\n single_process_evaluator_period : num episodes between single process\n evaluator steps.\n single_process_max_episodes: maximum number of episodes to run\n before termination.\n name : launchpad program name.\n terminal : terminal for launchpad processes to be shown on.\n lp_launch_type: launchpad launch type.\n \"\"\"\n self._multi_process = multi_process\n self._name = name\n self._single_process_trainer_period = single_process_trainer_period\n self._single_process_evaluator_period = single_process_evaluator_period\n self._single_process_max_episodes = single_process_max_episodes\n self._terminal = terminal\n self._lp_launch_type = lp_launch_type\n if multi_process:\n self._program = lp.Program(name=name)\n self._nodes_on_gpu = nodes_on_gpu\n else:\n self._nodes: List = []\n self._node_dict: Dict = {\n \"data_server\": None,\n \"parameter_server\": None,\n \"executor\": None,\n \"evaluator\": None,\n \"trainer\": None,\n }\n\n def add(\n self,\n node_fn: Any,\n arguments: Any = [],\n node_type: Union[lp.ReverbNode, lp.CourierNode] = NodeType.courier,\n name: str = \"Node\",\n ) -> Any:\n \"\"\"Add a node to the system.\n\n If multi-processing, add a node to the existing launchpad program,\n grouped under the given name.\n This means that when multi-processing,\n you can have multiple nodes of the same name (e.g. 
executor).\n If system is single-process, only one node per name is allowed in the system.\n\n Args:\n node_fn : Function returning the system process that will run on the node.\n arguments : Arguments used when initialising the system process.\n node_type : Type of launchpad node to use.\n name : Node name (e.g. executor).\n\n Raises:\n ValueError: if single-process and node name is not supported.\n ValueError: if single-process and trying to init a node more than once.\n\n Returns:\n The system process or launchpad node.\n \"\"\"\n # Create a list of arguments\n if type(arguments) is not list:\n arguments = [arguments]\n\n if self._multi_process:\n with self._program.group(name):\n node = self._program.add_node(node_type(node_fn, *arguments))\n return node\n else:\n if name not in self._node_dict:\n raise ValueError(\n f\"{name} is not a valid node name.\"\n + \"Single process currently only supports \"\n + \"nodes named: {list(self._node_dict.keys())}\"\n )\n elif self._node_dict[name] is not None:\n raise ValueError(\n f\"Node named {name} initialised more than once.\"\n + \"Single process currently only supports one node per type.\"\n )\n\n node_fn = copy_node_fn(node_fn)\n process = node_fn(*arguments)\n if node_type == lp.ReverbNode:\n # Assigning server to self to keep it alive.\n self._replay_server = reverb.Server(process, port=None)\n process = reverb.Client(f\"localhost:{self._replay_server.port}\")\n self._nodes.append(process)\n self._node_dict[name] = process\n return process\n\n def get_nodes(self) -> List[Any]:\n \"\"\"Get the nodes of a single-process system.\n\n Raises:\n ValueError: if system is multi-process.\n\n Returns:\n System nodes.\n \"\"\"\n if self._multi_process:\n raise ValueError(\"Get nodes only implemented for single process setups.\")\n\n return self._nodes\n\n def launch(self) -> None:\n \"\"\"Launch the launchpad program or start the single-process system loop.\n\n Returns:\n None.\n \"\"\"\n if self._multi_process:\n local_resources = lp_utils.to_device(\n program_nodes=self._program.groups.keys(),\n nodes_on_gpu=self._nodes_on_gpu,\n )\n\n lp.launch(\n self._program,\n launch_type=self._lp_launch_type,\n terminal=self._terminal,\n local_resources=local_resources,\n )\n else:\n episode = 1\n step = 1\n executor_steps = 0\n\n data_server = self._node_dict[\"data_server\"]\n _ = self._node_dict[\"parameter_server\"]\n executor = self._node_dict[\"executor\"]\n evaluator = self._node_dict[\"evaluator\"]\n trainer = self._node_dict[\"trainer\"]\n\n # getting the maximum queue size\n queue_threshold = data_server.server_info()[\"trainer\"].max_size\n\n while (\n self._single_process_max_episodes is None\n or episode <= self._single_process_max_episodes\n ):\n # if the queue is too full we skip the executor to ensure that the\n # executor won't hang when trying to push experience\n if data_server.server_info()[\"trainer\"].current_size < int(\n queue_threshold * 0.75\n ):\n executor_stats = executor.run_episode_and_log()\n executor_steps += executor_stats[\"episode_length\"]\n\n print(f\"Episode {episode} completed.\")\n episode += 1\n\n # if the queue has less than sample_batch_size samples in it we skip\n # the trainer to ensure that the trainer won't hang\n if (\n data_server.server_info()[\"trainer\"].current_size\n >= trainer.store.global_config.sample_batch_size\n and step % self._single_process_trainer_period == 0\n ):\n _ = trainer.step() # logging done in trainer\n print(\"Performed trainer step.\")\n if step % self._single_process_evaluator_period 
== 0:\n _ = evaluator.run_episode_and_log()\n print(\"Performed evaluator run.\")\n\n step += 1\n", "path": "mava/systems/jax/launcher.py" } ]
[ { "content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"General launcher for systems\"\"\"\nfrom typing import Any, Dict, List, Optional, Union\n\nimport launchpad as lp\nimport reverb\n\nfrom mava.utils import lp_utils\nfrom mava.utils.builder_utils import copy_node_fn\n\n\nclass NodeType:\n \"\"\"Specify launchpad node types that systems can use.\"\"\"\n\n reverb = lp.ReverbNode\n courier = lp.CourierNode\n\n\nclass Launcher:\n \"\"\"This mava launcher can be used to launch multi-node systems using either single \\\n or distributed computation.\"\"\"\n\n def __init__(\n self,\n multi_process: bool,\n nodes_on_gpu: List = [],\n single_process_trainer_period: int = 1,\n single_process_evaluator_period: int = 10,\n single_process_max_episodes: Optional[int] = None,\n name: str = \"System\",\n terminal: str = \"current_terminal\",\n lp_launch_type: Union[\n str, lp.LaunchType\n ] = lp.LaunchType.LOCAL_MULTI_PROCESSING,\n ) -> None:\n \"\"\"Initialise the launcher.\n\n If multi-process, set up the launchpad program.\n Otherwise, create a dictionary for the nodes in the system.\n\n Args:\n multi_process : whether to use launchpad to run nodes on separate processes.\n nodes_on_gpu : which nodes should be run on the GPU.\n single_process_trainer_period : number of episodes between single process\n trainer steps.\n single_process_evaluator_period : num episodes between single process\n evaluator steps.\n single_process_max_episodes: maximum number of episodes to run\n before termination.\n name : launchpad program name.\n terminal : terminal for launchpad processes to be shown on.\n lp_launch_type: launchpad launch type.\n \"\"\"\n self._multi_process = multi_process\n self._name = name\n self._single_process_trainer_period = single_process_trainer_period\n self._single_process_evaluator_period = single_process_evaluator_period\n self._single_process_max_episodes = single_process_max_episodes\n self._terminal = terminal\n self._lp_launch_type = lp_launch_type\n if multi_process:\n self._program = lp.Program(name=name)\n self._nodes_on_gpu = nodes_on_gpu\n else:\n self._nodes: List = []\n self._node_dict: Dict = {\n \"data_server\": None,\n \"parameter_server\": None,\n \"executor\": None,\n \"evaluator\": None,\n \"trainer\": None,\n }\n\n def add(\n self,\n node_fn: Any,\n arguments: Any = [],\n node_type: Union[lp.ReverbNode, lp.CourierNode] = NodeType.courier,\n name: str = \"Node\",\n ) -> Any:\n \"\"\"Add a node to the system.\n\n If multi-processing, add a node to the existing launchpad program,\n grouped under the given name.\n This means that when multi-processing,\n you can have multiple nodes of the same name (e.g. 
executor).\n If system is single-process, only one node per name is allowed in the system.\n\n Args:\n node_fn : Function returning the system process that will run on the node.\n arguments : Arguments used when initialising the system process.\n node_type : Type of launchpad node to use.\n name : Node name (e.g. executor).\n\n Raises:\n ValueError: if single-process and node name is not supported.\n ValueError: if single-process and trying to init a node more than once.\n\n Returns:\n The system process or launchpad node.\n \"\"\"\n # Create a list of arguments\n if type(arguments) is not list:\n arguments = [arguments]\n\n if self._multi_process:\n with self._program.group(name):\n node_fn = copy_node_fn(node_fn)\n node = self._program.add_node(node_type(node_fn, *arguments))\n return node\n else:\n if name not in self._node_dict:\n raise ValueError(\n f\"{name} is not a valid node name.\"\n + \"Single process currently only supports \"\n + \"nodes named: {list(self._node_dict.keys())}\"\n )\n elif self._node_dict[name] is not None:\n raise ValueError(\n f\"Node named {name} initialised more than once.\"\n + \"Single process currently only supports one node per type.\"\n )\n\n node_fn = copy_node_fn(node_fn)\n process = node_fn(*arguments)\n if node_type == lp.ReverbNode:\n # Assigning server to self to keep it alive.\n self._replay_server = reverb.Server(process, port=None)\n process = reverb.Client(f\"localhost:{self._replay_server.port}\")\n self._nodes.append(process)\n self._node_dict[name] = process\n return process\n\n def get_nodes(self) -> List[Any]:\n \"\"\"Get the nodes of a single-process system.\n\n Raises:\n ValueError: if system is multi-process.\n\n Returns:\n System nodes.\n \"\"\"\n if self._multi_process:\n raise ValueError(\"Get nodes only implemented for single process setups.\")\n\n return self._nodes\n\n def launch(self) -> None:\n \"\"\"Launch the launchpad program or start the single-process system loop.\n\n Returns:\n None.\n \"\"\"\n if self._multi_process:\n local_resources = lp_utils.to_device(\n program_nodes=self._program.groups.keys(),\n nodes_on_gpu=self._nodes_on_gpu,\n )\n\n lp.launch(\n self._program,\n launch_type=self._lp_launch_type,\n terminal=self._terminal,\n local_resources=local_resources,\n )\n else:\n episode = 1\n step = 1\n executor_steps = 0\n\n data_server = self._node_dict[\"data_server\"]\n _ = self._node_dict[\"parameter_server\"]\n executor = self._node_dict[\"executor\"]\n evaluator = self._node_dict[\"evaluator\"]\n trainer = self._node_dict[\"trainer\"]\n\n # getting the maximum queue size\n queue_threshold = data_server.server_info()[\"trainer\"].max_size\n\n while (\n self._single_process_max_episodes is None\n or episode <= self._single_process_max_episodes\n ):\n # if the queue is too full we skip the executor to ensure that the\n # executor won't hang when trying to push experience\n if data_server.server_info()[\"trainer\"].current_size < int(\n queue_threshold * 0.75\n ):\n executor_stats = executor.run_episode_and_log()\n executor_steps += executor_stats[\"episode_length\"]\n\n print(f\"Episode {episode} completed.\")\n episode += 1\n\n # if the queue has less than sample_batch_size samples in it we skip\n # the trainer to ensure that the trainer won't hang\n if (\n data_server.server_info()[\"trainer\"].current_size\n >= trainer.store.global_config.sample_batch_size\n and step % self._single_process_trainer_period == 0\n ):\n _ = trainer.step() # logging done in trainer\n print(\"Performed trainer step.\")\n if step % 
self._single_process_evaluator_period == 0:\n _ = evaluator.run_episode_and_log()\n print(\"Performed evaluator run.\")\n\n step += 1\n", "path": "mava/systems/jax/launcher.py" } ]
diff --git a/mava/systems/jax/launcher.py b/mava/systems/jax/launcher.py index 141eab647..6369269f9 100644 --- a/mava/systems/jax/launcher.py +++ b/mava/systems/jax/launcher.py @@ -119,6 +119,7 @@ def add( if self._multi_process: with self._program.group(name): + node_fn = copy_node_fn(node_fn) node = self._program.add_node(node_type(node_fn, *arguments)) return node else: diff --git a/tests/jax/components/building/distributor_test.py b/tests/jax/components/building/distributor_test.py index d576ed661..ebb37470c 100644 --- a/tests/jax/components/building/distributor_test.py +++ b/tests/jax/components/building/distributor_test.py @@ -47,7 +47,7 @@ def __init__(self) -> None: self.program_launched = False def data_server(self) -> List[Any]: - """Data server to test no multi_process in on_building_program_nodes method + """Data server to test on_building_program_nodes method Returns: tables: fake table composed of reverb_server tables @@ -63,13 +63,13 @@ def data_server(self) -> List[Any]: ] def parameter_server(self) -> str: - """parameter_server to test no multi_process in on_building_program_nodes""" + """parameter_server to test on_building_program_nodes""" return "Parameter Server Test" def executor( self, executor_id: str, data_server_client: Any, parameter_server_client: Any ) -> str: - """Executor to test no multi_process in on_building_program_nodes method""" + """Executor to test on_building_program_nodes method""" if executor_id == "evaluator": return "Evaluator Test" @@ -79,7 +79,7 @@ def executor( def trainer( self, trainer_id: str, data_server_client: Any, parameter_server_client: Any ) -> str: - """Trainer to test no multi_process in on_building_program_nodes method""" + """Trainer to test on_building_program_nodes method""" return "Trainer Test" def launch(self) -> None: @@ -119,27 +119,34 @@ def test_on_building_program_nodes_multi_process( "evaluator", "trainer", ] + + data_server_fn = mock_builder.store.program._program._groups["data_server"][ + -1 + ]._priority_tables_fn + assert str(repr(data_server_fn).split(" ")[2].split(".")[-1]) == "data_server" assert ( - mock_builder.store.program._program._groups["data_server"][ + mock_builder.store.program._program._groups["parameter_server"][ -1 - ]._priority_tables_fn - == mock_builder.data_server - ) - assert ( - mock_builder.store.program._program._groups["parameter_server"][-1]._constructor - == mock_builder.parameter_server + ]._constructor() + == "Parameter Server Test" ) assert ( - mock_builder.store.program._program._groups["executor"][-1]._constructor - == mock_builder.executor + mock_builder.store.program._program._groups["executor"][-1]._constructor( + "executor", "fake_data_server", "fake_parameter_server" + ) + == "Executor Test" ) assert ( - mock_builder.store.program._program._groups["evaluator"][-1]._constructor - == mock_builder.executor + mock_builder.store.program._program._groups["evaluator"][-1]._constructor( + "evaluator", "fake_data_server", "fake_parameter_server" + ) + == "Evaluator Test" ) assert ( - mock_builder.store.program._program._groups["trainer"][-1]._constructor - == mock_builder.trainer + mock_builder.store.program._program._groups["trainer"][-1]._constructor( + "trainer", "fake_data_server", "fake_parameter_server" + ) + == "Trainer Test" ) with pytest.raises(Exception): @@ -162,23 +169,28 @@ def test_on_building_program_nodes_multi_process_no_evaluator( "executor", "trainer", ] + data_server_fn = mock_builder.store.program._program._groups["data_server"][ + -1 + ]._priority_tables_fn + assert 
str(repr(data_server_fn).split(" ")[2].split(".")[-1]) == "data_server" + assert ( - mock_builder.store.program._program._groups["data_server"][ + mock_builder.store.program._program._groups["parameter_server"][ -1 - ]._priority_tables_fn - == mock_builder.data_server - ) - assert ( - mock_builder.store.program._program._groups["parameter_server"][-1]._constructor - == mock_builder.parameter_server + ]._constructor() + == "Parameter Server Test" ) assert ( - mock_builder.store.program._program._groups["executor"][-1]._constructor - == mock_builder.executor + mock_builder.store.program._program._groups["executor"][-1]._constructor( + "executor", "fake_data_server", "fake_parameter_server" + ) + == "Executor Test" ) assert ( - mock_builder.store.program._program._groups["trainer"][-1]._constructor - == mock_builder.trainer + mock_builder.store.program._program._groups["trainer"][-1]._constructor( + "trainer", "fake_data_server", "fake_parameter_server" + ) + == "Trainer Test" ) with pytest.raises(Exception): diff --git a/tests/jax/core_system_components/launcher_test.py b/tests/jax/core_system_components/launcher_test.py index ecf776f95..6a6771093 100644 --- a/tests/jax/core_system_components/launcher_test.py +++ b/tests/jax/core_system_components/launcher_test.py @@ -15,62 +15,14 @@ """Tests for launcher class for Jax-based Mava systems""" -from typing import Any, Callable, List - import launchpad as lp import pytest -from reverb import Client, item_selectors, pybind, rate_limiters -from reverb import server as reverb_server +from reverb import Client, pybind from mava.systems.jax.launcher import Launcher, NodeType from tests.jax.components.building.distributor_test import MockBuilder [email protected] -def mock_data_server_fn() -> Callable: - """call data_server function""" - - def data_server() -> List[Any]: - """data_server - - Returns: - tables: fake table composed of reverb_server tables - """ - return [ - reverb_server.Table( - name="table_0", - sampler=item_selectors.Prioritized(priority_exponent=1), - remover=item_selectors.Fifo(), - max_size=1000, - rate_limiter=rate_limiters.MinSize(1), - ) - ] - - return data_server - - [email protected] -def mock_parameter_server_fn() -> Callable: - """call parameter_server function""" - - def parameter_server() -> str: - """Fake parameter server function""" - return "test_parameter_server" - - return parameter_server - - [email protected] -def mock_parameter_server_second_fn() -> Callable: - """call the second parameter_server function""" - - def parameter_server_second() -> str: - """Another fake parameter server function""" - return "test_parameter_server_second_mock" - - return parameter_server_second - - @pytest.fixture def mock_builder() -> MockBuilder: """Mock builder""" @@ -120,7 +72,7 @@ def test_initiator_non_multi_process() -> None: assert not hasattr(launcher, "_program") -def test_add_multi_process(mock_data_server_fn: Callable) -> None: +def test_add_multi_process(mock_builder: MockBuilder) -> None: """Test add method in the Launcher for the case of multi process Args: @@ -128,16 +80,19 @@ def test_add_multi_process(mock_data_server_fn: Callable) -> None: """ launcher = Launcher(multi_process=True) data_server = launcher.add( - mock_data_server_fn, + mock_builder.data_server, node_type=NodeType.reverb, name="data_server_test", ) assert list(launcher._program._groups.keys()) == ["data_server_test"] - assert ( - launcher._program._groups["data_server_test"][-1]._priority_tables_fn - == mock_data_server_fn - ) + + # Make sure the node 
have data_server method + data_server_fn = launcher._program._groups["data_server_test"][ + -1 + ]._priority_tables_fn + assert str(repr(data_server_fn).split(" ")[2].split(".")[-1]) == "data_server" + assert [data_server] == launcher._program._groups["data_server_test"][ -1 ]._created_handles @@ -146,9 +101,7 @@ def test_add_multi_process(mock_data_server_fn: Callable) -> None: assert not hasattr(launcher, "_node_dict") -def test_add_multi_process_two_add_calls( - mock_data_server_fn: Callable, mock_parameter_server_fn: Callable -) -> None: +def test_add_multi_process_two_add_calls(mock_builder: MockBuilder) -> None: """Test calling add more than one time method in the Launcher for the case of multi process Args: @@ -157,12 +110,12 @@ def test_add_multi_process_two_add_calls( """ launcher = Launcher(multi_process=True) data_server = launcher.add( - mock_data_server_fn, + mock_builder.data_server, node_type=NodeType.reverb, name="data_server_test", ) parameter_server = launcher.add( - mock_parameter_server_fn, + mock_builder.parameter_server, node_type=NodeType.courier, name="parameter_server_test", ) @@ -172,17 +125,19 @@ def test_add_multi_process_two_add_calls( "parameter_server_test", ] - assert ( - launcher._program._groups["data_server_test"][-1]._priority_tables_fn - == mock_data_server_fn - ) + # Make sure the node have data_server method + data_server_fn = launcher._program._groups["data_server_test"][ + -1 + ]._priority_tables_fn + assert str(repr(data_server_fn).split(" ")[2].split(".")[-1]) == "data_server" + assert [data_server] == launcher._program._groups["data_server_test"][ -1 ]._created_handles assert ( - launcher._program._groups["parameter_server_test"][-1]._constructor - == mock_parameter_server_fn + launcher._program._groups["parameter_server_test"][-1]._constructor() + == "Parameter Server Test" ) assert [parameter_server] == launcher._program._groups["parameter_server_test"][ -1 @@ -192,9 +147,7 @@ def test_add_multi_process_two_add_calls( assert not hasattr(launcher, "_node_dict") -def test_add_multi_process_two_add_same_name( - mock_parameter_server_fn: Callable, mock_parameter_server_second_fn: Callable -) -> None: +def test_add_multi_process_two_add_same_name(mock_builder: MockBuilder) -> None: """Test calling twice add for two nodes with same name for the case of multi process Args: @@ -204,24 +157,24 @@ def test_add_multi_process_two_add_same_name( launcher = Launcher(multi_process=True) parameter_server_1 = launcher.add( - mock_parameter_server_fn, + mock_builder.parameter_server, node_type=NodeType.courier, name="parameter_server_test", ) parameter_server_2 = launcher.add( - mock_parameter_server_second_fn, + mock_builder.parameter_server, node_type=NodeType.courier, name="parameter_server_test", ) assert list(launcher._program._groups.keys()) == ["parameter_server_test"] assert ( - launcher._program._groups["parameter_server_test"][0]._constructor - == mock_parameter_server_fn + launcher._program._groups["parameter_server_test"][0]._constructor() + == "Parameter Server Test" ) assert ( - launcher._program._groups["parameter_server_test"][1]._constructor - == mock_parameter_server_second_fn + launcher._program._groups["parameter_server_test"][1]._constructor() + == "Parameter Server Test" ) assert [parameter_server_1] == launcher._program._groups["parameter_server_test"][ 0 diff --git a/tests/jax/systems/ippo_system_test.py b/tests/jax/systems/ippo_system_test.py index 5df91c305..59e0bbac5 100644 --- a/tests/jax/systems/ippo_system_test.py +++ 
b/tests/jax/systems/ippo_system_test.py @@ -1,8 +1,28 @@ +# python3 +# Copyright 2021 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import functools +from datetime import datetime +from typing import Any -import acme +import launchpad as lp import optax import pytest +from launchpad.launch.test_multi_threading import ( + address_builder as test_address_builder, +) from mava.systems.jax import ippo from mava.systems.jax.system import System @@ -15,16 +35,14 @@ @pytest.fixture def test_full_system() -> System: - """Add description here.""" + """Full mava system fixture for testing""" return ippo.IPPOSystem() -# TODO: fix test [email protected](reason="test is currently breaking ci pipeline") -def test_except_trainer( +def test_ippo( test_full_system: System, ) -> None: - """Test if the parameter server instantiates processes as expected.""" + """Full integration test of ippo system.""" # Environment. environment_factory = functools.partial( @@ -34,11 +52,17 @@ def test_except_trainer( ) # Networks. - network_factory = ippo.make_default_networks + def network_factory(*args: Any, **kwargs: Any) -> Any: + return ippo.make_default_networks( # type: ignore + policy_layer_sizes=(32, 32), + critic_layer_sizes=(64, 64), + *args, + **kwargs, + ) # Checkpointer appends "Checkpoints" to checkpoint_dir. base_dir = "~/mava" - mava_id = "12345" + mava_id = str(datetime.now()) checkpoint_subpath = f"{base_dir}/{mava_id}" # Log every [log_every] seconds. @@ -54,7 +78,8 @@ def test_except_trainer( # Optimizer. optimizer = optax.chain( - optax.clip_by_global_norm(40.0), optax.scale_by_adam(), optax.scale(-1e-4) + optax.clip_by_global_norm(40.0), + optax.adam(1e-4), ) # Build the system @@ -62,28 +87,27 @@ def test_except_trainer( environment_factory=environment_factory, network_factory=network_factory, logger_factory=logger_factory, - checkpoint_subpath=checkpoint_subpath, + experiment_path=checkpoint_subpath, optimizer=optimizer, - executor_parameter_update_period=20, - multi_process=False, + executor_parameter_update_period=1, + multi_process=True, run_evaluator=True, num_executors=1, + max_queue_size=500, use_next_extras=False, - sample_batch_size=2, + sample_batch_size=5, + nodes_on_gpu=[], + lp_launch_type=lp.LaunchType.TEST_MULTI_THREADING, ) - ( - data_server, - parameter_server, - executor, - evaluator, - trainer, - ) = test_full_system._builder.store.system_build - - assert isinstance(executor, acme.core.Worker) + (trainer_node,) = test_full_system._builder.store.program._program._groups[ + "trainer" + ] + trainer_node.disable_run() + test_address_builder.bind_addresses([trainer_node]) - # Step the executor - executor.run_episode() + test_full_system.launch() + trainer_run = trainer_node.create_handle().dereference() - # Step the trainer - trainer.step() + for _ in range(5): + trainer_run.step()
scikit-hep__pyhf-1089
Write JOSS Submission
# Description
Given conversations with @labarba at SciPy 2019, it seems that pyhf would be a good candidate for a [JOSS paper](https://joss.theoj.org/). Given @cranmer's good experience with this for carl [![DOI](http://joss.theoj.org/papers/10.21105/joss.00011/status.svg)](https://doi.org/10.21105/joss.00011), it seems that if we clean up and finish a few of the docstrings we would be in good shape to submit for a review (once we finish up the other publication process we have ongoing).

The [carl paper submission review](https://github.com/openjournals/joss-reviews/issues/11) might be a good preemptive checklist for this.
[ { "content": "#\n# pyhf documentation build configuration file, created by\n# sphinx-quickstart on Fri Feb 9 11:58:49 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use Path('../relative_path_to_dir').resolve() to make it absolute, like shown here.\n\nfrom pathlib import Path\nimport sys\nfrom pkg_resources import get_distribution\n\nsys.path.insert(0, str(Path('../src').resolve()))\nsys.path.insert(1, str(Path('./exts').resolve()))\n\n\ndef setup(app):\n app.add_css_file(\n 'https://cdnjs.cloudflare.com/ajax/libs/github-fork-ribbon-css/0.2.2/gh-fork-ribbon.min.css'\n )\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.bibtex',\n 'sphinx.ext.napoleon',\n 'sphinx_click.ext',\n 'nbsphinx',\n 'sphinx_issues',\n 'sphinx_copybutton',\n 'xref',\n]\nbibtex_bibfiles = [\n \"bib/docs.bib\",\n \"bib/HEPData_likelihoods.bib\",\n \"bib/media.bib\",\n \"bib/posters.bib\",\n \"bib/preferred.bib\",\n \"bib/talks.bib\",\n \"bib/tutorials.bib\",\n \"bib/use_citations.bib\",\n]\n\n# external links\nxref_links = {\"arXiv:1007.1727\": (\"[1007.1727]\", \"https://arxiv.org/abs/1007.1727\")}\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'numpy': ('http://docs.scipy.org/doc/numpy/', None),\n 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),\n 'uproot': ('https://uproot.readthedocs.io/en/latest/', None),\n}\n\n# Github repo\nissues_github_path = 'scikit-hep/pyhf'\n\n# Generate the API documentation when building\nautosummary_generate = True\nnumpydoc_show_class_members = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = ['.rst', '.md']\n# source_suffix = '.rst'\n\n# The encoding of source files.\n#\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'pyhf'\ncopyright = '2018, Lukas Heinrich, Matthew Feickert, Giordon Stark'\nauthor = 'Lukas Heinrich, Matthew Feickert, Giordon Stark'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n# The full version, including alpha/beta/rc tags.\nrelease = get_distribution('pyhf').version\n# for example take major/minor/patch\nversion = '.'.join(release.split('.')[:3])\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#\n# today = ''\n#\n# Else, today_fmt is used as the format for a strftime call.\n#\n# today_fmt = '%B %d, %Y'\n\nautodoc_mock_imports = [\n 'tensorflow',\n 'torch',\n 'jax',\n 'iminuit',\n 'tensorflow_probability',\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\n '_build',\n '**.ipynb_checkpoints',\n 'examples/experiments/edwardpyhf.ipynb',\n 'examples/notebooks/ImpactPlot.ipynb',\n 'examples/notebooks/Recast.ipynb',\n 'examples/notebooks/StatError.ipynb',\n 'examples/notebooks/example-tensorflow.ipynb',\n 'examples/notebooks/histogrammar.ipynb',\n 'examples/notebooks/histosys.ipynb',\n 'examples/notebooks/histosys-pytorch.ipynb',\n 'examples/notebooks/importxml.ipynb',\n 'examples/notebooks/multichannel-coupled-normsys.ipynb',\n 'examples/notebooks/multichannel-normsys.ipynb',\n 'examples/notebooks/normsys.ipynb',\n 'examples/notebooks/pullplot.ipynb',\n 'examples/notebooks/pytorch_tests_onoff.ipynb',\n 'examples/notebooks/tensorflow-limit.ipynb',\n]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = []\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#\n# html_title = u'pyhf v0.3.0'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#\n# html_logo = None\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#\nhtml_extra_path = ['_extras']\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#\n# html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n#\n# html_domain_indices = True\n\n# If false, no index is generated.\n#\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'\n#\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'pyhfdoc'\n\n# sphinx-copybutton configuration\ncopybutton_prompt_text = \">>> \"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n 'pyhf.tex',\n 'pyhf Documentation',\n 'Lukas Heinrich, Matthew Feickert, Giordon Stark',\n 'manual',\n )\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n#\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#\n# latex_appendices = []\n\n# It false, will not define \\strong, \\code, \titleref, \\crossref ... but only\n# \\sphinxstrong, ..., \\sphinxtitleref, ... To help avoid clash with user added\n# packages.\n#\n# latex_keep_old_macro_names = True\n\n# If false, no module index is generated.\n#\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'pyhf', 'pyhf Documentation', [author], 1)]\n\n# If true, show URL addresses after external links.\n#\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n 'pyhf',\n 'pyhf Documentation',\n author,\n 'pyhf',\n 'One line description of project.',\n 'Miscellaneous',\n )\n]\n\n# Documents to append as an appendix to all manuals.\n#\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n#\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#\n# texinfo_no_detailmenu = False\n\nmathjax_config = {\n 'tex2jax': {'inlineMath': [['$', '$'], ['\\\\(', '\\\\)']]},\n 'TeX': {\n 'Macros': {\n 'bm': [\"\\\\boldsymbol{#1}\", 1], # \\usepackage{bm}, see mathjax/MathJax#1219\n 'HiFa': r'\\texttt{HistFactory}',\n 'Root': r'\\texttt{ROOT}',\n 'RooStats': r'\\texttt{RooStats}',\n 'RooFit': r'\\texttt{RooFit}',\n 'pyhf': r'\\texttt{pyhf}',\n 'CLs': r'\\mathrm{CL}_{s}',\n 'freeset': r'\\bm{\\eta}',\n 'constrset': r'\\bm{\\chi}',\n 'singleconstr': r'\\chi',\n 'channelcounts': r'\\bm{n}',\n 'auxdata': r'\\bm{a}',\n 'poiset': r'\\bm{\\psi}',\n 'nuisset': r'\\bm{\\theta}',\n 'fullset': r'\\bm{\\phi}',\n 'singlefull': r'\\phi',\n 'TeV': r'\\textrm{TeV}',\n }\n },\n}\n", "path": "docs/conf.py" } ]
[ { "content": "#\n# pyhf documentation build configuration file, created by\n# sphinx-quickstart on Fri Feb 9 11:58:49 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use Path('../relative_path_to_dir').resolve() to make it absolute, like shown here.\n\nfrom pathlib import Path\nimport sys\nfrom pkg_resources import get_distribution\n\nsys.path.insert(0, str(Path('../src').resolve()))\nsys.path.insert(1, str(Path('./exts').resolve()))\n\n\ndef setup(app):\n app.add_css_file(\n 'https://cdnjs.cloudflare.com/ajax/libs/github-fork-ribbon-css/0.2.2/gh-fork-ribbon.min.css'\n )\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.bibtex',\n 'sphinx.ext.napoleon',\n 'sphinx_click.ext',\n 'nbsphinx',\n 'sphinx_issues',\n 'sphinx_copybutton',\n 'xref',\n]\nbibtex_bibfiles = [\n \"bib/docs.bib\",\n \"bib/HEPData_likelihoods.bib\",\n \"bib/media.bib\",\n \"bib/posters.bib\",\n \"bib/preferred.bib\",\n \"bib/talks.bib\",\n \"bib/tutorials.bib\",\n \"bib/use_citations.bib\",\n]\n\n# external links\nxref_links = {\"arXiv:1007.1727\": (\"[1007.1727]\", \"https://arxiv.org/abs/1007.1727\")}\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'numpy': ('http://docs.scipy.org/doc/numpy/', None),\n 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),\n 'uproot': ('https://uproot.readthedocs.io/en/latest/', None),\n}\n\n# Github repo\nissues_github_path = 'scikit-hep/pyhf'\n\n# Generate the API documentation when building\nautosummary_generate = True\nnumpydoc_show_class_members = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = ['.rst', '.md']\n# source_suffix = '.rst'\n\n# The encoding of source files.\n#\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'pyhf'\ncopyright = '2018, Lukas Heinrich, Matthew Feickert, Giordon Stark'\nauthor = 'Lukas Heinrich, Matthew Feickert, Giordon Stark'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n# The full version, including alpha/beta/rc tags.\nrelease = get_distribution('pyhf').version\n# for example take major/minor/patch\nversion = '.'.join(release.split('.')[:3])\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#\n# today = ''\n#\n# Else, today_fmt is used as the format for a strftime call.\n#\n# today_fmt = '%B %d, %Y'\n\nautodoc_mock_imports = [\n 'tensorflow',\n 'torch',\n 'jax',\n 'iminuit',\n 'tensorflow_probability',\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\n '_build',\n 'JOSS',\n '**.ipynb_checkpoints',\n 'examples/experiments/edwardpyhf.ipynb',\n 'examples/notebooks/ImpactPlot.ipynb',\n 'examples/notebooks/Recast.ipynb',\n 'examples/notebooks/StatError.ipynb',\n 'examples/notebooks/example-tensorflow.ipynb',\n 'examples/notebooks/histogrammar.ipynb',\n 'examples/notebooks/histosys.ipynb',\n 'examples/notebooks/histosys-pytorch.ipynb',\n 'examples/notebooks/importxml.ipynb',\n 'examples/notebooks/multichannel-coupled-normsys.ipynb',\n 'examples/notebooks/multichannel-normsys.ipynb',\n 'examples/notebooks/normsys.ipynb',\n 'examples/notebooks/pullplot.ipynb',\n 'examples/notebooks/pytorch_tests_onoff.ipynb',\n 'examples/notebooks/tensorflow-limit.ipynb',\n]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = []\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#\n# html_title = u'pyhf v0.3.0'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#\n# html_logo = None\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#\nhtml_extra_path = ['_extras']\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#\n# html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n#\n# html_domain_indices = True\n\n# If false, no index is generated.\n#\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'\n#\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'pyhfdoc'\n\n# sphinx-copybutton configuration\ncopybutton_prompt_text = \">>> \"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n 'pyhf.tex',\n 'pyhf Documentation',\n 'Lukas Heinrich, Matthew Feickert, Giordon Stark',\n 'manual',\n )\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n#\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#\n# latex_appendices = []\n\n# It false, will not define \\strong, \\code, \titleref, \\crossref ... but only\n# \\sphinxstrong, ..., \\sphinxtitleref, ... To help avoid clash with user added\n# packages.\n#\n# latex_keep_old_macro_names = True\n\n# If false, no module index is generated.\n#\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'pyhf', 'pyhf Documentation', [author], 1)]\n\n# If true, show URL addresses after external links.\n#\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n 'pyhf',\n 'pyhf Documentation',\n author,\n 'pyhf',\n 'One line description of project.',\n 'Miscellaneous',\n )\n]\n\n# Documents to append as an appendix to all manuals.\n#\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n#\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#\n# texinfo_no_detailmenu = False\n\nmathjax_config = {\n 'tex2jax': {'inlineMath': [['$', '$'], ['\\\\(', '\\\\)']]},\n 'TeX': {\n 'Macros': {\n 'bm': [\"\\\\boldsymbol{#1}\", 1], # \\usepackage{bm}, see mathjax/MathJax#1219\n 'HiFa': r'\\texttt{HistFactory}',\n 'Root': r'\\texttt{ROOT}',\n 'RooStats': r'\\texttt{RooStats}',\n 'RooFit': r'\\texttt{RooFit}',\n 'pyhf': r'\\texttt{pyhf}',\n 'CLs': r'\\mathrm{CL}_{s}',\n 'freeset': r'\\bm{\\eta}',\n 'constrset': r'\\bm{\\chi}',\n 'singleconstr': r'\\chi',\n 'channelcounts': r'\\bm{n}',\n 'auxdata': r'\\bm{a}',\n 'poiset': r'\\bm{\\psi}',\n 'nuisset': r'\\bm{\\theta}',\n 'fullset': r'\\bm{\\phi}',\n 'singlefull': r'\\phi',\n 'TeV': r'\\textrm{TeV}',\n }\n },\n}\n", "path": "docs/conf.py" } ]
diff --git a/docs/JOSS/paper.bib b/docs/JOSS/paper.bib new file mode 100644 index 0000000000..2a5c77ea64 --- /dev/null +++ b/docs/JOSS/paper.bib @@ -0,0 +1,135 @@ +@techreport{Cranmer:1456844, + author = "Cranmer, Kyle and Lewis, George and Moneta, Lorenzo and + Shibata, Akira and Verkerke, Wouter", + title = "{HistFactory: A tool for creating statistical models for + use with RooFit and RooStats}", + institution = "New York U.", + collaboration = "ROOT Collaboration", + address = "New York", + number = "CERN-OPEN-2012-016", + month = "Jan", + year = "2012", + reportNumber = "CERN-OPEN-2012-016", + url = "https://cds.cern.ch/record/1456844", +} + +@Article{HIGG-2013-02, + author = "{ATLAS Collaboration}", + title = "{Measurements of Higgs boson production and couplings in diboson final states with the ATLAS detector at the LHC}", + journal = "Phys. Lett. B", + volume = "726", + year = "2013", + pages = "88", + doi = "10.1016/j.physletb.2014.05.011", + reportNumber = "CERN-PH-EP-2013-103", + eprint = "1307.1427", + archivePrefix = "arXiv", + primaryClass = "hep-ex", + related = "HIGG-2013-02-err", + relatedstring = "Erratum:", +} + +@Booklet{ATLAS-CONF-2018-041, + author = "{ATLAS Collaboration}", + title = "{Search for supersymmetry in final states with missing transverse momentum and multiple $b$-jets in proton--proton collisions at $\sqrt{s} = 13$ $\mathrm{TeV}$ with the ATLAS detector}", + howpublished = "{ATLAS-CONF-2018-041}", + url = "https://cds.cern.ch/record/2632347", + year = "2018", +} + +@article{Maguire:2017ypu, + author = "Maguire, Eamonn and Heinrich, Lukas and Watt, Graeme", + title = "{HEPData: a repository for high energy physics data}", + booktitle = "{Proceedings, 22nd International Conference on Computing + in High Energy and Nuclear Physics (CHEP2016): San + Francisco, CA, October 14-16, 2016}", + journal = "J. Phys. Conf. Ser.", + volume = "898", + year = "2017", + number = "10", + pages = "102006", + doi = "10.1088/1742-6596/898/10/102006", + eprint = "1704.05473", + archivePrefix = "arXiv", + primaryClass = "hep-ex", + reportNumber = "IPPP-17-31", + SLACcitation = "%%CITATION = ARXIV:1704.05473;%%" +} + +@software{pyhf_zenodo, + author = "{Heinrich, Lukas and Feickert, Matthew and Stark, Giordon}", + title = "{pyhf: v0.5.4}", + version = {0.5.4}, + doi = {10.5281/zenodo.1169739}, + url = {https://github.com/scikit-hep/pyhf}, + year = {2020} +} + +@booklet{ATL-PHYS-PUB-2019-029, + author = "{ATLAS Collaboration}", + title = "{Reproducing searches for new physics with the ATLAS + experiment through publication of full statistical + likelihoods}", + institution = "CERN", + address = "Geneva", + number = "ATL-PHYS-PUB-2019-029", + month = "Aug", + year = "2019", + reportNumber = "ATL-PHYS-PUB-2019-029", + url = "https://cds.cern.ch/record/2684863", +} + +@article{Abdallah:2020pec, + author = "Abdallah, Waleed and others", + collaboration = "LHC Reinterpretation Forum", + title = "{Reinterpretation of LHC Results for New Physics: Status and Recommendations after Run 2}", + eprint = "2003.07868", + archivePrefix = "arXiv", + primaryClass = "hep-ph", + reportNumber = "CERN-LPCC-2020-001, FERMILAB-FN-1098-CMS-T, Imperial/HEP/2020/RIF/01", + doi = "10.21468/SciPostPhys.9.2.022", + journal = "SciPost Phys.", + volume = "9", + number = "2", + pages = "022", + year = "2020" +} + +@article{Khosa:2020zar, + author = "Khosa, Charanjit K. 
and Kraml, Sabine and Lessa, Andre and Neuhuber, Philipp and Waltenberger, Wolfgang", + title = "{SModelS database update v1.2.3}", + eprint = "2005.00555", + archivePrefix = "arXiv", + primaryClass = "hep-ph", + doi = "10.31526/lhep.2020.158", + journal = "LHEP", + volume = "158", + pages = "2020", + month = "5", + year = "2020" +} + +@article{Alguero:2020grj, + author = "Alguero, Gaël and Kraml, Sabine and Waltenberger, Wolfgang", + title = "{A SModelS interface for pyhf likelihoods}", + eprint = "2009.01809", + archivePrefix = "arXiv", + primaryClass = "hep-ph", + month = "Sep", + year = "2020", + journal = "" +} + +@Article{Cowan:2010js, + Author = {Cowan, Glen and Cranmer, Kyle and Gross, Eilam and Vitells, Ofer}, + Eprint = "1007.1727", + Archiveprefix = {arXiv}, + Primaryclass = {physics.data-an}, + Journal = {Eur. Phys. J. C}, + Pages = {1554}, + doi = "10.1140/epjc/s10052-011-1554-0", + Title = {{Asymptotic formulae for likelihood-based tests of new physics}}, + Volume = {71}, + Year = {2011}, + Addendum = "Erratum: \href{http://dx.doi.org/10.1140/epjc/s10052-013-2501-z}{Eur. Phys. J. C {\textbf{73}} (2013) 2501}" +} diff --git a/docs/JOSS/paper.md b/docs/JOSS/paper.md new file mode 100644 index 0000000000..7f8e8ec187 --- /dev/null +++ b/docs/JOSS/paper.md @@ -0,0 +1,64 @@ +--- +title: 'pyhf: pure-Python implementation of HistFactory statistical models' +tags: + - Python + - physics + - high energy physics + - statistical modeling + - fitting + - auto-differentiation +authors: + - name: Lukas Heinrich + orcid: 0000-0002-4048-7584 + affiliation: 1 + - name: Matthew Feickert^[Corresponding author.] + orcid: 0000-0003-4124-7862 + affiliation: 2 + - name: Giordon Stark + orcid: 0000-0001-6616-3433 + affiliation: 3 + - name: Kyle Cranmer + orcid: 0000-0002-5769-7094 + affiliation: 4 +affiliations: + - name: CERN + index: 1 + - name: University of Illinois at Urbana-Champaign + index: 2 + - name: SCIPP, University of California, Santa Cruz + index: 3 + - name: New York University + index: 4 +date: 5 October 2020 +bibliography: paper.bib +--- + +# Summary + +Statistical analysis of High Energy Physics (HEP) data relies on quantifying the compatibility of observed collision events with theoretical predictions. +The relationship between them is often formalised in a statistical model $f(\mathbf{x}|\mathbf{\phi})$ describing the probability of data $\mathbf{x}$ given model parameters $\mathbf{\phi}$. +Given observed data, the likelihood $\mathcal{L}(\mathbf{\phi})$ then serves as the basis for inference on the parameters $\mathbf{\phi}$. +For measurements based on binned data (histograms), the `HistFactory` family of statistical models [@Cranmer:1456844] has been widely used in both Standard Model measurements [@HIGG-2013-02] as well as searches for new physics [@ATLAS-CONF-2018-041]. +`pyhf` is a pure-Python implementation of the `HistFactory` model specification and implements a declarative, plain-text format for describing `HistFactory`-based likelihoods that is targeted for reinterpretation and long-term preservation in analysis data repositories such as HEPData [@Maguire:2017ypu]. + +Through adoption of open source "tensor" computational Python libraries, `pyhf` decreases the abstractions between a physicist performing an analysis and the statistical modeling without sacrificing computational speed. +By taking advantage of tensor calculations, `pyhf` outperforms the traditional `C++` implementation of `HistFactory` on data from real LHC analyses. 
+`pyhf`'s default computational backend is built from NumPy and SciPy, and supports TensorFlow, PyTorch, and JAX as alternative backend choices. +These alternative backends support hardware acceleration on GPUs, and in the case of JAX JIT compilation, as well as auto-differentiation allowing for calculating the full gradient of the likelihood function &mdash; all contributing to speeding up fits. +The source code for `pyhf` has been archived on Zenodo with the linked DOI: [@pyhf_zenodo] + +## Impact on Physics + +In addition to enabling the first publication of full likelihoods by an LHC experiment [@ATL-PHYS-PUB-2019-029], `pyhf` has been used by the `SModelS` library to improve the reinterpretation of results of searches for new physics at LHC experiments [@Abdallah:2020pec; @Khosa:2020zar; @Alguero:2020grj]. + +## Future work + +Future development aims to provide support for limit setting through pseudoexperiment generation in the regimes in which asymptotic approximations [@Cowan:2010js] are no longer valid. +Further improvements to the performance of the library as well as API refinement are also planned. + +# Acknowledgements + +We would like to thank everyone who has made contributions to `pyhf` and thank our fellow developers in the Scikit-HEP community for their continued support and feedback. +Matthew Feickert and Kyle Cranmer have received support for work related to `pyhf` provided by NSF cooperative agreement OAC-1836650 (IRIS-HEP) and grant OAC-1450377 (DIANA/HEP). + +# References diff --git a/docs/conf.py b/docs/conf.py index 91d20c6bb1..6f09cc7b2e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -141,6 +141,7 @@ def setup(app): # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [ '_build', + 'JOSS', '**.ipynb_checkpoints', 'examples/experiments/edwardpyhf.ipynb', 'examples/notebooks/ImpactPlot.ipynb', diff --git a/docs/governance/ROADMAP.rst b/docs/governance/ROADMAP.rst index 89520c1116..e9c71dc661 100644 --- a/docs/governance/ROADMAP.rst +++ b/docs/governance/ROADMAP.rst @@ -66,7 +66,7 @@ Roadmap [2019-Q3] - |uncheck| Finalize logo and add it to website (Issue #453) [2019-Q3 → 2019-Q4] - - |uncheck| Write submission to `JOSS <https://joss.theoj.org/>`__ (Issue + - |check| Write submission to `JOSS <https://joss.theoj.org/>`__ (Issue #502) and write submission to `pyOpenSci <https://www.pyopensci.org/>`__ [2019-Q4 → 2020-Q2] - |uncheck| Contribute to `IRIS-HEP Analysis Systems
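The only functional change to `docs/conf.py` in this diff is the new `'JOSS'` entry in `exclude_patterns`, which keeps the paper sources out of the Sphinx build. The sketch below is a rough illustration of how such glob-style exclusion behaves; it mimics rather than reuses Sphinx's own matcher, and the candidate paths are hypothetical examples.

```python
from fnmatch import fnmatch

# Subset of the patterns from docs/conf.py. Sphinx prunes any source file or
# directory whose path (relative to the docs root) matches one of them.
exclude_patterns = ["_build", "JOSS", "**.ipynb_checkpoints"]

# Hypothetical paths under docs/, for illustration only.
candidate_paths = ["JOSS/paper.md", "api.rst", "_build/html/index.html"]

def is_excluded(path):
    # A bare directory pattern such as "JOSS" also hides files beneath that
    # directory, because the directory itself is pruned from the walk; mimic
    # that by testing every leading component of the path as well.
    parts = path.split("/")
    prefixes = ["/".join(parts[: i + 1]) for i in range(len(parts))]
    return any(fnmatch(prefix, pattern)
               for prefix in prefixes for pattern in exclude_patterns)

for path in candidate_paths:
    print(path, "->", "excluded" if is_excluded(path) else "kept")
# JOSS/paper.md and _build/html/index.html are excluded, api.rst is kept.
```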
facebookresearch__hydra-2543
[Bug] MissingConfigException cannot be correctly deserialized, due to lack of missing_cfg_file ctor default
# 🐛 Bug
## Description
In https://github.com/facebookresearch/hydra/blob/main/hydra/errors.py the missing_cfg_file parameter of the `MissingConfigException` should be defaulted to `None` since it is optional, otherwise deserialization will fail.

## Checklist
- [x] I checked on latest commit [7bc2b1a] of errors.py (https://github.com/facebookresearch/hydra/commit/7bc2b1ad66da91a12c6158f9413c908b211bff1e)
- [x] I created a minimal repro (See [this](https://stackoverflow.com/help/minimal-reproducible-example) for tips).

## To reproduce
** Minimal Code/Config snippet to reproduce **
```python
import pickle
import hydra
e = hydra.errors.MissingConfigException("missing", "file")
x = pickle.dumps(e)
y = pickle.loads(x)
```
** Stack trace/error message **
```
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: __init__() missing 1 required positional argument: 'missing_cfg_file'
```

## Expected Behavior
successful deserialization:
```
>>> y
MissingConfigException('missing')
```

## System information
- **Hydra Version** : hydra-core==1.3.1
- **Python version** : Python 3.8.13
- **Virtual environment type and version** : None
- **Operating system** : Ubuntu 22.04.1 LTS

## Additional context
This exception was serialized/deserialized when using ray tune.
[ { "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom typing import Optional, Sequence\n\n\nclass HydraException(Exception):\n ...\n\n\nclass CompactHydraException(HydraException):\n ...\n\n\nclass OverrideParseException(CompactHydraException):\n def __init__(self, override: str, message: str) -> None:\n super(OverrideParseException, self).__init__(message)\n self.override = override\n self.message = message\n\n\nclass InstantiationException(CompactHydraException):\n ...\n\n\nclass ConfigCompositionException(CompactHydraException):\n ...\n\n\nclass SearchPathException(CompactHydraException):\n ...\n\n\nclass MissingConfigException(IOError, ConfigCompositionException):\n def __init__(\n self,\n message: str,\n missing_cfg_file: Optional[str],\n options: Optional[Sequence[str]] = None,\n ) -> None:\n super(MissingConfigException, self).__init__(message)\n self.missing_cfg_file = missing_cfg_file\n self.options = options\n\n\nclass HydraDeprecationError(HydraException):\n ...\n", "path": "hydra/errors.py" } ]
[ { "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom typing import Optional, Sequence\n\n\nclass HydraException(Exception):\n ...\n\n\nclass CompactHydraException(HydraException):\n ...\n\n\nclass OverrideParseException(CompactHydraException):\n def __init__(self, override: str, message: str) -> None:\n super(OverrideParseException, self).__init__(message)\n self.override = override\n self.message = message\n\n\nclass InstantiationException(CompactHydraException):\n ...\n\n\nclass ConfigCompositionException(CompactHydraException):\n ...\n\n\nclass SearchPathException(CompactHydraException):\n ...\n\n\nclass MissingConfigException(IOError, ConfigCompositionException):\n def __init__(\n self,\n message: str,\n missing_cfg_file: Optional[str] = None,\n options: Optional[Sequence[str]] = None,\n ) -> None:\n super(MissingConfigException, self).__init__(message)\n self.missing_cfg_file = missing_cfg_file\n self.options = options\n\n\nclass HydraDeprecationError(HydraException):\n ...\n", "path": "hydra/errors.py" } ]
diff --git a/hydra/errors.py b/hydra/errors.py
index 16849a2f550..427e8375506 100644
--- a/hydra/errors.py
+++ b/hydra/errors.py
@@ -33,7 +33,7 @@ class MissingConfigException(IOError, ConfigCompositionException):
     def __init__(
         self,
         message: str,
-        missing_cfg_file: Optional[str],
+        missing_cfg_file: Optional[str] = None,
         options: Optional[Sequence[str]] = None,
     ) -> None:
         super(MissingConfigException, self).__init__(message)
diff --git a/tests/test_errors.py b/tests/test_errors.py
new file mode 100644
index 00000000000..304d6947774
--- /dev/null
+++ b/tests/test_errors.py
@@ -0,0 +1,14 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+import pickle
+
+from hydra.errors import MissingConfigException
+
+
+def test_pickle_missing_config_exception() -> None:
+    exception = MissingConfigException("msg", "filename", ["option1", "option2"])
+    x = pickle.dumps(exception)
+    loaded = pickle.loads(x)
+    assert isinstance(loaded, MissingConfigException)
+    assert loaded.args == ("msg",)
+    assert loaded.missing_cfg_file == "filename"
+    assert loaded.options == ["option1", "option2"]
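The underlying mechanism is not specific to Hydra: `BaseException` pickles as `(type(exc), exc.args)`, plus the instance `__dict__` when one exists, and `exc.args` only contains what was passed to `BaseException.__init__` (here just `message`). Unpickling therefore calls the class with a single positional argument, which fails for any additional required parameter, and giving `missing_cfg_file` a default is enough to make reconstruction succeed. A minimal sketch with a plain `Exception` subclass (not Hydra's `IOError`-based class, but the same pickling protocol):

```python
import pickle

class BrokenError(Exception):
    # Mirrors the failing pattern: a second *required* constructor argument
    # that never makes it into self.args (only `message` is passed up).
    def __init__(self, message, extra):
        super().__init__(message)
        self.extra = extra

class FixedError(Exception):
    # Same shape, but the extra argument defaults to None, as in the fix.
    def __init__(self, message, extra=None):
        super().__init__(message)
        self.extra = extra

# Unpickling re-calls the class with exc.args, i.e. BrokenError("msg"),
# which raises exactly the TypeError shown in the report.
try:
    pickle.loads(pickle.dumps(BrokenError("msg", "extra")))
except TypeError as err:
    print("broken:", err)

# With the default in place the call succeeds, and `extra` is then restored
# from the pickled instance __dict__.
restored = pickle.loads(pickle.dumps(FixedError("msg", "extra")))
print("fixed:", restored.args, restored.extra)
```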
conan-io__conan-center-index-2625
[package] libx265/3.2.1: hash of source tar.gz changed
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **libx265/3.2.1**
* Operating System+version: **Linux Ubuntu 18.04 and 20.04**
* Compiler+version: **GCC 7.5 and 8**
* Conan version: **conan 1.28.1**
* Python version: **Python 3.6.8**

### Conan profile
```
Configuration for profile default:
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=7.5
compiler.libcxx=libstdc++11
build_type=Release
[options]
[build_requires]
[env]
```

### Steps to reproduce (Include if Applicable)
Try to build libx265/3.2.1 from source.

### Logs (Include/Attach if Applicable)
<details><summary>Click to expand log</summary>

```
ERROR: libx265/3.2.1: Error in source() method, line 50
    tools.get(**self.conan_data["sources"][self.version])
    ConanException: sha256 signature failed for 'x265_3.2.1.tar.gz' file.
    Provided signature: 7cf8ed2927fcb2914cdca51c974594770da705cb43288beea62b69c53725b5d7
    Computed signature: fb9badcf92364fd3567f8b5aa0e5e952aeea7a39a2b864387cec31e3b58cbbcc
```

</details>

I'm surprised that this wasn't caught earlier. Indeed, when downloading the file manually and computing the sha256sum I get the same as the computed signature above.

What's the best way to make a PR for this? Would we bump the recipe version or just overwrite the current 3.2.1?
[ { "content": "from conans import CMake, ConanFile, tools\nimport os\nimport shutil\n\n\nclass Libx265Conan(ConanFile):\n name = \"libx265\"\n description = \"x265 is the leading H.265 / HEVC encoder software library\"\n topics = (\"conan\", \"libx265\", \"codec\", \"video\", \"H.265\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \" https://bitbucket.org/multicoreware/x265\"\n exports_sources = [\"CMakeLists.txt\", \"patches/*\"]\n generators = \"cmake\"\n license = (\"GPL-2.0-only\", \"commercial\") # https://bitbucket.org/multicoreware/x265/src/default/COPYING\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"bit_depth\": [8, 10, 12],\n \"HDR10\": [True, False],\n \"SVG_HEVC_encoder\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"bit_depth\": 8,\n \"HDR10\": False,\n \"SVG_HEVC_encoder\": False,\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"x265_{}\".format(self.version), self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"ENABLE_SHARED\"] = self.options.shared\n self._cmake.definitions[\"ENABLE_LIBNUMA\"] = False\n if self.settings.os == \"Macos\":\n self._cmake.definitions[\"CMAKE_SHARED_LINKER_FLAGS\"] = \"-Wl,-read_only_relocs,suppress\"\n self._cmake.definitions[\"HIGH_BIT_DEPTH\"] = self.options.bit_depth != 8\n self._cmake.definitions[\"MAIN12\"] = self.options.bit_depth == 12\n self._cmake.definitions[\"ENABLE_HDR10_PLUS\"] = self.options.HDR10\n self._cmake.definitions[\"ENABLE_SVT_HEVC\"] = self.options.SVG_HEVC_encoder\n if self.settings.compiler == \"Visual Studio\":\n self._cmake.definitions[\"STATIC_LINK_CRT\"] = \"T\" in str(self.settings.compiler.runtime)\n if self.settings.os == \"Linux\":\n self._cmake.definitions[\"PLATFORM_LIBS\"] = \"dl\"\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def _patch_sources(self):\n cmakelists = os.path.join(self._source_subfolder, \"source\", \"CMakeLists.txt\")\n tools.replace_in_file(cmakelists,\n \"if((WIN32 AND ENABLE_CLI) OR (WIN32 AND ENABLE_SHARED))\",\n \"if(FALSE)\")\n if self.settings.os == \"Android\":\n tools.replace_in_file(cmakelists,\n \"list(APPEND PLATFORM_LIBS pthread)\", \"\")\n tools.replace_in_file(cmakelists,\n \"list(APPEND PLATFORM_LIBS rt)\", \"\")\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"COPYING\", src=self._source_subfolder, dst=\"licenses\")\n cmake = self._configure_cmake()\n cmake.install()\n\n if self.options.shared:\n if self.settings.compiler == \"Visual Studio\":\n static_lib = \"x265-static.lib\"\n else:\n static_lib = \"libx265.a\"\n os.unlink(os.path.join(self.package_folder, \"lib\", static_lib))\n\n if self.settings.compiler == \"Visual Studio\":\n name = \"libx265.lib\" if self.options.shared else \"x265-static.lib\"\n 
shutil.move(os.path.join(self.package_folder, \"lib\", name),\n os.path.join(self.package_folder, \"lib\", \"x265.lib\"))\n\n if self.settings.os != \"Windows\" or not self.options.shared:\n tools.rmdir(os.path.join(self.package_folder, \"bin\"))\n else:\n for file in os.listdir(os.path.join(self.package_folder, \"bin\")):\n if not file.endswith(\".dll\"):\n os.unlink(os.path.join(self.package_folder, \"bin\", file))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n def package_info(self):\n self.cpp_info.names[\"pkg_config\"] = \"x265\"\n self.cpp_info.libs = [\"x265\"]\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"dl\", \"pthread\", \"m\"])\n if self.settings.os == \"Android\":\n self.cpp_info.libs.extend([\"dl\", \"m\"])\n libcxx = self.settings.get_safe(\"compiler.libcxx\")\n if libcxx in [\"libstdc++\", \"libstdc++11\"]:\n self.cpp_info.system_libs.append(\"stdc++\")\n elif libcxx == \"libc++\":\n self.cpp_info.system_libs.append(\"c++\")\n elif libcxx in [\"c++_static\", \"c++_shared\"]:\n self.cpp_info.system_libs.extend([libcxx, \"c++abi\"])\n", "path": "recipes/libx265/all/conanfile.py" } ]
[ { "content": "from conans import CMake, ConanFile, tools\nimport os\nimport shutil\n\n\nclass Libx265Conan(ConanFile):\n name = \"libx265\"\n description = \"x265 is the leading H.265 / HEVC encoder software library\"\n topics = (\"conan\", \"libx265\", \"codec\", \"video\", \"H.265\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \" https://bitbucket.org/multicoreware/x265\"\n exports_sources = [\"CMakeLists.txt\", \"patches/*\"]\n generators = \"cmake\"\n license = (\"GPL-2.0-only\", \"commercial\") # https://bitbucket.org/multicoreware/x265/src/default/COPYING\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"bit_depth\": [8, 10, 12],\n \"HDR10\": [True, False],\n \"SVG_HEVC_encoder\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"bit_depth\": 8,\n \"HDR10\": False,\n \"SVG_HEVC_encoder\": False,\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"x265-{}\".format(self.version), self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"ENABLE_SHARED\"] = self.options.shared\n self._cmake.definitions[\"ENABLE_LIBNUMA\"] = False\n if self.settings.os == \"Macos\":\n self._cmake.definitions[\"CMAKE_SHARED_LINKER_FLAGS\"] = \"-Wl,-read_only_relocs,suppress\"\n self._cmake.definitions[\"HIGH_BIT_DEPTH\"] = self.options.bit_depth != 8\n self._cmake.definitions[\"MAIN12\"] = self.options.bit_depth == 12\n self._cmake.definitions[\"ENABLE_HDR10_PLUS\"] = self.options.HDR10\n self._cmake.definitions[\"ENABLE_SVT_HEVC\"] = self.options.SVG_HEVC_encoder\n if self.settings.compiler == \"Visual Studio\":\n self._cmake.definitions[\"STATIC_LINK_CRT\"] = \"T\" in str(self.settings.compiler.runtime)\n if self.settings.os == \"Linux\":\n self._cmake.definitions[\"PLATFORM_LIBS\"] = \"dl\"\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def _patch_sources(self):\n cmakelists = os.path.join(self._source_subfolder, \"source\", \"CMakeLists.txt\")\n tools.replace_in_file(cmakelists,\n \"if((WIN32 AND ENABLE_CLI) OR (WIN32 AND ENABLE_SHARED))\",\n \"if(FALSE)\")\n if self.settings.os == \"Android\":\n tools.replace_in_file(cmakelists,\n \"list(APPEND PLATFORM_LIBS pthread)\", \"\")\n tools.replace_in_file(cmakelists,\n \"list(APPEND PLATFORM_LIBS rt)\", \"\")\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"COPYING\", src=self._source_subfolder, dst=\"licenses\")\n cmake = self._configure_cmake()\n cmake.install()\n\n if self.options.shared:\n if self.settings.compiler == \"Visual Studio\":\n static_lib = \"x265-static.lib\"\n else:\n static_lib = \"libx265.a\"\n os.unlink(os.path.join(self.package_folder, \"lib\", static_lib))\n\n if self.settings.compiler == \"Visual Studio\":\n name = \"libx265.lib\" if self.options.shared else \"x265-static.lib\"\n 
shutil.move(os.path.join(self.package_folder, \"lib\", name),\n os.path.join(self.package_folder, \"lib\", \"x265.lib\"))\n\n if self.settings.os != \"Windows\" or not self.options.shared:\n tools.rmdir(os.path.join(self.package_folder, \"bin\"))\n else:\n for file in os.listdir(os.path.join(self.package_folder, \"bin\")):\n if not file.endswith(\".dll\"):\n os.unlink(os.path.join(self.package_folder, \"bin\", file))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n def package_info(self):\n self.cpp_info.names[\"pkg_config\"] = \"x265\"\n self.cpp_info.libs = [\"x265\"]\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"dl\", \"pthread\", \"m\"])\n if self.settings.os == \"Android\":\n self.cpp_info.libs.extend([\"dl\", \"m\"])\n libcxx = self.settings.get_safe(\"compiler.libcxx\")\n if libcxx in [\"libstdc++\", \"libstdc++11\"]:\n self.cpp_info.system_libs.append(\"stdc++\")\n elif libcxx == \"libc++\":\n self.cpp_info.system_libs.append(\"c++\")\n elif libcxx in [\"c++_static\", \"c++_shared\"]:\n self.cpp_info.system_libs.extend([libcxx, \"c++abi\"])\n", "path": "recipes/libx265/all/conanfile.py" } ]
diff --git a/recipes/libx265/all/conandata.yml b/recipes/libx265/all/conandata.yml
index 4e8beb4d7bc64..18cf86ce2cf52 100644
--- a/recipes/libx265/all/conandata.yml
+++ b/recipes/libx265/all/conandata.yml
@@ -1,7 +1,7 @@
 sources:
   3.2.1:
-    url: "https://bitbucket.org/multicoreware/x265/downloads/x265_3.2.1.tar.gz"
-    sha256: "7cf8ed2927fcb2914cdca51c974594770da705cb43288beea62b69c53725b5d7"
+    url: "https://github.com/videolan/x265/archive/3.2.1.tar.gz"
+    sha256: "b5ee7ea796a664d6e2763f9c0ae281fac5d25892fc2cb134698547103466a06a"
 patches:
   3.2.1:
     - patch_file: "patches/0001-remove_register_classifier.patch"
diff --git a/recipes/libx265/all/conanfile.py b/recipes/libx265/all/conanfile.py
index 92ed60743df05..38352a867e7b5 100644
--- a/recipes/libx265/all/conanfile.py
+++ b/recipes/libx265/all/conanfile.py
@@ -48,7 +48,7 @@ def configure(self):
 
     def source(self):
         tools.get(**self.conan_data["sources"][self.version])
-        os.rename("x265_{}".format(self.version), self._source_subfolder)
+        os.rename("x265-{}".format(self.version), self._source_subfolder)
 
     def _configure_cmake(self):
         if self._cmake:
diff --git a/recipes/libx265/all/test_package/CMakeLists.txt b/recipes/libx265/all/test_package/CMakeLists.txt
index 56a1bba89a19d..196188113685c 100644
--- a/recipes/libx265/all/test_package/CMakeLists.txt
+++ b/recipes/libx265/all/test_package/CMakeLists.txt
@@ -1,8 +1,6 @@
-cmake_minimum_required(VERSION 2.8.12)
+cmake_minimum_required(VERSION 3.1)
 project(test_package)
 
-set(CMAKE_VERBOSE_MAKEFILE TRUE)
-
 include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
 conan_basic_setup()
 
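The error in the report comes from Conan's checksum verification: `tools.get()` downloads the archive listed in `conandata.yml` and compares its SHA-256 with the pinned value, so a re-generated upstream tarball breaks every build from source until the recipe is updated, which the diff above does by switching to the videolan GitHub mirror and pinning the new hash. Below is a sketch of the equivalent manual check with the standard library; the URL and expected digest are the ones pinned in the updated `conandata.yml`, and the local filename is an arbitrary choice for the sketch.

```python
import hashlib
from urllib.request import urlretrieve

# URL and digest taken from the updated conandata.yml above.
url = "https://github.com/videolan/x265/archive/3.2.1.tar.gz"
expected_sha256 = "b5ee7ea796a664d6e2763f9c0ae281fac5d25892fc2cb134698547103466a06a"

# Download the archive, then hash it in chunks to keep memory use small.
filename, _ = urlretrieve(url, "x265-3.2.1.tar.gz")

sha256 = hashlib.sha256()
with open(filename, "rb") as fh:
    for chunk in iter(lambda: fh.read(1 << 20), b""):
        sha256.update(chunk)

print("computed:", sha256.hexdigest())
print("matches pinned value:", sha256.hexdigest() == expected_sha256)
```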
pyqtgraph__pyqtgraph-2648
ColorMap.getByIndex() returns wrong colors
### Short description
ColorMap.getByIndex() returns wrong colors.

### Code to reproduce
```python
In [1]: import pyqtgraph as pg
In [2]: cm = pg.ColorMap([0.0, 1.0], [(0,0,0), (255,0,0)])
In [3]: cm.getByIndex(0)
Out[3]: PySide6.QtGui.QColor.fromRgbF(0.000000, 0.000000, 0.000000, 0.003922)
In [4]: cm.getByIndex(1)
Out[4]: PySide6.QtGui.QColor.fromRgbF(0.003922, 0.000000, 0.000000, 0.003922)
```

### Tested environment(s)
* PyQtGraph version: 0.13.2.dev0
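A detail worth noting before the source listing: every non-zero component in the reported output is 0.003922, which is 1/255. The internal color table in the listing below is filled from `QColor.getRgbF()`, i.e. floats in the 0 to 1 range, so one plausible but unconfirmed reading of the symptom is that those floats are later treated as 0 to 255 integers. A quick arithmetic sketch of that hypothesis:

```python
# Hypothetical reading of the symptom, not a confirmed root cause: take the
# stored 0-1 float components and treat them as 0-255 integer components.
stored = (1.0, 0.0, 0.0, 1.0)   # pure red, as QColor.getRgbF() would return it

misread = tuple(round(c) / 255 for c in stored)
print(misread)   # (0.00392..., 0.0, 0.0, 0.00392...), matching cm.getByIndex(1)
print(1 / 255)   # 0.00392156862745098, the 0.003922 printed in the report
```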
[ { "content": "from collections.abc import Callable, Sequence\nfrom os import listdir, path\n\nimport numpy as np\n\nfrom .functions import clip_array, clip_scalar, colorDistance, eq, mkColor\nfrom .Qt import QtCore, QtGui\n\n__all__ = ['ColorMap']\n\n_mapCache = {}\n\ndef listMaps(source=None):\n \"\"\"\n .. warning:: Experimental, subject to change.\n\n List available color maps.\n\n Parameters\n ----------\n source: str, optional\n Color map source. If omitted, locally stored maps are listed. Otherwise:\n\n - 'matplotlib' lists maps that can be imported from Matplotlib\n - 'colorcet' lists maps that can be imported from ColorCET\n\n Returns\n -------\n list of str\n Known color map names.\n \"\"\"\n if source is None:\n pathname = path.join(path.dirname(__file__), 'colors','maps')\n files = listdir( pathname )\n list_of_maps = []\n for filename in files:\n if filename[-4:] == '.csv' or filename[-4:] == '.hex':\n list_of_maps.append(filename[:-4])\n return list_of_maps\n elif source.lower() == 'matplotlib':\n try:\n import matplotlib.pyplot as mpl_plt\n list_of_maps = mpl_plt.colormaps()\n return list_of_maps\n except ModuleNotFoundError:\n return []\n elif source.lower() == 'colorcet':\n try:\n import colorcet\n list_of_maps = list( colorcet.palette.keys() )\n list_of_maps.sort()\n return list_of_maps\n except ModuleNotFoundError:\n return []\n return []\n\n\ndef get(name, source=None, skipCache=False):\n \"\"\"\n .. warning:: Experimental, subject to change.\n\n Returns a ColorMap object from a local definition or imported from another library.\n The generated ColorMap objects are cached for fast repeated access.\n\n Parameters\n ----------\n name: str\n Name of color map. In addition to the included maps, this can also\n be a path to a file in the local folder. See the files in the\n ``pyqtgraph/colors/maps/`` folder for examples of the format.\n source: str, optional\n If omitted, a locally stored map is returned. Otherwise:\n\n - 'matplotlib' imports a map defined by Matplotlib.\n - 'colorcet' imports a map defined by ColorCET.\n\n skipCache: bool, optional\n If `skipCache=True`, the internal cache is skipped and a new\n ColorMap object is generated. 
This can load an unaltered copy\n when the previous ColorMap object has been modified.\n \"\"\"\n if not skipCache and name in _mapCache:\n return _mapCache[name]\n if source is None:\n return _getFromFile(name)\n elif source == 'matplotlib':\n return getFromMatplotlib(name)\n elif source == 'colorcet':\n return getFromColorcet(name)\n return None\n\ndef _getFromFile(name):\n filename = name\n if filename[0] !='.': # load from built-in directory\n dirname = path.dirname(__file__)\n filename = path.join(dirname, 'colors/maps/'+filename)\n if not path.isfile( filename ): # try suffixes if file is not found:\n if path.isfile( filename+'.csv' ): filename += '.csv'\n elif path.isfile( filename+'.hex' ): filename += '.hex'\n with open(filename,'r') as fh:\n idx = 0\n color_list = []\n if filename[-4:].lower() != '.hex':\n csv_mode = True\n else:\n csv_mode = False\n for line in fh:\n line = line.strip()\n if len(line) == 0: continue # empty line\n if line[0] == ';': continue # comment\n parts = line.split(sep=';', maxsplit=1) # split into color and names/comments\n if csv_mode:\n comp = parts[0].split(',')\n if len( comp ) < 3: continue # not enough components given\n color_tuple = tuple( [ int(255*float(c)+0.5) for c in comp ] )\n else:\n hex_str = parts[0]\n if hex_str[0] == '#':\n hex_str = hex_str[1:] # strip leading #\n if len(hex_str) < 3: continue # not enough information\n if len(hex_str) == 3: # parse as abbreviated RGB\n hex_str = 2*hex_str[0] + 2*hex_str[1] + 2*hex_str[2]\n elif len(hex_str) == 4: # parse as abbreviated RGBA\n hex_str = 2*hex_str[0] + 2*hex_str[1] + 2*hex_str[2] + 2*hex_str[3]\n if len(hex_str) < 6: continue # not enough information\n try:\n color_tuple = tuple( bytes.fromhex( hex_str ) )\n except ValueError as e:\n raise ValueError(f\"failed to convert hexadecimal value '{hex_str}'.\") from e\n color_list.append( color_tuple )\n idx += 1\n # end of line reading loop\n # end of open\n cmap = ColorMap( name=name,\n pos=np.linspace(0.0, 1.0, len(color_list)),\n color=color_list) #, names=color_names)\n if cmap is not None:\n cmap.name = name\n _mapCache[name] = cmap\n return cmap\n\ndef getFromMatplotlib(name):\n \"\"\" \n Generates a ColorMap object from a Matplotlib definition.\n Same as ``colormap.get(name, source='matplotlib')``.\n \"\"\"\n # inspired and informed by \"mpl_cmaps_in_ImageItem.py\", published by Sebastian Hoefer at \n # https://github.com/honkomonk/pyqtgraph_sandbox/blob/master/mpl_cmaps_in_ImageItem.py\n try:\n import matplotlib.pyplot as mpl_plt\n except ModuleNotFoundError:\n return None\n cmap = None\n col_map = mpl_plt.get_cmap(name)\n if hasattr(col_map, '_segmentdata'): # handle LinearSegmentedColormap\n data = col_map._segmentdata\n if ('red' in data) and isinstance(data['red'], (Sequence, np.ndarray)):\n positions = set() # super-set of handle positions in individual channels\n for key in ['red','green','blue']:\n for tup in data[key]:\n positions.add(tup[0])\n col_data = np.zeros((len(positions),4 ))\n col_data[:,-1] = sorted(positions)\n for idx, key in enumerate(['red','green','blue']):\n positions = np.zeros( len(data[key] ) )\n comp_vals = np.zeros( len(data[key] ) )\n for idx2, tup in enumerate( data[key] ):\n positions[idx2] = tup[0]\n comp_vals[idx2] = tup[1] # these are sorted in the raw data\n col_data[:,idx] = np.interp(col_data[:,3], positions, comp_vals)\n cmap = ColorMap(pos=col_data[:,-1], color=255*col_data[:,:3]+0.5)\n # some color maps (gnuplot in particular) are defined by RGB component functions:\n elif ('red' in data) and 
isinstance(data['red'], Callable):\n col_data = np.zeros((64, 4))\n col_data[:,-1] = np.linspace(0., 1., 64)\n for idx, key in enumerate(['red','green','blue']):\n col_data[:,idx] = np.clip( data[key](col_data[:,-1]), 0, 1)\n cmap = ColorMap(pos=col_data[:,-1], color=255*col_data[:,:3]+0.5)\n elif hasattr(col_map, 'colors'): # handle ListedColormap\n col_data = np.array(col_map.colors)\n cmap = ColorMap( name=name,\n pos = np.linspace(0.0, 1.0, col_data.shape[0]), color=255*col_data[:,:3]+0.5 )\n if cmap is not None:\n cmap.name = name\n _mapCache[name] = cmap\n return cmap\n\ndef getFromColorcet(name):\n \"\"\" Generates a ColorMap object from a colorcet definition. Same as ``colormap.get(name, source='colorcet')``. \"\"\"\n try:\n import colorcet\n except ModuleNotFoundError:\n return None\n color_strings = colorcet.palette[name]\n color_list = []\n for hex_str in color_strings:\n if hex_str[0] != '#': continue\n if len(hex_str) != 7:\n raise ValueError(f\"Invalid color string '{hex_str}' in colorcet import.\")\n color_tuple = tuple( bytes.fromhex( hex_str[1:] ) )\n color_list.append( color_tuple )\n if len(color_list) == 0:\n return None\n cmap = ColorMap( name=name,\n pos=np.linspace(0.0, 1.0, len(color_list)), \n color=color_list) #, names=color_names)\n if cmap is not None:\n cmap.name = name\n _mapCache[name] = cmap\n return cmap\n \ndef makeHslCycle( hue=0.0, saturation=1.0, lightness=0.5, steps=36 ):\n \"\"\"\n Returns a ColorMap object that traces a circular or spiraling path around the HSL color space.\n\n Parameters\n ----------\n hue : float or tuple of floats\n Starting point or (start, end) for hue. Values can lie outside the [0 to 1] range \n to realize multiple cycles. For a single value, one full hue cycle is generated.\n The default starting hue is 0.0 (red). \n saturation : float or tuple of floats, optional\n Saturation value for the colors in the cycle, in the range of [0 to 1]. \n If a (start, end) tuple is given, saturation gradually changes between these values.\n The default saturation is 1.0.\n lightness : float or tuple of floats, optional\n Lightness value for the colors in the cycle, in the range of [0 to 1]. \n If a (start, end) tuple is given, lightness gradually changes between these values.\n The default lightness is 1.0.\n steps: int, optional\n Number of steps in the cycle. Between these steps, the color map will interpolate in RGB space.\n The default number of steps is 36, generating a color map with 37 stops.\n \"\"\"\n if isinstance( hue, (tuple, list) ):\n hueA, hueB = hue\n else:\n hueA = hue\n hueB = hueA + 1.0\n if isinstance( saturation, (tuple, list) ):\n satA, satB = saturation\n else:\n satA = satB = saturation\n if isinstance( lightness, (tuple, list) ):\n lgtA, lgtB = lightness\n else:\n lgtA = lgtB = lightness\n hue_vals = np.linspace(hueA, hueB, num=steps+1)\n sat_vals = np.linspace(satA, satB, num=steps+1)\n lgt_vals = np.linspace(lgtA, lgtB, num=steps+1)\n color_list = []\n for hue, sat, lgt in zip( hue_vals, sat_vals, lgt_vals):\n qcol = QtGui.QColor.fromHslF( hue%1.0, sat, lgt )\n color_list.append( qcol )\n name = f'Hue {hueA:0.2f}-{hueB:0.2f}'\n return ColorMap( None, color_list, name=name )\n\ndef makeMonochrome(color='neutral'):\n \"\"\"\n Returns a ColorMap object with a dark to bright ramp and adjustable tint.\n \n In addition to neutral, warm or cold grays, imitations of monochrome computer monitors are also\n available. 
The following predefined color ramps are available:\n `neutral`, `warm`, `cool`, `green`, `amber`, `blue`, `red`, `pink`, `lavender`.\n \n The ramp can also be specified by a tuple of float values in the range of 0 to 1.\n In this case `(h, s, l0, l1)` describe hue, saturation, minimum lightness and maximum lightness\n within the HSL color space. The values `l0` and `l1` can be omitted. They default to \n `l0=0.0` and `l1=1.0` in this case.\n\n Parameters\n ----------\n color: str or tuple of floats\n Color description. Can be one of the predefined identifiers, or a tuple\n `(h, s, l0, l1)`, `(h, s)` or (`h`).\n 'green', 'amber', 'blue', 'red', 'lavender', 'pink'\n or a tuple of relative ``(R,G,B)`` contributions in range 0.0 to 1.0\n \"\"\"\n name=f'Monochrome {color}'\n defaults = {\n 'neutral': (0.00, 0.00, 0.00, 1.00),\n 'warm' : (0.10, 0.08, 0.00, 0.95),\n 'cool' : (0.60, 0.08, 0.00, 0.95),\n 'green' : (0.35, 0.55, 0.02, 0.90),\n 'amber' : (0.09, 0.80, 0.02, 0.80),\n 'blue' : (0.58, 0.85, 0.02, 0.95),\n 'red' : (0.01, 0.60, 0.02, 0.90),\n 'pink' : (0.93, 0.65, 0.02, 0.95),\n 'lavender': (0.75, 0.50, 0.02, 0.90)\n }\n if isinstance(color, str):\n if color in defaults:\n h_val, s_val, l_min, l_max = defaults[color]\n else:\n valid = ','.join(defaults.keys())\n raise ValueError(f\"Undefined color descriptor '{color}', known values are:\\n{valid}\")\n else:\n s_val = 0.70 # set up default values\n l_min = 0.00\n l_max = 1.00\n if not hasattr(color,'__len__'):\n h_val = float(color)\n elif len(color) == 1:\n h_val = color[0]\n elif len(color) == 2:\n h_val, s_val = color\n elif len(color) == 4:\n h_val, s_val, l_min, l_max = color\n else:\n raise ValueError(f\"Invalid color descriptor '{color}'\")\n l_vals = np.linspace(l_min, l_max, num=16)\n color_list = []\n for l_val in l_vals:\n qcol = QtGui.QColor.fromHslF( h_val, s_val, l_val )\n color_list.append( qcol )\n return ColorMap( None, color_list, name=name, linearize=True )\n\ndef modulatedBarData(length=768, width=32):\n \"\"\" \n Returns an NumPy array that represents a modulated color bar ranging from 0 to 1.\n This is used to judge the perceived variation of the color gradient.\n \n Parameters\n ----------\n length: int\n Length of the data set. Values will vary from 0 to 1 over this axis.\n width: int\n Width of the data set. The modulation will vary from 0% to 4% over this axis. 
\n \"\"\"\n gradient = np.linspace(0.00, 1.00, length)\n modulation = -0.04 * np.sin( (np.pi/4) * np.arange(length) )\n data = np.zeros( (length, width) )\n for idx in range(width):\n data[:,idx] = gradient + (idx/(width-1)) * modulation\n clip_array(data, 0.0, 1.0, out=data)\n return data\n\nclass ColorMap(object):\n \"\"\"\n ColorMap(pos, color, mapping=ColorMap.CLIP)\n\n ColorMap stores a mapping of specific data values to colors, for example:\n\n | 0.0 → black\n | 0.2 → red\n | 0.6 → yellow\n | 1.0 → white\n\n The colors for intermediate values are determined by interpolating between\n the two nearest colors in RGB color space.\n\n A ColorMap object provides access to the interpolated colors by indexing with a float value:\n ``cm[0.5]`` returns a QColor corresponding to the center of ColorMap `cm`.\n \"\"\"\n ## mapping modes\n CLIP = 1\n REPEAT = 2\n MIRROR = 3\n DIVERGING = 4\n\n ## return types\n BYTE = 1\n FLOAT = 2\n QCOLOR = 3\n\n enumMap = {\n 'clip': CLIP,\n 'repeat': REPEAT,\n 'mirror': MIRROR,\n 'diverging': DIVERGING,\n 'byte': BYTE,\n 'float': FLOAT,\n 'qcolor': QCOLOR,\n }\n\n def __init__(self, pos, color, mapping=CLIP, mode=None, linearize=False, name=''):\n \"\"\"\n __init__(pos, color, mapping=ColorMap.CLIP)\n \n Parameters\n ----------\n pos: array_like of float, optional\n Assigned positions of specified colors. `None` sets equal spacing.\n Values need to be in range 0.0-1.0.\n color: array_like of color_like\n List of colors, interpreted via :func:`mkColor() <pyqtgraph.mkColor>`.\n mapping: str or int, optional\n Controls how values outside the 0 to 1 range are mapped to colors.\n See :func:`setMappingMode() <ColorMap.setMappingMode>` for details. \n \n The default of `ColorMap.CLIP` continues to show\n the colors assigned to 0 and 1 for all values below or above this range, respectively.\n \"\"\"\n self.name = name # storing a name helps identify ColorMaps sampled by Palette\n if pos is None:\n order = range(len(color))\n self.pos = np.linspace(0.0, 1.0, num=len(color))\n else:\n self.pos = np.array(pos)\n order = np.argsort(self.pos)\n self.pos = self.pos[order]\n \n self.color = np.zeros( (len(color), 4) ) # stores float rgba values\n for cnt, idx in enumerate(order):\n self.color[cnt] = mkColor(color[idx]).getRgbF()\n # alternative code may be more efficient, but fails to handle lists of QColor.\n # self.color = np.apply_along_axis(\n # func1d = lambda x: np.uint8( mkColor(x).getRgb() ), # cast RGB integer values to uint8\n # axis = -1,\n # arr = color,\n # )[order]\n \n self.mapping_mode = self.CLIP # default to CLIP mode \n if mapping is not None:\n self.setMappingMode( mapping )\n self.stopsCache = {}\n if linearize: self.linearize()\n\n def setMappingMode(self, mapping):\n \"\"\"\n Sets the way that values outside of the range 0 to 1 are mapped to colors.\n\n Parameters\n ----------\n mapping: int or str\n Sets mapping mode to\n\n - `ColorMap.CLIP` or 'clip': Values are clipped to the range 0 to 1. ColorMap defaults to this.\n - `ColorMap.REPEAT` or 'repeat': Colors repeat cyclically, i.e. 
range 1 to 2 repeats the colors for 0 to 1.\n - `ColorMap.MIRROR` or 'mirror': The range 0 to -1 uses same colors (in reverse order) as 0 to 1.\n - `ColorMap.DIVERGING` or 'diverging': Colors are mapped to -1 to 1 such that the central value appears at 0.\n \"\"\"\n if isinstance(mapping, str):\n mapping = self.enumMap[mapping.lower()]\n if mapping in [self.CLIP, self.REPEAT, self.DIVERGING, self.MIRROR]:\n self.mapping_mode = mapping # only allow defined values\n else:\n raise ValueError(f\"Undefined mapping type '{mapping}'\")\n self.stopsCache = {}\n \n def __str__(self):\n \"\"\" provide human-readable identifier \"\"\"\n if self.name is None:\n return 'unnamed ColorMap({:d})'.format(len(self.pos))\n return \"ColorMap({:d}):'{:s}'\".format(len(self.pos),self.name)\n\n def __getitem__(self, key):\n \"\"\" Convenient shorthand access to palette colors \"\"\"\n if isinstance(key, int): # access by color index\n return self.getByIndex(key)\n # otherwise access by map\n try: # accept any numerical format that converts to float\n float_idx = float(key)\n return self.mapToQColor(float_idx)\n except ValueError: pass\n return None\n\n def linearize(self):\n \"\"\"\n Adjusts the positions assigned to color stops to approximately equalize the perceived color difference\n for a fixed step.\n \"\"\"\n colors = self.getColors(mode=self.QCOLOR)\n distances = colorDistance(colors)\n positions = np.insert( np.cumsum(distances), 0, 0.0 )\n self.pos = positions / positions[-1] # normalize last value to 1.0\n self.stopsCache = {}\n\n def reverse(self):\n \"\"\"\n Reverses the color map, so that the color assigned to a value of 1 now appears at 0 and vice versa.\n This is convenient to adjust imported color maps.\n \"\"\"\n self.pos = 1.0 - np.flip( self.pos )\n self.color = np.flip( self.color, axis=0 )\n self.stopsCache = {}\n \n def getSubset(self, start, span):\n \"\"\"\n Returns a new ColorMap object that extracts the subset specified by 'start' and 'length' \n to the full 0.0 to 1.0 range. A negative length results in a color map that is reversed \n relative to the original.\n \n Parameters\n ----------\n start : float\n Starting value that defines the 0.0 value of the new color map.\n Possible value between 0.0 to 1.0\n span : float\n Span of the extracted region. The original color map will be \n treated as cyclical if the extracted interval exceeds the \n 0.0 to 1.0 range. 
Possible values between -1.0 to 1.0.\n \"\"\"\n pos, col = self.getStops( mode=ColorMap.FLOAT )\n start = clip_scalar(start, 0.0, 1.0)\n span = clip_scalar(span, -1.0, 1.0)\n\n if span == 0.0:\n raise ValueError(\"'length' needs to be non-zero\")\n stop = (start + span)\n if stop > 1.0 or stop < 0.0: stop = stop % 1.0\n # find indices *inside* range, start and end will be added by sampling later\n if span > 0:\n ref_pos = start # lowest position value at start\n idxA = np.searchsorted( pos, start, side='right' )\n idxB = np.searchsorted( pos, stop , side='left' ) # + 1 # right-side element of interval\n wraps = bool( stop < start ) # wraps around?\n else:\n ref_pos = stop # lowest position value at stop\n idxA = np.searchsorted( pos, stop , side='right')\n idxB = np.searchsorted( pos, start, side='left' ) # + 1 # right-side element of interval\n wraps = bool( stop > start ) # wraps around?\n \n if wraps: # wraps around:\n length1 = (len(pos)-idxA) # before wrap\n length2 = idxB # after wrap\n new_length = length1 + length2 + 2 # combined; plus edge elements\n new_pos = np.zeros( new_length )\n new_col = np.zeros( (new_length, 4) )\n new_pos[ 1:length1+1] = (0 + pos[idxA:] - ref_pos) / span # starting point lie in 0 to 1 range\n new_pos[length1+1:-1] = (1 + pos[:idxB] - ref_pos) / span # end point wrapped to -1 to 0 range\n new_pos[length1] -= np.copysign(1e-6, span) # breaks degeneracy of shifted 0.0 and 1.0 values\n new_col[ 1:length1+1] = col[idxA:]\n new_col[length1+1:-1] = col[:idxB]\n else: # does not wrap around:\n new_length = (idxB - idxA) + 2 # two additional edge values will be added \n new_pos = np.zeros( new_length )\n new_col = np.zeros( (new_length, 4) )\n new_pos[1:-1] = (pos[idxA:idxB] - ref_pos) / span\n new_col[1:-1] = col[idxA:idxB]\n\n if span < 0: # for reversed subsets, positions now progress 0 to -1 and need to be flipped\n new_pos += 1.0\n new_pos = np.flip( new_pos)\n new_col = np.flip( new_col, axis=0 )\n\n new_pos[ 0] = 0.0\n new_col[ 0] = self.mapToFloat(start)\n new_pos[-1] = 1.0\n new_col[-1] = self.mapToFloat(stop)\n\n cmap = ColorMap( pos=new_pos, color=255.*new_col )\n cmap.name = f\"{self.name}[{start:.2f}({span:+.2f})]\"\n return cmap\n\n\n def map(self, data, mode=BYTE):\n \"\"\"\n map(data, mode=ColorMap.BYTE)\n\n Returns an array of colors corresponding to a single value or an array of values.\n Data must be either a scalar position or an array (any shape) of positions.\n\n Parameters\n ----------\n data: float or array_like of float\n Scalar value(s) to be mapped to colors\n\n mode: str or int, optional\n Determines return format:\n\n - `ColorMap.BYTE` or 'byte': Colors are returned as 0-255 unsigned bytes. 
(default)\n - `ColorMap.FLOAT` or 'float': Colors are returned as 0.0-1.0 floats.\n - `ColorMap.QCOLOR` or 'qcolor': Colors are returned as QColor objects.\n\n Returns\n -------\n np.ndarray of {``ColorMap.BYTE``, ``ColorMap.FLOAT``, QColor}\n for `ColorMap.BYTE` or `ColorMap.FLOAT`:\n\n RGB values for each `data` value, arranged in the same shape as `data`.\n list of QColor\n for `ColorMap.QCOLOR`:\n\n Colors for each `data` value as QColor objects.\n \"\"\"\n if isinstance(mode, str):\n mode = self.enumMap[mode.lower()]\n\n if mode == self.QCOLOR:\n pos, color = self.getStops(self.FLOAT)\n else:\n pos, color = self.getStops(mode)\n\n if np.isscalar(data):\n interp = np.empty((color.shape[1],), dtype=color.dtype)\n else:\n if not isinstance(data, np.ndarray):\n data = np.array(data)\n interp = np.empty(data.shape + (color.shape[1],), dtype=color.dtype)\n\n if self.mapping_mode != self.CLIP:\n if self.mapping_mode == self.REPEAT:\n data = data % 1.0\n elif self.mapping_mode == self.DIVERGING:\n data = (data/2)+0.5\n elif self.mapping_mode == self.MIRROR:\n data = abs(data)\n\n for i in range(color.shape[1]):\n interp[...,i] = np.interp(data, pos, color[:,i])\n\n # Convert to QColor if requested\n if mode == self.QCOLOR:\n if np.isscalar(data):\n return QtGui.QColor.fromRgbF(*interp)\n else:\n return [QtGui.QColor.fromRgbF(*x.tolist()) for x in interp]\n else:\n return interp\n\n def mapToQColor(self, data):\n \"\"\"Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`.\"\"\"\n return self.map(data, mode=self.QCOLOR)\n\n def mapToByte(self, data):\n \"\"\"Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`.\"\"\"\n return self.map(data, mode=self.BYTE)\n\n def mapToFloat(self, data):\n \"\"\"Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`.\"\"\"\n return self.map(data, mode=self.FLOAT)\n\n def getByIndex(self, idx):\n \"\"\"Retrieve a QColor by the index of the stop it is assigned to.\"\"\"\n return QtGui.QColor( *self.color[idx] )\n\n def getGradient(self, p1=None, p2=None):\n \"\"\"\n Returns a QtGui.QLinearGradient corresponding to this ColorMap.\n The span and orientation is given by two points in plot coordinates.\n\n When no parameters are given for `p1` and `p2`, the gradient is mapped to the\n `y` coordinates 0 to 1, unless the color map is defined for a more limited range.\n \n This is a somewhat expensive operation, and it is recommended to store and reuse the returned\n gradient instead of repeatedly regenerating it.\n\n Parameters\n ----------\n p1: QtCore.QPointF, optional\n Starting point (value 0) of the gradient. Default value is QPointF(0., 0.)\n p2: QtCore.QPointF, optional\n End point (value 1) of the gradient. Default parameter `dy` is the span of ``max(pos) - min(pos)``\n over which the color map is defined, typically `dy=1`. Default is QPointF(dy, 0.)\n \"\"\"\n if p1 is None:\n p1 = QtCore.QPointF(0,0)\n if p2 is None:\n p2 = QtCore.QPointF(self.pos.max()-self.pos.min(),0)\n grad = QtGui.QLinearGradient(p1, p2)\n\n pos, color = self.getStops(mode=self.QCOLOR)\n if self.mapping_mode == self.MIRROR:\n pos_n = (1. - np.flip(pos)) / 2\n col_n = np.flip( color, axis=0 )\n pos_p = (1. 
+ pos) / 2\n col_p = color\n pos = np.concatenate( (pos_n, pos_p) )\n color = np.concatenate( (col_n, col_p) )\n grad.setStops(list(zip(pos, color)))\n if self.mapping_mode == self.REPEAT:\n grad.setSpread( QtGui.QGradient.Spread.RepeatSpread )\n return grad\n\n def getBrush(self, span=(0.,1.), orientation='vertical'):\n \"\"\"\n Returns a QBrush painting with the color map applied over the selected span of plot values.\n When the mapping mode is set to `ColorMap.MIRROR`, the selected span includes the color map twice,\n first in reversed order and then normal.\n \n It is recommended to store and reuse this gradient brush instead of regenerating it repeatedly.\n\n Parameters\n ----------\n span : tuple of float, optional\n Span of data values covered by the gradient:\n\n - Color map value 0.0 will appear at `min`,\n - Color map value 1.0 will appear at `max`.\n \n Default value is (0., 1.)\n\n orientation : str, default 'vertical'\n Orientation of the gradient:\n\n - 'vertical': `span` corresponds to the `y` coordinate.\n - 'horizontal': `span` corresponds to the `x` coordinate.\n \"\"\"\n if orientation == 'vertical':\n grad = self.getGradient( p1=QtCore.QPointF(0.,span[0]), p2=QtCore.QPointF(0.,span[1]) )\n elif orientation == 'horizontal':\n grad = self.getGradient( p1=QtCore.QPointF(span[0],0.), p2=QtCore.QPointF(span[1],0.) )\n else:\n raise ValueError(\"Orientation must be 'vertical' or 'horizontal'\")\n return QtGui.QBrush(grad)\n\n def getPen(self, span=(0.,1.), orientation='vertical', width=1.0):\n \"\"\"\n Returns a QPen that draws according to the color map based on vertical or horizontal position.\n \n It is recommended to store and reuse this gradient pen instead of regenerating it repeatedly.\n\n\n Parameters\n ----------\n span : tuple of float\n Span of the data values covered by the gradient:\n\n - Color map value 0.0 will appear at `min`.\n - Color map value 1.0 will appear at `max`.\n\n Default is (0., 1.)\n orientation : str, default 'vertical'\n Orientation of the gradient:\n\n - 'vertical' creates a vertical gradient, where `span` corresponds to the `y` coordinate.\n - 'horizontal' creates a horizontal gradient, where `span` corresponds to the `x` coordinate.\n\n width : int or float\n Width of the pen in pixels on screen.\n \"\"\"\n brush = self.getBrush( span=span, orientation=orientation )\n pen = QtGui.QPen(brush, width)\n pen.setCosmetic(True)\n return pen\n\n def getColors(self, mode=BYTE):\n \"\"\"\n Returns a list of the colors associated with the stops of the color map.\n \n The parameter `mode` can be one of\n - `ColorMap.BYTE` or 'byte' to return colors as RGBA tuples in byte format (0 to 255)\n - `ColorMap.FLOAT` or 'float' to return colors as RGBA tuples in float format (0.0 to 1.0)\n - `ColorMap.QCOLOR` or 'qcolor' to return a list of QColors\n \n The default is byte format.\n \"\"\"\n stops, color = self.getStops(mode=mode)\n return color\n\n def getStops(self, mode=BYTE):\n \"\"\"\n Returns a tuple (stops, colors) containing a list of all stops (ranging 0.0 to 1.0)\n and a list of the associated colors.\n \n The parameter `mode` can be one of\n - `ColorMap.BYTE` or 'byte' to return colors as RGBA tuples in byte format (0 to 255)\n - `ColorMap.FLOAT` or 'float' to return colors as RGBA tuples in float format (0.0 to 1.0)\n - `ColorMap.QCOLOR` or 'qcolor' to return a list of QColors\n\n The default is byte format.\n \"\"\"\n if isinstance(mode, str):\n mode = self.enumMap[mode.lower()]\n\n if mode not in self.stopsCache:\n color = self.color\n if mode == 
self.BYTE and color.dtype.kind == 'f':\n color = (color*255).astype(np.ubyte)\n elif mode == self.FLOAT and color.dtype.kind != 'f':\n color = color.astype(float) / 255.\n elif mode == self.QCOLOR:\n if color.dtype.kind == 'f':\n factory = QtGui.QColor.fromRgbF\n else:\n factory = QtGui.QColor.fromRgb\n color = [factory(*x.tolist()) for x in color]\n self.stopsCache[mode] = (self.pos, color)\n return self.stopsCache[mode]\n\n def getLookupTable(self, start=0.0, stop=1.0, nPts=512, alpha=None, mode=BYTE):\n \"\"\"\n getLookupTable(start=0.0, stop=1.0, nPts=512, alpha=None, mode=ColorMap.BYTE)\n\n Returns an equally-spaced lookup table of RGB(A) values created\n by interpolating the specified color stops.\n\n Parameters\n ----------\n start: float, default=0.0\n The starting value in the lookup table\n stop: float, default=1.0\n The final value in the lookup table\n nPts: int, default=512\n The number of points in the returned lookup table.\n alpha: bool, optional\n Specifies whether or not alpha values are included in the table.\n If alpha is None, it will be automatically determined.\n mode: int or str, default='byte'\n Determines return type as described in :func:`map() <pyqtgraph.ColorMap.map>`, can be\n either `ColorMap.BYTE` (0 to 255), `ColorMap.FLOAT` (0.0 to 1.0) or `ColorMap.QColor`.\n\n Returns\n -------\n np.ndarray of {``ColorMap.BYTE``, ``ColorMap.FLOAT``}\n for `ColorMap.BYTE` or `ColorMap.FLOAT`:\n\n RGB values for each `data` value, arranged in the same shape as `data`.\n If alpha values are included the array has shape (`nPts`, 4), otherwise (`nPts`, 3).\n \n list of QColor\n for `ColorMap.QCOLOR`:\n\n Colors for each `data` value as QColor objects.\n \"\"\"\n if isinstance(mode, str):\n mode = self.enumMap[mode.lower()]\n\n if alpha is None:\n alpha = self.usesAlpha()\n\n x = np.linspace(start, stop, nPts)\n table = self.map(x, mode)\n\n if not alpha and mode != self.QCOLOR:\n return table[:,:3]\n else:\n return table\n\n def usesAlpha(self):\n \"\"\"Returns `True` if any stops have assigned colors with alpha < 255.\"\"\"\n max = 1.0 if self.color.dtype.kind == 'f' else 255\n return np.any(self.color[:,3] != max)\n\n def isMapTrivial(self):\n \"\"\"\n Returns `True` if the gradient has exactly two stops in it: Black at 0.0 and white at 1.0.\n \"\"\"\n if len(self.pos) != 2:\n return False\n if self.pos[0] != 0.0 or self.pos[1] != 1.0:\n return False\n if self.color.dtype.kind == 'f':\n return np.all(self.color == np.array([[0.,0.,0.,1.], [1.,1.,1.,1.]]))\n else:\n return np.all(self.color == np.array([[0,0,0,255], [255,255,255,255]]))\n\n def __repr__(self):\n pos = repr(self.pos).replace('\\n', '')\n color = repr(self.color).replace('\\n', '')\n return \"ColorMap(%s, %s)\" % (pos, color)\n\n def __eq__(self, other):\n if other is None:\n return False\n return eq(self.pos, other.pos) and eq(self.color, other.color)\n", "path": "pyqtgraph/colormap.py" } ]
[ { "content": "from collections.abc import Callable, Sequence\nfrom os import listdir, path\n\nimport numpy as np\n\nfrom .functions import clip_array, clip_scalar, colorDistance, eq, mkColor\nfrom .Qt import QtCore, QtGui\n\n__all__ = ['ColorMap']\n\n_mapCache = {}\n\ndef listMaps(source=None):\n \"\"\"\n .. warning:: Experimental, subject to change.\n\n List available color maps.\n\n Parameters\n ----------\n source: str, optional\n Color map source. If omitted, locally stored maps are listed. Otherwise:\n\n - 'matplotlib' lists maps that can be imported from Matplotlib\n - 'colorcet' lists maps that can be imported from ColorCET\n\n Returns\n -------\n list of str\n Known color map names.\n \"\"\"\n if source is None:\n pathname = path.join(path.dirname(__file__), 'colors','maps')\n files = listdir( pathname )\n list_of_maps = []\n for filename in files:\n if filename[-4:] == '.csv' or filename[-4:] == '.hex':\n list_of_maps.append(filename[:-4])\n return list_of_maps\n elif source.lower() == 'matplotlib':\n try:\n import matplotlib.pyplot as mpl_plt\n list_of_maps = mpl_plt.colormaps()\n return list_of_maps\n except ModuleNotFoundError:\n return []\n elif source.lower() == 'colorcet':\n try:\n import colorcet\n list_of_maps = list( colorcet.palette.keys() )\n list_of_maps.sort()\n return list_of_maps\n except ModuleNotFoundError:\n return []\n return []\n\n\ndef get(name, source=None, skipCache=False):\n \"\"\"\n .. warning:: Experimental, subject to change.\n\n Returns a ColorMap object from a local definition or imported from another library.\n The generated ColorMap objects are cached for fast repeated access.\n\n Parameters\n ----------\n name: str\n Name of color map. In addition to the included maps, this can also\n be a path to a file in the local folder. See the files in the\n ``pyqtgraph/colors/maps/`` folder for examples of the format.\n source: str, optional\n If omitted, a locally stored map is returned. Otherwise:\n\n - 'matplotlib' imports a map defined by Matplotlib.\n - 'colorcet' imports a map defined by ColorCET.\n\n skipCache: bool, optional\n If `skipCache=True`, the internal cache is skipped and a new\n ColorMap object is generated. 
This can load an unaltered copy\n when the previous ColorMap object has been modified.\n \"\"\"\n if not skipCache and name in _mapCache:\n return _mapCache[name]\n if source is None:\n return _getFromFile(name)\n elif source == 'matplotlib':\n return getFromMatplotlib(name)\n elif source == 'colorcet':\n return getFromColorcet(name)\n return None\n\ndef _getFromFile(name):\n filename = name\n if filename[0] !='.': # load from built-in directory\n dirname = path.dirname(__file__)\n filename = path.join(dirname, 'colors/maps/'+filename)\n if not path.isfile( filename ): # try suffixes if file is not found:\n if path.isfile( filename+'.csv' ): filename += '.csv'\n elif path.isfile( filename+'.hex' ): filename += '.hex'\n with open(filename,'r') as fh:\n idx = 0\n color_list = []\n if filename[-4:].lower() != '.hex':\n csv_mode = True\n else:\n csv_mode = False\n for line in fh:\n line = line.strip()\n if len(line) == 0: continue # empty line\n if line[0] == ';': continue # comment\n parts = line.split(sep=';', maxsplit=1) # split into color and names/comments\n if csv_mode:\n comp = parts[0].split(',')\n if len( comp ) < 3: continue # not enough components given\n color_tuple = tuple( [ int(255*float(c)+0.5) for c in comp ] )\n else:\n hex_str = parts[0]\n if hex_str[0] == '#':\n hex_str = hex_str[1:] # strip leading #\n if len(hex_str) < 3: continue # not enough information\n if len(hex_str) == 3: # parse as abbreviated RGB\n hex_str = 2*hex_str[0] + 2*hex_str[1] + 2*hex_str[2]\n elif len(hex_str) == 4: # parse as abbreviated RGBA\n hex_str = 2*hex_str[0] + 2*hex_str[1] + 2*hex_str[2] + 2*hex_str[3]\n if len(hex_str) < 6: continue # not enough information\n try:\n color_tuple = tuple( bytes.fromhex( hex_str ) )\n except ValueError as e:\n raise ValueError(f\"failed to convert hexadecimal value '{hex_str}'.\") from e\n color_list.append( color_tuple )\n idx += 1\n # end of line reading loop\n # end of open\n cmap = ColorMap( name=name,\n pos=np.linspace(0.0, 1.0, len(color_list)),\n color=color_list) #, names=color_names)\n if cmap is not None:\n cmap.name = name\n _mapCache[name] = cmap\n return cmap\n\ndef getFromMatplotlib(name):\n \"\"\" \n Generates a ColorMap object from a Matplotlib definition.\n Same as ``colormap.get(name, source='matplotlib')``.\n \"\"\"\n # inspired and informed by \"mpl_cmaps_in_ImageItem.py\", published by Sebastian Hoefer at \n # https://github.com/honkomonk/pyqtgraph_sandbox/blob/master/mpl_cmaps_in_ImageItem.py\n try:\n import matplotlib.pyplot as mpl_plt\n except ModuleNotFoundError:\n return None\n cmap = None\n col_map = mpl_plt.get_cmap(name)\n if hasattr(col_map, '_segmentdata'): # handle LinearSegmentedColormap\n data = col_map._segmentdata\n if ('red' in data) and isinstance(data['red'], (Sequence, np.ndarray)):\n positions = set() # super-set of handle positions in individual channels\n for key in ['red','green','blue']:\n for tup in data[key]:\n positions.add(tup[0])\n col_data = np.zeros((len(positions),4 ))\n col_data[:,-1] = sorted(positions)\n for idx, key in enumerate(['red','green','blue']):\n positions = np.zeros( len(data[key] ) )\n comp_vals = np.zeros( len(data[key] ) )\n for idx2, tup in enumerate( data[key] ):\n positions[idx2] = tup[0]\n comp_vals[idx2] = tup[1] # these are sorted in the raw data\n col_data[:,idx] = np.interp(col_data[:,3], positions, comp_vals)\n cmap = ColorMap(pos=col_data[:,-1], color=255*col_data[:,:3]+0.5)\n # some color maps (gnuplot in particular) are defined by RGB component functions:\n elif ('red' in data) and 
isinstance(data['red'], Callable):\n col_data = np.zeros((64, 4))\n col_data[:,-1] = np.linspace(0., 1., 64)\n for idx, key in enumerate(['red','green','blue']):\n col_data[:,idx] = np.clip( data[key](col_data[:,-1]), 0, 1)\n cmap = ColorMap(pos=col_data[:,-1], color=255*col_data[:,:3]+0.5)\n elif hasattr(col_map, 'colors'): # handle ListedColormap\n col_data = np.array(col_map.colors)\n cmap = ColorMap( name=name,\n pos = np.linspace(0.0, 1.0, col_data.shape[0]), color=255*col_data[:,:3]+0.5 )\n if cmap is not None:\n cmap.name = name\n _mapCache[name] = cmap\n return cmap\n\ndef getFromColorcet(name):\n \"\"\" Generates a ColorMap object from a colorcet definition. Same as ``colormap.get(name, source='colorcet')``. \"\"\"\n try:\n import colorcet\n except ModuleNotFoundError:\n return None\n color_strings = colorcet.palette[name]\n color_list = []\n for hex_str in color_strings:\n if hex_str[0] != '#': continue\n if len(hex_str) != 7:\n raise ValueError(f\"Invalid color string '{hex_str}' in colorcet import.\")\n color_tuple = tuple( bytes.fromhex( hex_str[1:] ) )\n color_list.append( color_tuple )\n if len(color_list) == 0:\n return None\n cmap = ColorMap( name=name,\n pos=np.linspace(0.0, 1.0, len(color_list)), \n color=color_list) #, names=color_names)\n if cmap is not None:\n cmap.name = name\n _mapCache[name] = cmap\n return cmap\n \ndef makeHslCycle( hue=0.0, saturation=1.0, lightness=0.5, steps=36 ):\n \"\"\"\n Returns a ColorMap object that traces a circular or spiraling path around the HSL color space.\n\n Parameters\n ----------\n hue : float or tuple of floats\n Starting point or (start, end) for hue. Values can lie outside the [0 to 1] range \n to realize multiple cycles. For a single value, one full hue cycle is generated.\n The default starting hue is 0.0 (red). \n saturation : float or tuple of floats, optional\n Saturation value for the colors in the cycle, in the range of [0 to 1]. \n If a (start, end) tuple is given, saturation gradually changes between these values.\n The default saturation is 1.0.\n lightness : float or tuple of floats, optional\n Lightness value for the colors in the cycle, in the range of [0 to 1]. \n If a (start, end) tuple is given, lightness gradually changes between these values.\n The default lightness is 1.0.\n steps: int, optional\n Number of steps in the cycle. Between these steps, the color map will interpolate in RGB space.\n The default number of steps is 36, generating a color map with 37 stops.\n \"\"\"\n if isinstance( hue, (tuple, list) ):\n hueA, hueB = hue\n else:\n hueA = hue\n hueB = hueA + 1.0\n if isinstance( saturation, (tuple, list) ):\n satA, satB = saturation\n else:\n satA = satB = saturation\n if isinstance( lightness, (tuple, list) ):\n lgtA, lgtB = lightness\n else:\n lgtA = lgtB = lightness\n hue_vals = np.linspace(hueA, hueB, num=steps+1)\n sat_vals = np.linspace(satA, satB, num=steps+1)\n lgt_vals = np.linspace(lgtA, lgtB, num=steps+1)\n color_list = []\n for hue, sat, lgt in zip( hue_vals, sat_vals, lgt_vals):\n qcol = QtGui.QColor.fromHslF( hue%1.0, sat, lgt )\n color_list.append( qcol )\n name = f'Hue {hueA:0.2f}-{hueB:0.2f}'\n return ColorMap( None, color_list, name=name )\n\ndef makeMonochrome(color='neutral'):\n \"\"\"\n Returns a ColorMap object with a dark to bright ramp and adjustable tint.\n \n In addition to neutral, warm or cold grays, imitations of monochrome computer monitors are also\n available. 
The following predefined color ramps are available:\n `neutral`, `warm`, `cool`, `green`, `amber`, `blue`, `red`, `pink`, `lavender`.\n \n The ramp can also be specified by a tuple of float values in the range of 0 to 1.\n In this case `(h, s, l0, l1)` describe hue, saturation, minimum lightness and maximum lightness\n within the HSL color space. The values `l0` and `l1` can be omitted. They default to \n `l0=0.0` and `l1=1.0` in this case.\n\n Parameters\n ----------\n color: str or tuple of floats\n Color description. Can be one of the predefined identifiers, or a tuple\n `(h, s, l0, l1)`, `(h, s)` or (`h`).\n 'green', 'amber', 'blue', 'red', 'lavender', 'pink'\n or a tuple of relative ``(R,G,B)`` contributions in range 0.0 to 1.0\n \"\"\"\n name=f'Monochrome {color}'\n defaults = {\n 'neutral': (0.00, 0.00, 0.00, 1.00),\n 'warm' : (0.10, 0.08, 0.00, 0.95),\n 'cool' : (0.60, 0.08, 0.00, 0.95),\n 'green' : (0.35, 0.55, 0.02, 0.90),\n 'amber' : (0.09, 0.80, 0.02, 0.80),\n 'blue' : (0.58, 0.85, 0.02, 0.95),\n 'red' : (0.01, 0.60, 0.02, 0.90),\n 'pink' : (0.93, 0.65, 0.02, 0.95),\n 'lavender': (0.75, 0.50, 0.02, 0.90)\n }\n if isinstance(color, str):\n if color in defaults:\n h_val, s_val, l_min, l_max = defaults[color]\n else:\n valid = ','.join(defaults.keys())\n raise ValueError(f\"Undefined color descriptor '{color}', known values are:\\n{valid}\")\n else:\n s_val = 0.70 # set up default values\n l_min = 0.00\n l_max = 1.00\n if not hasattr(color,'__len__'):\n h_val = float(color)\n elif len(color) == 1:\n h_val = color[0]\n elif len(color) == 2:\n h_val, s_val = color\n elif len(color) == 4:\n h_val, s_val, l_min, l_max = color\n else:\n raise ValueError(f\"Invalid color descriptor '{color}'\")\n l_vals = np.linspace(l_min, l_max, num=16)\n color_list = []\n for l_val in l_vals:\n qcol = QtGui.QColor.fromHslF( h_val, s_val, l_val )\n color_list.append( qcol )\n return ColorMap( None, color_list, name=name, linearize=True )\n\ndef modulatedBarData(length=768, width=32):\n \"\"\" \n Returns an NumPy array that represents a modulated color bar ranging from 0 to 1.\n This is used to judge the perceived variation of the color gradient.\n \n Parameters\n ----------\n length: int\n Length of the data set. Values will vary from 0 to 1 over this axis.\n width: int\n Width of the data set. The modulation will vary from 0% to 4% over this axis. 
\n \"\"\"\n gradient = np.linspace(0.00, 1.00, length)\n modulation = -0.04 * np.sin( (np.pi/4) * np.arange(length) )\n data = np.zeros( (length, width) )\n for idx in range(width):\n data[:,idx] = gradient + (idx/(width-1)) * modulation\n clip_array(data, 0.0, 1.0, out=data)\n return data\n\nclass ColorMap(object):\n \"\"\"\n ColorMap(pos, color, mapping=ColorMap.CLIP)\n\n ColorMap stores a mapping of specific data values to colors, for example:\n\n | 0.0 → black\n | 0.2 → red\n | 0.6 → yellow\n | 1.0 → white\n\n The colors for intermediate values are determined by interpolating between\n the two nearest colors in RGB color space.\n\n A ColorMap object provides access to the interpolated colors by indexing with a float value:\n ``cm[0.5]`` returns a QColor corresponding to the center of ColorMap `cm`.\n \"\"\"\n ## mapping modes\n CLIP = 1\n REPEAT = 2\n MIRROR = 3\n DIVERGING = 4\n\n ## return types\n BYTE = 1\n FLOAT = 2\n QCOLOR = 3\n\n enumMap = {\n 'clip': CLIP,\n 'repeat': REPEAT,\n 'mirror': MIRROR,\n 'diverging': DIVERGING,\n 'byte': BYTE,\n 'float': FLOAT,\n 'qcolor': QCOLOR,\n }\n\n def __init__(self, pos, color, mapping=CLIP, mode=None, linearize=False, name=''):\n \"\"\"\n __init__(pos, color, mapping=ColorMap.CLIP)\n \n Parameters\n ----------\n pos: array_like of float, optional\n Assigned positions of specified colors. `None` sets equal spacing.\n Values need to be in range 0.0-1.0.\n color: array_like of color_like\n List of colors, interpreted via :func:`mkColor() <pyqtgraph.mkColor>`.\n mapping: str or int, optional\n Controls how values outside the 0 to 1 range are mapped to colors.\n See :func:`setMappingMode() <ColorMap.setMappingMode>` for details. \n \n The default of `ColorMap.CLIP` continues to show\n the colors assigned to 0 and 1 for all values below or above this range, respectively.\n \"\"\"\n self.name = name # storing a name helps identify ColorMaps sampled by Palette\n if pos is None:\n order = range(len(color))\n self.pos = np.linspace(0.0, 1.0, num=len(color))\n else:\n self.pos = np.array(pos)\n order = np.argsort(self.pos)\n self.pos = self.pos[order]\n \n self.color = np.zeros( (len(color), 4) ) # stores float rgba values\n for cnt, idx in enumerate(order):\n self.color[cnt] = mkColor(color[idx]).getRgbF()\n # alternative code may be more efficient, but fails to handle lists of QColor.\n # self.color = np.apply_along_axis(\n # func1d = lambda x: np.uint8( mkColor(x).getRgb() ), # cast RGB integer values to uint8\n # axis = -1,\n # arr = color,\n # )[order]\n \n self.mapping_mode = self.CLIP # default to CLIP mode \n if mapping is not None:\n self.setMappingMode( mapping )\n self.stopsCache = {}\n if linearize: self.linearize()\n\n def setMappingMode(self, mapping):\n \"\"\"\n Sets the way that values outside of the range 0 to 1 are mapped to colors.\n\n Parameters\n ----------\n mapping: int or str\n Sets mapping mode to\n\n - `ColorMap.CLIP` or 'clip': Values are clipped to the range 0 to 1. ColorMap defaults to this.\n - `ColorMap.REPEAT` or 'repeat': Colors repeat cyclically, i.e. 
range 1 to 2 repeats the colors for 0 to 1.\n - `ColorMap.MIRROR` or 'mirror': The range 0 to -1 uses same colors (in reverse order) as 0 to 1.\n - `ColorMap.DIVERGING` or 'diverging': Colors are mapped to -1 to 1 such that the central value appears at 0.\n \"\"\"\n if isinstance(mapping, str):\n mapping = self.enumMap[mapping.lower()]\n if mapping in [self.CLIP, self.REPEAT, self.DIVERGING, self.MIRROR]:\n self.mapping_mode = mapping # only allow defined values\n else:\n raise ValueError(f\"Undefined mapping type '{mapping}'\")\n self.stopsCache = {}\n \n def __str__(self):\n \"\"\" provide human-readable identifier \"\"\"\n if self.name is None:\n return 'unnamed ColorMap({:d})'.format(len(self.pos))\n return \"ColorMap({:d}):'{:s}'\".format(len(self.pos),self.name)\n\n def __getitem__(self, key):\n \"\"\" Convenient shorthand access to palette colors \"\"\"\n if isinstance(key, int): # access by color index\n return self.getByIndex(key)\n # otherwise access by map\n try: # accept any numerical format that converts to float\n float_idx = float(key)\n return self.mapToQColor(float_idx)\n except ValueError: pass\n return None\n\n def linearize(self):\n \"\"\"\n Adjusts the positions assigned to color stops to approximately equalize the perceived color difference\n for a fixed step.\n \"\"\"\n colors = self.getColors(mode=self.QCOLOR)\n distances = colorDistance(colors)\n positions = np.insert( np.cumsum(distances), 0, 0.0 )\n self.pos = positions / positions[-1] # normalize last value to 1.0\n self.stopsCache = {}\n\n def reverse(self):\n \"\"\"\n Reverses the color map, so that the color assigned to a value of 1 now appears at 0 and vice versa.\n This is convenient to adjust imported color maps.\n \"\"\"\n self.pos = 1.0 - np.flip( self.pos )\n self.color = np.flip( self.color, axis=0 )\n self.stopsCache = {}\n \n def getSubset(self, start, span):\n \"\"\"\n Returns a new ColorMap object that extracts the subset specified by 'start' and 'length' \n to the full 0.0 to 1.0 range. A negative length results in a color map that is reversed \n relative to the original.\n \n Parameters\n ----------\n start : float\n Starting value that defines the 0.0 value of the new color map.\n Possible value between 0.0 to 1.0\n span : float\n Span of the extracted region. The original color map will be \n treated as cyclical if the extracted interval exceeds the \n 0.0 to 1.0 range. 
Possible values between -1.0 to 1.0.\n \"\"\"\n pos, col = self.getStops( mode=ColorMap.FLOAT )\n start = clip_scalar(start, 0.0, 1.0)\n span = clip_scalar(span, -1.0, 1.0)\n\n if span == 0.0:\n raise ValueError(\"'length' needs to be non-zero\")\n stop = (start + span)\n if stop > 1.0 or stop < 0.0: stop = stop % 1.0\n # find indices *inside* range, start and end will be added by sampling later\n if span > 0:\n ref_pos = start # lowest position value at start\n idxA = np.searchsorted( pos, start, side='right' )\n idxB = np.searchsorted( pos, stop , side='left' ) # + 1 # right-side element of interval\n wraps = bool( stop < start ) # wraps around?\n else:\n ref_pos = stop # lowest position value at stop\n idxA = np.searchsorted( pos, stop , side='right')\n idxB = np.searchsorted( pos, start, side='left' ) # + 1 # right-side element of interval\n wraps = bool( stop > start ) # wraps around?\n \n if wraps: # wraps around:\n length1 = (len(pos)-idxA) # before wrap\n length2 = idxB # after wrap\n new_length = length1 + length2 + 2 # combined; plus edge elements\n new_pos = np.zeros( new_length )\n new_col = np.zeros( (new_length, 4) )\n new_pos[ 1:length1+1] = (0 + pos[idxA:] - ref_pos) / span # starting point lie in 0 to 1 range\n new_pos[length1+1:-1] = (1 + pos[:idxB] - ref_pos) / span # end point wrapped to -1 to 0 range\n new_pos[length1] -= np.copysign(1e-6, span) # breaks degeneracy of shifted 0.0 and 1.0 values\n new_col[ 1:length1+1] = col[idxA:]\n new_col[length1+1:-1] = col[:idxB]\n else: # does not wrap around:\n new_length = (idxB - idxA) + 2 # two additional edge values will be added \n new_pos = np.zeros( new_length )\n new_col = np.zeros( (new_length, 4) )\n new_pos[1:-1] = (pos[idxA:idxB] - ref_pos) / span\n new_col[1:-1] = col[idxA:idxB]\n\n if span < 0: # for reversed subsets, positions now progress 0 to -1 and need to be flipped\n new_pos += 1.0\n new_pos = np.flip( new_pos)\n new_col = np.flip( new_col, axis=0 )\n\n new_pos[ 0] = 0.0\n new_col[ 0] = self.mapToFloat(start)\n new_pos[-1] = 1.0\n new_col[-1] = self.mapToFloat(stop)\n\n cmap = ColorMap( pos=new_pos, color=255.*new_col )\n cmap.name = f\"{self.name}[{start:.2f}({span:+.2f})]\"\n return cmap\n\n\n def map(self, data, mode=BYTE):\n \"\"\"\n map(data, mode=ColorMap.BYTE)\n\n Returns an array of colors corresponding to a single value or an array of values.\n Data must be either a scalar position or an array (any shape) of positions.\n\n Parameters\n ----------\n data: float or array_like of float\n Scalar value(s) to be mapped to colors\n\n mode: str or int, optional\n Determines return format:\n\n - `ColorMap.BYTE` or 'byte': Colors are returned as 0-255 unsigned bytes. 
(default)\n - `ColorMap.FLOAT` or 'float': Colors are returned as 0.0-1.0 floats.\n - `ColorMap.QCOLOR` or 'qcolor': Colors are returned as QColor objects.\n\n Returns\n -------\n np.ndarray of {``ColorMap.BYTE``, ``ColorMap.FLOAT``, QColor}\n for `ColorMap.BYTE` or `ColorMap.FLOAT`:\n\n RGB values for each `data` value, arranged in the same shape as `data`.\n list of QColor\n for `ColorMap.QCOLOR`:\n\n Colors for each `data` value as QColor objects.\n \"\"\"\n if isinstance(mode, str):\n mode = self.enumMap[mode.lower()]\n\n if mode == self.QCOLOR:\n pos, color = self.getStops(self.FLOAT)\n else:\n pos, color = self.getStops(mode)\n\n if np.isscalar(data):\n interp = np.empty((color.shape[1],), dtype=color.dtype)\n else:\n if not isinstance(data, np.ndarray):\n data = np.array(data)\n interp = np.empty(data.shape + (color.shape[1],), dtype=color.dtype)\n\n if self.mapping_mode != self.CLIP:\n if self.mapping_mode == self.REPEAT:\n data = data % 1.0\n elif self.mapping_mode == self.DIVERGING:\n data = (data/2)+0.5\n elif self.mapping_mode == self.MIRROR:\n data = abs(data)\n\n for i in range(color.shape[1]):\n interp[...,i] = np.interp(data, pos, color[:,i])\n\n # Convert to QColor if requested\n if mode == self.QCOLOR:\n if np.isscalar(data):\n return QtGui.QColor.fromRgbF(*interp)\n else:\n return [QtGui.QColor.fromRgbF(*x.tolist()) for x in interp]\n else:\n return interp\n\n def mapToQColor(self, data):\n \"\"\"Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`.\"\"\"\n return self.map(data, mode=self.QCOLOR)\n\n def mapToByte(self, data):\n \"\"\"Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`.\"\"\"\n return self.map(data, mode=self.BYTE)\n\n def mapToFloat(self, data):\n \"\"\"Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`.\"\"\"\n return self.map(data, mode=self.FLOAT)\n\n def getByIndex(self, idx):\n \"\"\"Retrieve a QColor by the index of the stop it is assigned to.\"\"\"\n return QtGui.QColor.fromRgbF( *self.color[idx] )\n\n def getGradient(self, p1=None, p2=None):\n \"\"\"\n Returns a QtGui.QLinearGradient corresponding to this ColorMap.\n The span and orientation is given by two points in plot coordinates.\n\n When no parameters are given for `p1` and `p2`, the gradient is mapped to the\n `y` coordinates 0 to 1, unless the color map is defined for a more limited range.\n \n This is a somewhat expensive operation, and it is recommended to store and reuse the returned\n gradient instead of repeatedly regenerating it.\n\n Parameters\n ----------\n p1: QtCore.QPointF, optional\n Starting point (value 0) of the gradient. Default value is QPointF(0., 0.)\n p2: QtCore.QPointF, optional\n End point (value 1) of the gradient. Default parameter `dy` is the span of ``max(pos) - min(pos)``\n over which the color map is defined, typically `dy=1`. Default is QPointF(dy, 0.)\n \"\"\"\n if p1 is None:\n p1 = QtCore.QPointF(0,0)\n if p2 is None:\n p2 = QtCore.QPointF(self.pos.max()-self.pos.min(),0)\n grad = QtGui.QLinearGradient(p1, p2)\n\n pos, color = self.getStops(mode=self.QCOLOR)\n if self.mapping_mode == self.MIRROR:\n pos_n = (1. - np.flip(pos)) / 2\n col_n = np.flip( color, axis=0 )\n pos_p = (1. 
+ pos) / 2\n col_p = color\n pos = np.concatenate( (pos_n, pos_p) )\n color = np.concatenate( (col_n, col_p) )\n grad.setStops(list(zip(pos, color)))\n if self.mapping_mode == self.REPEAT:\n grad.setSpread( QtGui.QGradient.Spread.RepeatSpread )\n return grad\n\n def getBrush(self, span=(0.,1.), orientation='vertical'):\n \"\"\"\n Returns a QBrush painting with the color map applied over the selected span of plot values.\n When the mapping mode is set to `ColorMap.MIRROR`, the selected span includes the color map twice,\n first in reversed order and then normal.\n \n It is recommended to store and reuse this gradient brush instead of regenerating it repeatedly.\n\n Parameters\n ----------\n span : tuple of float, optional\n Span of data values covered by the gradient:\n\n - Color map value 0.0 will appear at `min`,\n - Color map value 1.0 will appear at `max`.\n \n Default value is (0., 1.)\n\n orientation : str, default 'vertical'\n Orientation of the gradient:\n\n - 'vertical': `span` corresponds to the `y` coordinate.\n - 'horizontal': `span` corresponds to the `x` coordinate.\n \"\"\"\n if orientation == 'vertical':\n grad = self.getGradient( p1=QtCore.QPointF(0.,span[0]), p2=QtCore.QPointF(0.,span[1]) )\n elif orientation == 'horizontal':\n grad = self.getGradient( p1=QtCore.QPointF(span[0],0.), p2=QtCore.QPointF(span[1],0.) )\n else:\n raise ValueError(\"Orientation must be 'vertical' or 'horizontal'\")\n return QtGui.QBrush(grad)\n\n def getPen(self, span=(0.,1.), orientation='vertical', width=1.0):\n \"\"\"\n Returns a QPen that draws according to the color map based on vertical or horizontal position.\n \n It is recommended to store and reuse this gradient pen instead of regenerating it repeatedly.\n\n\n Parameters\n ----------\n span : tuple of float\n Span of the data values covered by the gradient:\n\n - Color map value 0.0 will appear at `min`.\n - Color map value 1.0 will appear at `max`.\n\n Default is (0., 1.)\n orientation : str, default 'vertical'\n Orientation of the gradient:\n\n - 'vertical' creates a vertical gradient, where `span` corresponds to the `y` coordinate.\n - 'horizontal' creates a horizontal gradient, where `span` corresponds to the `x` coordinate.\n\n width : int or float\n Width of the pen in pixels on screen.\n \"\"\"\n brush = self.getBrush( span=span, orientation=orientation )\n pen = QtGui.QPen(brush, width)\n pen.setCosmetic(True)\n return pen\n\n def getColors(self, mode=BYTE):\n \"\"\"\n Returns a list of the colors associated with the stops of the color map.\n \n The parameter `mode` can be one of\n - `ColorMap.BYTE` or 'byte' to return colors as RGBA tuples in byte format (0 to 255)\n - `ColorMap.FLOAT` or 'float' to return colors as RGBA tuples in float format (0.0 to 1.0)\n - `ColorMap.QCOLOR` or 'qcolor' to return a list of QColors\n \n The default is byte format.\n \"\"\"\n stops, color = self.getStops(mode=mode)\n return color\n\n def getStops(self, mode=BYTE):\n \"\"\"\n Returns a tuple (stops, colors) containing a list of all stops (ranging 0.0 to 1.0)\n and a list of the associated colors.\n \n The parameter `mode` can be one of\n - `ColorMap.BYTE` or 'byte' to return colors as RGBA tuples in byte format (0 to 255)\n - `ColorMap.FLOAT` or 'float' to return colors as RGBA tuples in float format (0.0 to 1.0)\n - `ColorMap.QCOLOR` or 'qcolor' to return a list of QColors\n\n The default is byte format.\n \"\"\"\n if isinstance(mode, str):\n mode = self.enumMap[mode.lower()]\n\n if mode not in self.stopsCache:\n color = self.color\n if mode == 
self.BYTE and color.dtype.kind == 'f':\n color = (color*255).astype(np.ubyte)\n elif mode == self.FLOAT and color.dtype.kind != 'f':\n color = color.astype(float) / 255.\n elif mode == self.QCOLOR:\n if color.dtype.kind == 'f':\n factory = QtGui.QColor.fromRgbF\n else:\n factory = QtGui.QColor.fromRgb\n color = [factory(*x.tolist()) for x in color]\n self.stopsCache[mode] = (self.pos, color)\n return self.stopsCache[mode]\n\n def getLookupTable(self, start=0.0, stop=1.0, nPts=512, alpha=None, mode=BYTE):\n \"\"\"\n getLookupTable(start=0.0, stop=1.0, nPts=512, alpha=None, mode=ColorMap.BYTE)\n\n Returns an equally-spaced lookup table of RGB(A) values created\n by interpolating the specified color stops.\n\n Parameters\n ----------\n start: float, default=0.0\n The starting value in the lookup table\n stop: float, default=1.0\n The final value in the lookup table\n nPts: int, default=512\n The number of points in the returned lookup table.\n alpha: bool, optional\n Specifies whether or not alpha values are included in the table.\n If alpha is None, it will be automatically determined.\n mode: int or str, default='byte'\n Determines return type as described in :func:`map() <pyqtgraph.ColorMap.map>`, can be\n either `ColorMap.BYTE` (0 to 255), `ColorMap.FLOAT` (0.0 to 1.0) or `ColorMap.QColor`.\n\n Returns\n -------\n np.ndarray of {``ColorMap.BYTE``, ``ColorMap.FLOAT``}\n for `ColorMap.BYTE` or `ColorMap.FLOAT`:\n\n RGB values for each `data` value, arranged in the same shape as `data`.\n If alpha values are included the array has shape (`nPts`, 4), otherwise (`nPts`, 3).\n \n list of QColor\n for `ColorMap.QCOLOR`:\n\n Colors for each `data` value as QColor objects.\n \"\"\"\n if isinstance(mode, str):\n mode = self.enumMap[mode.lower()]\n\n if alpha is None:\n alpha = self.usesAlpha()\n\n x = np.linspace(start, stop, nPts)\n table = self.map(x, mode)\n\n if not alpha and mode != self.QCOLOR:\n return table[:,:3]\n else:\n return table\n\n def usesAlpha(self):\n \"\"\"Returns `True` if any stops have assigned colors with alpha < 255.\"\"\"\n max = 1.0 if self.color.dtype.kind == 'f' else 255\n return np.any(self.color[:,3] != max)\n\n def isMapTrivial(self):\n \"\"\"\n Returns `True` if the gradient has exactly two stops in it: Black at 0.0 and white at 1.0.\n \"\"\"\n if len(self.pos) != 2:\n return False\n if self.pos[0] != 0.0 or self.pos[1] != 1.0:\n return False\n if self.color.dtype.kind == 'f':\n return np.all(self.color == np.array([[0.,0.,0.,1.], [1.,1.,1.,1.]]))\n else:\n return np.all(self.color == np.array([[0,0,0,255], [255,255,255,255]]))\n\n def __repr__(self):\n pos = repr(self.pos).replace('\\n', '')\n color = repr(self.color).replace('\\n', '')\n return \"ColorMap(%s, %s)\" % (pos, color)\n\n def __eq__(self, other):\n if other is None:\n return False\n return eq(self.pos, other.pos) and eq(self.color, other.color)\n", "path": "pyqtgraph/colormap.py" } ]
diff --git a/pyqtgraph/colormap.py b/pyqtgraph/colormap.py index 413c4436e1..5a2b8106b8 100644 --- a/pyqtgraph/colormap.py +++ b/pyqtgraph/colormap.py @@ -622,7 +622,7 @@ def mapToFloat(self, data): def getByIndex(self, idx): """Retrieve a QColor by the index of the stop it is assigned to.""" - return QtGui.QColor( *self.color[idx] ) + return QtGui.QColor.fromRgbF( *self.color[idx] ) def getGradient(self, p1=None, p2=None): """ diff --git a/tests/test_colormap.py b/tests/test_colormap.py index 0926302c65..16372ca6f5 100644 --- a/tests/test_colormap.py +++ b/tests/test_colormap.py @@ -74,3 +74,8 @@ def test_ColorMap_getColors(color_list): colors = cm.getColors('qcolor') for actual, good in zip(colors, qcols): assert actual.getRgbF() == good.getRgbF() + +def test_ColorMap_getByIndex(): + cm = pg.ColorMap([0.0, 1.0], [(0,0,0), (255,0,0)]) + assert cm.getByIndex(0) == QtGui.QColor.fromRgbF(0.0, 0.0, 0.0, 1.0) + assert cm.getByIndex(1) == QtGui.QColor.fromRgbF(1.0, 0.0, 0.0, 1.0)
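Editor's note on the diff above: the one-line change matters because `ColorMap.color` stores RGBA components as floats in the 0.0-1.0 range, while the plain `QtGui.QColor(...)` constructor expects integer components in 0-255. The following is a minimal illustrative sketch, not part of the repository, assuming a PyQt/PySide binding is installed:

```python
# Minimal sketch: why the diff swaps QtGui.QColor(...) for QtGui.QColor.fromRgbF(...)
# in ColorMap.getByIndex(). Variable names here are illustrative only.
from pyqtgraph.Qt import QtGui

rgba_float = (1.0, 0.0, 0.0, 1.0)  # ColorMap.color stores float RGBA in 0.0-1.0

# The integer-based constructor treats components as 0-255 values, so float
# stops come out nearly black (or are rejected, depending on the Qt binding).
as_int = QtGui.QColor(1, 0, 0, 1)              # what the old call amounted to
as_float = QtGui.QColor.fromRgbF(*rgba_float)  # interprets components as 0.0-1.0

print(as_int.getRgb())    # (1, 0, 0, 1)     -> effectively black, near-transparent
print(as_float.getRgb())  # (255, 0, 0, 255) -> opaque red, as intended
```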
pypi__warehouse-5656
XML-RPC changelog_since_serial() method is returning invalid XML due to \x01 in sdist name As of 2019-04-01 17:35:00 +0000, running the below Python 3 code: ``` #!/usr/bin/python3 import csv from xmlrpc.client import ServerProxy import sys ENDPOINT = 'https://pypi.org/pypi' SINCE = 5011848 client = ServerProxy(ENDPOINT, use_builtin_types=True) out = csv.writer(sys.stdout) out.writerow('name version timestamp action serial'.split()) for row in client.changelog_since_serial(SINCE): out.writerow(row) ``` fails with: ``` Traceback (most recent call last): File "minimum.py", line 12, in <module> for row in client.changelog_since_serial(SINCE): File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/xmlrpc/client.py", line 1112, in __call__ return self.__send(self.__name, args) File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/xmlrpc/client.py", line 1452, in __request verbose=self.__verbose File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/xmlrpc/client.py", line 1154, in request return self.single_request(host, handler, request_body, verbose) File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/xmlrpc/client.py", line 1170, in single_request return self.parse_response(resp) File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/xmlrpc/client.py", line 1336, in parse_response p.feed(data) File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/xmlrpc/client.py", line 439, in feed self._parser.Parse(data, 0) xml.parsers.expat.ExpatError: not well-formed (invalid token): line 5264, column 38 ``` Manually reproducing the XML-RPC request and inspecting the response shows that the problematic piece of XML is (with line number added): ``` <value><array><data> <value><string>request-s</string></value> <value><string>0.0.1</string></value> <value><int>1554118234</int></value> <value><string>add source file request^As-0.0.1.tar.gz</string></value> <!-- line 5364 --> <value><int>5016503</int></value> </data></array></value> ``` The `^A` is here a literal `\x01` which somehow ended up in the filename of an sdist uploaded to the `request-s` project. If Warehouse is going to allow uploads of such filenames, it needs to properly escape them in XML-RPC responses.
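For context, the failure comes from a C0 control character (`\x01`) reaching the XML-RPC marshaller unescaped; XML 1.0 forbids such characters. Below is a hedged, stdlib-only sketch of the kind of sanitization that avoids this — the helper name `clean_for_xml` and the sample row are illustrative, not Warehouse's actual API:

```python
# Hedged sketch, not Warehouse's code: strip characters that XML 1.0 forbids
# before serializing an XML-RPC response, so a stray \x01 cannot break clients.
import re
import xmlrpc.client

# C0 control characters other than tab, newline and carriage return are illegal in XML 1.0.
_ILLEGAL_XML = re.compile(r"[\x00-\x08\x0b\x0c\x0e-\x1f]")

def clean_for_xml(value: str) -> str:
    """Drop XML-invalid control characters from user-supplied strings."""
    return _ILLEGAL_XML.sub("", value)

row = ["request-s", "0.0.1", 1554118234,
       "add source file request\x01s-0.0.1.tar.gz", 5016503]
safe_row = [clean_for_xml(v) if isinstance(v, str) else v for v in row]

# Marshals cleanly; the raw row would embed a literal \x01 inside a <string> element.
print(xmlrpc.client.dumps(([safe_row],), methodresponse=True))
```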
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport functools\nimport re\nimport xmlrpc.client\nimport xmlrpc.server\n\nfrom typing import List, Mapping, Union\n\nimport elasticsearch\nimport typeguard\n\nfrom elasticsearch_dsl import Q\nfrom packaging.utils import canonicalize_name\nfrom pyramid.view import view_config\nfrom pyramid_rpc.mapper import MapplyViewMapper\nfrom pyramid_rpc.xmlrpc import (\n XmlRpcError,\n XmlRpcInvalidMethodParams,\n exception_view as _exception_view,\n xmlrpc_method as _xmlrpc_method,\n)\nfrom sqlalchemy import func, orm, select\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.accounts.models import User\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.models import (\n File,\n JournalEntry,\n Project,\n Release,\n Role,\n release_classifiers,\n)\nfrom warehouse.search.queries import SEARCH_BOOSTS\n\n# From https://stackoverflow.com/a/22273639\n_illegal_ranges = [\n \"\\x00-\\x08\",\n \"\\x0b-\\x0c\",\n \"\\x0e-\\x1f\",\n \"\\x7f-\\x84\",\n \"\\x86-\\x9f\",\n \"\\ufdd0-\\ufddf\",\n \"\\ufffe-\\uffff\",\n \"\\U0001fffe-\\U0001ffff\",\n \"\\U0002fffe-\\U0002ffff\",\n \"\\U0003fffe-\\U0003ffff\",\n \"\\U0004fffe-\\U0004ffff\",\n \"\\U0005fffe-\\U0005ffff\",\n \"\\U0006fffe-\\U0006ffff\",\n \"\\U0007fffe-\\U0007ffff\",\n \"\\U0008fffe-\\U0008ffff\",\n \"\\U0009fffe-\\U0009ffff\",\n \"\\U000afffe-\\U000affff\",\n \"\\U000bfffe-\\U000bffff\",\n \"\\U000cfffe-\\U000cffff\",\n \"\\U000dfffe-\\U000dffff\",\n \"\\U000efffe-\\U000effff\",\n \"\\U000ffffe-\\U000fffff\",\n \"\\U0010fffe-\\U0010ffff\",\n]\n_illegal_xml_chars_re = re.compile(\"[%s]\" % \"\".join(_illegal_ranges))\n\n\ndef _clean_for_xml(data):\n \"\"\" Sanitize any user-submitted data to ensure that it can be used in XML \"\"\"\n\n # If data is None or an empty string, don't bother\n if data:\n # This turns a string like \"Hello…\" into \"Hello&#8230;\"\n data = data.encode(\"ascii\", \"xmlcharrefreplace\").decode(\"ascii\")\n # However it's still possible that there are invalid characters in the string,\n # so simply remove any of those characters\n return _illegal_xml_chars_re.sub(\"\", data)\n return data\n\n\ndef submit_xmlrpc_metrics(method=None):\n \"\"\"\n Submit metrics.\n \"\"\"\n\n def decorator(f):\n def wrapped(context, request):\n metrics = request.find_service(IMetricsService, context=None)\n metrics.increment(\"warehouse.xmlrpc.call\", tags=[f\"rpc_method:{method}\"])\n with metrics.timed(\n \"warehouse.xmlrpc.timing\", tags=[f\"rpc_method:{method}\"]\n ):\n return f(context, request)\n\n return wrapped\n\n return decorator\n\n\ndef xmlrpc_method(**kwargs):\n \"\"\"\n Support multiple endpoints serving the same views by chaining calls to\n xmlrpc_method\n \"\"\"\n # Add some default arguments\n kwargs.update(\n require_csrf=False,\n require_methods=[\"POST\"],\n decorator=(submit_xmlrpc_metrics(method=kwargs[\"method\"]),),\n mapper=TypedMapplyViewMapper,\n )\n\n def 
decorator(f):\n rpc2 = _xmlrpc_method(endpoint=\"RPC2\", **kwargs)\n pypi = _xmlrpc_method(endpoint=\"pypi\", **kwargs)\n pypi_slash = _xmlrpc_method(endpoint=\"pypi_slash\", **kwargs)\n return rpc2(pypi_slash(pypi(f)))\n\n return decorator\n\n\nxmlrpc_cache_by_project = functools.partial(\n xmlrpc_method,\n xmlrpc_cache=True,\n xmlrpc_cache_expires=48 * 60 * 60, # 48 hours\n xmlrpc_cache_tag=\"project/%s\",\n xmlrpc_cache_arg_index=0,\n xmlrpc_cache_tag_processor=canonicalize_name,\n)\n\n\nxmlrpc_cache_all_projects = functools.partial(\n xmlrpc_method,\n xmlrpc_cache=True,\n xmlrpc_cache_expires=1 * 60 * 60, # 1 hours\n xmlrpc_cache_tag=\"all-projects\",\n)\n\n\nclass XMLRPCServiceUnavailable(XmlRpcError):\n # NOQA due to N815 'mixedCase variable in class scope',\n # This is the interface for specifying fault code and string for XmlRpcError\n faultCode = -32403 # NOQA: ignore=N815\n faultString = \"server error; service unavailable\" # NOQA: ignore=N815\n\n\nclass XMLRPCInvalidParamTypes(XmlRpcInvalidMethodParams):\n def __init__(self, exc):\n self.exc = exc\n\n # NOQA due to N802 'function name should be lowercase'\n # This is the interface for specifying fault string for XmlRpcError\n @property\n def faultString(self): # NOQA: ignore=N802\n return f\"client error; {self.exc}\"\n\n\nclass XMLRPCWrappedError(xmlrpc.server.Fault):\n def __init__(self, exc):\n # NOQA due to N815 'mixedCase variable in class scope',\n # This is the interface for specifying fault code and string for XmlRpcError\n self.faultCode = -32500 # NOQA: ignore=N815\n self.wrapped_exception = exc # NOQA: ignore=N815\n\n # NOQA due to N802 'function name should be lowercase'\n # This is the interface for specifying fault string for XmlRpcError\n @property\n def faultString(self): # NOQA: ignore=N802\n return \"{exc.__class__.__name__}: {exc}\".format(exc=self.wrapped_exception)\n\n\nclass TypedMapplyViewMapper(MapplyViewMapper):\n def mapply(self, fn, args, kwargs):\n try:\n memo = typeguard._CallMemo(fn, args=args, kwargs=kwargs)\n typeguard.check_argument_types(memo)\n except TypeError as exc:\n print(exc)\n raise XMLRPCInvalidParamTypes(exc)\n\n return super().mapply(fn, args, kwargs)\n\n\n@view_config(route_name=\"pypi\", context=Exception, renderer=\"xmlrpc\")\ndef exception_view(exc, request):\n return _exception_view(exc, request)\n\n\n@xmlrpc_method(method=\"search\")\ndef search(request, spec: Mapping[str, Union[str, List[str]]], operator: str = \"and\"):\n if operator not in {\"and\", \"or\"}:\n raise XMLRPCWrappedError(\n ValueError(\"Invalid operator, must be one of 'and' or 'or'.\")\n )\n\n metrics = request.find_service(IMetricsService, context=None)\n\n # Remove any invalid spec fields\n spec = {\n k: [v] if isinstance(v, str) else v\n for k, v in spec.items()\n if v\n and k\n in {\n \"name\",\n \"version\",\n \"author\",\n \"author_email\",\n \"maintainer\",\n \"maintainer_email\",\n \"home_page\",\n \"license\",\n \"summary\",\n \"description\",\n \"keywords\",\n \"platform\",\n \"download_url\",\n }\n }\n\n queries = []\n for field, value in sorted(spec.items()):\n q = None\n for item in value:\n kw = {\"query\": item}\n if field in SEARCH_BOOSTS:\n kw[\"boost\"] = SEARCH_BOOSTS[field]\n if q is None:\n q = Q(\"match\", **{field: kw})\n else:\n q |= Q(\"match\", **{field: kw})\n queries.append(q)\n\n if operator == \"and\":\n query = request.es.query(\"bool\", must=queries)\n else:\n query = request.es.query(\"bool\", should=queries)\n\n try:\n results = query[:100].execute()\n except 
elasticsearch.TransportError:\n metrics.increment(\"warehouse.xmlrpc.search.error\")\n raise XMLRPCServiceUnavailable\n\n metrics.histogram(\"warehouse.xmlrpc.search.results\", len(results))\n\n if \"version\" in spec.keys():\n return [\n {\n \"name\": r.name,\n \"summary\": _clean_for_xml(getattr(r, \"summary\", None)),\n \"version\": v,\n \"_pypi_ordering\": False,\n }\n for r in results\n for v in r.version\n if v in spec.get(\"version\", [v])\n ]\n return [\n {\n \"name\": r.name,\n \"summary\": _clean_for_xml(getattr(r, \"summary\", None)),\n \"version\": r.latest_version,\n \"_pypi_ordering\": False,\n }\n for r in results\n ]\n\n\n@xmlrpc_cache_all_projects(method=\"list_packages\")\ndef list_packages(request):\n names = request.db.query(Project.name).all()\n return [n[0] for n in names]\n\n\n@xmlrpc_cache_all_projects(method=\"list_packages_with_serial\")\ndef list_packages_with_serial(request):\n serials = request.db.query(Project.name, Project.last_serial).all()\n return dict((serial[0], serial[1]) for serial in serials)\n\n\n@xmlrpc_method(method=\"package_hosting_mode\")\ndef package_hosting_mode(request, package_name: str):\n return \"pypi-only\"\n\n\n@xmlrpc_method(method=\"user_packages\")\ndef user_packages(request, username: str):\n roles = (\n request.db.query(Role)\n .join(User, Project)\n .filter(User.username == username)\n .order_by(Role.role_name.desc(), Project.name)\n .all()\n )\n return [(r.role_name, r.project.name) for r in roles]\n\n\n@xmlrpc_method(method=\"top_packages\")\ndef top_packages(request, num=None):\n raise XMLRPCWrappedError(\n RuntimeError(\"This API has been removed. Use BigQuery instead.\")\n )\n\n\n@xmlrpc_cache_by_project(method=\"package_releases\")\ndef package_releases(request, package_name: str, show_hidden: bool = False):\n try:\n project = (\n request.db.query(Project)\n .filter(Project.normalized_name == func.normalize_pep426_name(package_name))\n .one()\n )\n except NoResultFound:\n return []\n\n # This used to support the show_hidden parameter to determine if it should\n # show hidden releases or not. However, Warehouse doesn't support the\n # concept of hidden releases, so this parameter controls if the latest\n # version or all_versions are returned.\n if show_hidden:\n return [v.version for v in project.all_versions]\n else:\n latest_version = project.latest_version\n if latest_version is None:\n return []\n return [latest_version.version]\n\n\n@xmlrpc_method(method=\"package_data\")\ndef package_data(request, package_name, version):\n settings = request.registry.settings\n domain = settings.get(\"warehouse.domain\", request.domain)\n raise XMLRPCWrappedError(\n RuntimeError(\n (\n \"This API has been deprecated. Use \"\n f\"https://{domain}/{package_name}/{version}/json \"\n \"instead. 
The XMLRPC method release_data can be used in the \"\n \"interim, but will be deprecated in the future.\"\n )\n )\n )\n\n\n@xmlrpc_cache_by_project(method=\"release_data\")\ndef release_data(request, package_name: str, version: str):\n try:\n release = (\n request.db.query(Release)\n .options(orm.undefer(\"description\"))\n .join(Project)\n .filter(\n (Project.normalized_name == func.normalize_pep426_name(package_name))\n & (Release.version == version)\n )\n .one()\n )\n except NoResultFound:\n return {}\n\n return {\n \"name\": release.project.name,\n \"version\": release.version,\n \"stable_version\": None,\n \"bugtrack_url\": None,\n \"package_url\": request.route_url(\n \"packaging.project\", name=release.project.name\n ),\n \"release_url\": request.route_url(\n \"packaging.release\", name=release.project.name, version=release.version\n ),\n \"docs_url\": _clean_for_xml(release.project.documentation_url),\n \"home_page\": _clean_for_xml(release.home_page),\n \"download_url\": _clean_for_xml(release.download_url),\n \"project_url\": [_clean_for_xml(url) for url in release.project_urls],\n \"author\": _clean_for_xml(release.author),\n \"author_email\": _clean_for_xml(release.author_email),\n \"maintainer\": _clean_for_xml(release.maintainer),\n \"maintainer_email\": _clean_for_xml(release.maintainer_email),\n \"summary\": _clean_for_xml(release.summary),\n \"description\": _clean_for_xml(release.description),\n \"license\": _clean_for_xml(release.license),\n \"keywords\": _clean_for_xml(release.keywords),\n \"platform\": release.platform,\n \"classifiers\": list(release.classifiers),\n \"requires\": list(release.requires),\n \"requires_dist\": list(release.requires_dist),\n \"provides\": list(release.provides),\n \"provides_dist\": list(release.provides_dist),\n \"obsoletes\": list(release.obsoletes),\n \"obsoletes_dist\": list(release.obsoletes_dist),\n \"requires_python\": release.requires_python,\n \"requires_external\": list(release.requires_external),\n \"_pypi_ordering\": release._pypi_ordering,\n \"downloads\": {\"last_day\": -1, \"last_week\": -1, \"last_month\": -1},\n \"cheesecake_code_kwalitee_id\": None,\n \"cheesecake_documentation_id\": None,\n \"cheesecake_installability_id\": None,\n }\n\n\n@xmlrpc_method(method=\"package_urls\")\ndef package_urls(request, package_name, version):\n settings = request.registry.settings\n domain = settings.get(\"warehouse.domain\", request.domain)\n raise XMLRPCWrappedError(\n RuntimeError(\n (\n \"This API has been deprecated. Use \"\n f\"https://{domain}/{package_name}/{version}/json \"\n \"instead. 
The XMLRPC method release_urls can be used in the \"\n \"interim, but will be deprecated in the future.\"\n )\n )\n )\n\n\n@xmlrpc_cache_by_project(method=\"release_urls\")\ndef release_urls(request, package_name: str, version: str):\n files = (\n request.db.query(File)\n .join(Release, Project)\n .filter(\n (Project.normalized_name == func.normalize_pep426_name(package_name))\n & (Release.version == version)\n )\n .all()\n )\n\n return [\n {\n \"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"size\": f.size,\n \"md5_digest\": f.md5_digest,\n \"sha256_digest\": f.sha256_digest,\n \"digests\": {\"md5\": f.md5_digest, \"sha256\": f.sha256_digest},\n \"has_sig\": f.has_signature,\n \"upload_time\": f.upload_time.isoformat() + \"Z\",\n \"comment_text\": f.comment_text,\n # TODO: Remove this once we've had a long enough time with it\n # here to consider it no longer in use.\n \"downloads\": -1,\n \"path\": f.path,\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n }\n for f in files\n ]\n\n\n@xmlrpc_cache_by_project(method=\"package_roles\")\ndef package_roles(request, package_name: str):\n roles = (\n request.db.query(Role)\n .join(User, Project)\n .filter(Project.normalized_name == func.normalize_pep426_name(package_name))\n .order_by(Role.role_name.desc(), User.username)\n .all()\n )\n return [(r.role_name, r.user.username) for r in roles]\n\n\n@xmlrpc_method(method=\"changelog_last_serial\")\ndef changelog_last_serial(request):\n return request.db.query(func.max(JournalEntry.id)).scalar()\n\n\n@xmlrpc_method(method=\"changelog_since_serial\")\ndef changelog_since_serial(request, serial: int):\n entries = (\n request.db.query(JournalEntry)\n .filter(JournalEntry.id > serial)\n .order_by(JournalEntry.id)\n .limit(50000)\n )\n\n return [\n (\n e.name,\n e.version,\n int(e.submitted_date.replace(tzinfo=datetime.timezone.utc).timestamp()),\n e.action,\n e.id,\n )\n for e in entries\n ]\n\n\n@xmlrpc_method(method=\"changelog\")\ndef changelog(request, since: int, with_ids: bool = False):\n since = datetime.datetime.utcfromtimestamp(since)\n entries = (\n request.db.query(JournalEntry)\n .filter(JournalEntry.submitted_date > since)\n .order_by(JournalEntry.id)\n .limit(50000)\n )\n\n results = (\n (\n e.name,\n e.version,\n int(e.submitted_date.replace(tzinfo=datetime.timezone.utc).timestamp()),\n e.action,\n e.id,\n )\n for e in entries\n )\n\n if with_ids:\n return list(results)\n else:\n return [r[:-1] for r in results]\n\n\n@xmlrpc_method(method=\"browse\")\ndef browse(request, classifiers: List[str]):\n classifiers_q = (\n request.db.query(Classifier)\n .filter(Classifier.classifier.in_(classifiers))\n .subquery()\n )\n\n release_classifiers_q = (\n select([release_classifiers])\n .where(release_classifiers.c.trove_id == classifiers_q.c.id)\n .alias(\"rc\")\n )\n\n releases = (\n request.db.query(Project.name, Release.version)\n .join(Release)\n .join(release_classifiers_q, Release.id == release_classifiers_q.c.release_id)\n .group_by(Project.name, Release.version)\n .having(func.count() == len(classifiers))\n .order_by(Project.name, Release.version)\n .all()\n )\n\n return [(r.name, r.version) for r in releases]\n\n\n@xmlrpc_method(method=\"system.multicall\")\ndef multicall(request, args):\n raise XMLRPCWrappedError(\n ValueError(\n \"MultiCall requests have been deprecated, use individual \"\n \"requests instead.\"\n )\n )\n", "path": "warehouse/legacy/api/xmlrpc/views.py" } ]
[ { "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport functools\nimport re\nimport xmlrpc.client\nimport xmlrpc.server\n\nfrom typing import List, Mapping, Union\n\nimport elasticsearch\nimport typeguard\n\nfrom elasticsearch_dsl import Q\nfrom packaging.utils import canonicalize_name\nfrom pyramid.view import view_config\nfrom pyramid_rpc.mapper import MapplyViewMapper\nfrom pyramid_rpc.xmlrpc import (\n XmlRpcError,\n XmlRpcInvalidMethodParams,\n exception_view as _exception_view,\n xmlrpc_method as _xmlrpc_method,\n)\nfrom sqlalchemy import func, orm, select\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.accounts.models import User\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.models import (\n File,\n JournalEntry,\n Project,\n Release,\n Role,\n release_classifiers,\n)\nfrom warehouse.search.queries import SEARCH_BOOSTS\n\n# From https://stackoverflow.com/a/22273639\n_illegal_ranges = [\n \"\\x00-\\x08\",\n \"\\x0b-\\x0c\",\n \"\\x0e-\\x1f\",\n \"\\x7f-\\x84\",\n \"\\x86-\\x9f\",\n \"\\ufdd0-\\ufddf\",\n \"\\ufffe-\\uffff\",\n \"\\U0001fffe-\\U0001ffff\",\n \"\\U0002fffe-\\U0002ffff\",\n \"\\U0003fffe-\\U0003ffff\",\n \"\\U0004fffe-\\U0004ffff\",\n \"\\U0005fffe-\\U0005ffff\",\n \"\\U0006fffe-\\U0006ffff\",\n \"\\U0007fffe-\\U0007ffff\",\n \"\\U0008fffe-\\U0008ffff\",\n \"\\U0009fffe-\\U0009ffff\",\n \"\\U000afffe-\\U000affff\",\n \"\\U000bfffe-\\U000bffff\",\n \"\\U000cfffe-\\U000cffff\",\n \"\\U000dfffe-\\U000dffff\",\n \"\\U000efffe-\\U000effff\",\n \"\\U000ffffe-\\U000fffff\",\n \"\\U0010fffe-\\U0010ffff\",\n]\n_illegal_xml_chars_re = re.compile(\"[%s]\" % \"\".join(_illegal_ranges))\n\n\ndef _clean_for_xml(data):\n \"\"\" Sanitize any user-submitted data to ensure that it can be used in XML \"\"\"\n\n # If data is None or an empty string, don't bother\n if data:\n # This turns a string like \"Hello…\" into \"Hello&#8230;\"\n data = data.encode(\"ascii\", \"xmlcharrefreplace\").decode(\"ascii\")\n # However it's still possible that there are invalid characters in the string,\n # so simply remove any of those characters\n return _illegal_xml_chars_re.sub(\"\", data)\n return data\n\n\ndef submit_xmlrpc_metrics(method=None):\n \"\"\"\n Submit metrics.\n \"\"\"\n\n def decorator(f):\n def wrapped(context, request):\n metrics = request.find_service(IMetricsService, context=None)\n metrics.increment(\"warehouse.xmlrpc.call\", tags=[f\"rpc_method:{method}\"])\n with metrics.timed(\n \"warehouse.xmlrpc.timing\", tags=[f\"rpc_method:{method}\"]\n ):\n return f(context, request)\n\n return wrapped\n\n return decorator\n\n\ndef xmlrpc_method(**kwargs):\n \"\"\"\n Support multiple endpoints serving the same views by chaining calls to\n xmlrpc_method\n \"\"\"\n # Add some default arguments\n kwargs.update(\n require_csrf=False,\n require_methods=[\"POST\"],\n decorator=(submit_xmlrpc_metrics(method=kwargs[\"method\"]),),\n mapper=TypedMapplyViewMapper,\n )\n\n def 
decorator(f):\n rpc2 = _xmlrpc_method(endpoint=\"RPC2\", **kwargs)\n pypi = _xmlrpc_method(endpoint=\"pypi\", **kwargs)\n pypi_slash = _xmlrpc_method(endpoint=\"pypi_slash\", **kwargs)\n return rpc2(pypi_slash(pypi(f)))\n\n return decorator\n\n\nxmlrpc_cache_by_project = functools.partial(\n xmlrpc_method,\n xmlrpc_cache=True,\n xmlrpc_cache_expires=48 * 60 * 60, # 48 hours\n xmlrpc_cache_tag=\"project/%s\",\n xmlrpc_cache_arg_index=0,\n xmlrpc_cache_tag_processor=canonicalize_name,\n)\n\n\nxmlrpc_cache_all_projects = functools.partial(\n xmlrpc_method,\n xmlrpc_cache=True,\n xmlrpc_cache_expires=1 * 60 * 60, # 1 hours\n xmlrpc_cache_tag=\"all-projects\",\n)\n\n\nclass XMLRPCServiceUnavailable(XmlRpcError):\n # NOQA due to N815 'mixedCase variable in class scope',\n # This is the interface for specifying fault code and string for XmlRpcError\n faultCode = -32403 # NOQA: ignore=N815\n faultString = \"server error; service unavailable\" # NOQA: ignore=N815\n\n\nclass XMLRPCInvalidParamTypes(XmlRpcInvalidMethodParams):\n def __init__(self, exc):\n self.exc = exc\n\n # NOQA due to N802 'function name should be lowercase'\n # This is the interface for specifying fault string for XmlRpcError\n @property\n def faultString(self): # NOQA: ignore=N802\n return f\"client error; {self.exc}\"\n\n\nclass XMLRPCWrappedError(xmlrpc.server.Fault):\n def __init__(self, exc):\n # NOQA due to N815 'mixedCase variable in class scope',\n # This is the interface for specifying fault code and string for XmlRpcError\n self.faultCode = -32500 # NOQA: ignore=N815\n self.wrapped_exception = exc # NOQA: ignore=N815\n\n # NOQA due to N802 'function name should be lowercase'\n # This is the interface for specifying fault string for XmlRpcError\n @property\n def faultString(self): # NOQA: ignore=N802\n return \"{exc.__class__.__name__}: {exc}\".format(exc=self.wrapped_exception)\n\n\nclass TypedMapplyViewMapper(MapplyViewMapper):\n def mapply(self, fn, args, kwargs):\n try:\n memo = typeguard._CallMemo(fn, args=args, kwargs=kwargs)\n typeguard.check_argument_types(memo)\n except TypeError as exc:\n print(exc)\n raise XMLRPCInvalidParamTypes(exc)\n\n return super().mapply(fn, args, kwargs)\n\n\n@view_config(route_name=\"pypi\", context=Exception, renderer=\"xmlrpc\")\ndef exception_view(exc, request):\n return _exception_view(exc, request)\n\n\n@xmlrpc_method(method=\"search\")\ndef search(request, spec: Mapping[str, Union[str, List[str]]], operator: str = \"and\"):\n if operator not in {\"and\", \"or\"}:\n raise XMLRPCWrappedError(\n ValueError(\"Invalid operator, must be one of 'and' or 'or'.\")\n )\n\n metrics = request.find_service(IMetricsService, context=None)\n\n # Remove any invalid spec fields\n spec = {\n k: [v] if isinstance(v, str) else v\n for k, v in spec.items()\n if v\n and k\n in {\n \"name\",\n \"version\",\n \"author\",\n \"author_email\",\n \"maintainer\",\n \"maintainer_email\",\n \"home_page\",\n \"license\",\n \"summary\",\n \"description\",\n \"keywords\",\n \"platform\",\n \"download_url\",\n }\n }\n\n queries = []\n for field, value in sorted(spec.items()):\n q = None\n for item in value:\n kw = {\"query\": item}\n if field in SEARCH_BOOSTS:\n kw[\"boost\"] = SEARCH_BOOSTS[field]\n if q is None:\n q = Q(\"match\", **{field: kw})\n else:\n q |= Q(\"match\", **{field: kw})\n queries.append(q)\n\n if operator == \"and\":\n query = request.es.query(\"bool\", must=queries)\n else:\n query = request.es.query(\"bool\", should=queries)\n\n try:\n results = query[:100].execute()\n except 
elasticsearch.TransportError:\n metrics.increment(\"warehouse.xmlrpc.search.error\")\n raise XMLRPCServiceUnavailable\n\n metrics.histogram(\"warehouse.xmlrpc.search.results\", len(results))\n\n if \"version\" in spec.keys():\n return [\n {\n \"name\": r.name,\n \"summary\": _clean_for_xml(getattr(r, \"summary\", None)),\n \"version\": v,\n \"_pypi_ordering\": False,\n }\n for r in results\n for v in r.version\n if v in spec.get(\"version\", [v])\n ]\n return [\n {\n \"name\": r.name,\n \"summary\": _clean_for_xml(getattr(r, \"summary\", None)),\n \"version\": r.latest_version,\n \"_pypi_ordering\": False,\n }\n for r in results\n ]\n\n\n@xmlrpc_cache_all_projects(method=\"list_packages\")\ndef list_packages(request):\n names = request.db.query(Project.name).all()\n return [n[0] for n in names]\n\n\n@xmlrpc_cache_all_projects(method=\"list_packages_with_serial\")\ndef list_packages_with_serial(request):\n serials = request.db.query(Project.name, Project.last_serial).all()\n return dict((serial[0], serial[1]) for serial in serials)\n\n\n@xmlrpc_method(method=\"package_hosting_mode\")\ndef package_hosting_mode(request, package_name: str):\n return \"pypi-only\"\n\n\n@xmlrpc_method(method=\"user_packages\")\ndef user_packages(request, username: str):\n roles = (\n request.db.query(Role)\n .join(User, Project)\n .filter(User.username == username)\n .order_by(Role.role_name.desc(), Project.name)\n .all()\n )\n return [(r.role_name, r.project.name) for r in roles]\n\n\n@xmlrpc_method(method=\"top_packages\")\ndef top_packages(request, num=None):\n raise XMLRPCWrappedError(\n RuntimeError(\"This API has been removed. Use BigQuery instead.\")\n )\n\n\n@xmlrpc_cache_by_project(method=\"package_releases\")\ndef package_releases(request, package_name: str, show_hidden: bool = False):\n try:\n project = (\n request.db.query(Project)\n .filter(Project.normalized_name == func.normalize_pep426_name(package_name))\n .one()\n )\n except NoResultFound:\n return []\n\n # This used to support the show_hidden parameter to determine if it should\n # show hidden releases or not. However, Warehouse doesn't support the\n # concept of hidden releases, so this parameter controls if the latest\n # version or all_versions are returned.\n if show_hidden:\n return [v.version for v in project.all_versions]\n else:\n latest_version = project.latest_version\n if latest_version is None:\n return []\n return [latest_version.version]\n\n\n@xmlrpc_method(method=\"package_data\")\ndef package_data(request, package_name, version):\n settings = request.registry.settings\n domain = settings.get(\"warehouse.domain\", request.domain)\n raise XMLRPCWrappedError(\n RuntimeError(\n (\n \"This API has been deprecated. Use \"\n f\"https://{domain}/{package_name}/{version}/json \"\n \"instead. 
The XMLRPC method release_data can be used in the \"\n \"interim, but will be deprecated in the future.\"\n )\n )\n )\n\n\n@xmlrpc_cache_by_project(method=\"release_data\")\ndef release_data(request, package_name: str, version: str):\n try:\n release = (\n request.db.query(Release)\n .options(orm.undefer(\"description\"))\n .join(Project)\n .filter(\n (Project.normalized_name == func.normalize_pep426_name(package_name))\n & (Release.version == version)\n )\n .one()\n )\n except NoResultFound:\n return {}\n\n return {\n \"name\": release.project.name,\n \"version\": release.version,\n \"stable_version\": None,\n \"bugtrack_url\": None,\n \"package_url\": request.route_url(\n \"packaging.project\", name=release.project.name\n ),\n \"release_url\": request.route_url(\n \"packaging.release\", name=release.project.name, version=release.version\n ),\n \"docs_url\": _clean_for_xml(release.project.documentation_url),\n \"home_page\": _clean_for_xml(release.home_page),\n \"download_url\": _clean_for_xml(release.download_url),\n \"project_url\": [_clean_for_xml(url) for url in release.project_urls],\n \"author\": _clean_for_xml(release.author),\n \"author_email\": _clean_for_xml(release.author_email),\n \"maintainer\": _clean_for_xml(release.maintainer),\n \"maintainer_email\": _clean_for_xml(release.maintainer_email),\n \"summary\": _clean_for_xml(release.summary),\n \"description\": _clean_for_xml(release.description),\n \"license\": _clean_for_xml(release.license),\n \"keywords\": _clean_for_xml(release.keywords),\n \"platform\": release.platform,\n \"classifiers\": list(release.classifiers),\n \"requires\": list(release.requires),\n \"requires_dist\": list(release.requires_dist),\n \"provides\": list(release.provides),\n \"provides_dist\": list(release.provides_dist),\n \"obsoletes\": list(release.obsoletes),\n \"obsoletes_dist\": list(release.obsoletes_dist),\n \"requires_python\": release.requires_python,\n \"requires_external\": list(release.requires_external),\n \"_pypi_ordering\": release._pypi_ordering,\n \"downloads\": {\"last_day\": -1, \"last_week\": -1, \"last_month\": -1},\n \"cheesecake_code_kwalitee_id\": None,\n \"cheesecake_documentation_id\": None,\n \"cheesecake_installability_id\": None,\n }\n\n\n@xmlrpc_method(method=\"package_urls\")\ndef package_urls(request, package_name, version):\n settings = request.registry.settings\n domain = settings.get(\"warehouse.domain\", request.domain)\n raise XMLRPCWrappedError(\n RuntimeError(\n (\n \"This API has been deprecated. Use \"\n f\"https://{domain}/{package_name}/{version}/json \"\n \"instead. 
The XMLRPC method release_urls can be used in the \"\n \"interim, but will be deprecated in the future.\"\n )\n )\n )\n\n\n@xmlrpc_cache_by_project(method=\"release_urls\")\ndef release_urls(request, package_name: str, version: str):\n files = (\n request.db.query(File)\n .join(Release, Project)\n .filter(\n (Project.normalized_name == func.normalize_pep426_name(package_name))\n & (Release.version == version)\n )\n .all()\n )\n\n return [\n {\n \"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"size\": f.size,\n \"md5_digest\": f.md5_digest,\n \"sha256_digest\": f.sha256_digest,\n \"digests\": {\"md5\": f.md5_digest, \"sha256\": f.sha256_digest},\n \"has_sig\": f.has_signature,\n \"upload_time\": f.upload_time.isoformat() + \"Z\",\n \"comment_text\": f.comment_text,\n # TODO: Remove this once we've had a long enough time with it\n # here to consider it no longer in use.\n \"downloads\": -1,\n \"path\": f.path,\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n }\n for f in files\n ]\n\n\n@xmlrpc_cache_by_project(method=\"package_roles\")\ndef package_roles(request, package_name: str):\n roles = (\n request.db.query(Role)\n .join(User, Project)\n .filter(Project.normalized_name == func.normalize_pep426_name(package_name))\n .order_by(Role.role_name.desc(), User.username)\n .all()\n )\n return [(r.role_name, r.user.username) for r in roles]\n\n\n@xmlrpc_method(method=\"changelog_last_serial\")\ndef changelog_last_serial(request):\n return request.db.query(func.max(JournalEntry.id)).scalar()\n\n\n@xmlrpc_method(method=\"changelog_since_serial\")\ndef changelog_since_serial(request, serial: int):\n entries = (\n request.db.query(JournalEntry)\n .filter(JournalEntry.id > serial)\n .order_by(JournalEntry.id)\n .limit(50000)\n )\n\n return [\n (\n e.name,\n e.version,\n int(e.submitted_date.replace(tzinfo=datetime.timezone.utc).timestamp()),\n _clean_for_xml(e.action),\n e.id,\n )\n for e in entries\n ]\n\n\n@xmlrpc_method(method=\"changelog\")\ndef changelog(request, since: int, with_ids: bool = False):\n since = datetime.datetime.utcfromtimestamp(since)\n entries = (\n request.db.query(JournalEntry)\n .filter(JournalEntry.submitted_date > since)\n .order_by(JournalEntry.id)\n .limit(50000)\n )\n\n results = (\n (\n e.name,\n e.version,\n int(e.submitted_date.replace(tzinfo=datetime.timezone.utc).timestamp()),\n e.action,\n e.id,\n )\n for e in entries\n )\n\n if with_ids:\n return list(results)\n else:\n return [r[:-1] for r in results]\n\n\n@xmlrpc_method(method=\"browse\")\ndef browse(request, classifiers: List[str]):\n classifiers_q = (\n request.db.query(Classifier)\n .filter(Classifier.classifier.in_(classifiers))\n .subquery()\n )\n\n release_classifiers_q = (\n select([release_classifiers])\n .where(release_classifiers.c.trove_id == classifiers_q.c.id)\n .alias(\"rc\")\n )\n\n releases = (\n request.db.query(Project.name, Release.version)\n .join(Release)\n .join(release_classifiers_q, Release.id == release_classifiers_q.c.release_id)\n .group_by(Project.name, Release.version)\n .having(func.count() == len(classifiers))\n .order_by(Project.name, Release.version)\n .all()\n )\n\n return [(r.name, r.version) for r in releases]\n\n\n@xmlrpc_method(method=\"system.multicall\")\ndef multicall(request, args):\n raise XMLRPCWrappedError(\n ValueError(\n \"MultiCall requests have been deprecated, use individual \"\n \"requests instead.\"\n )\n )\n", "path": "warehouse/legacy/api/xmlrpc/views.py" } ]
diff --git a/warehouse/legacy/api/xmlrpc/views.py b/warehouse/legacy/api/xmlrpc/views.py index eca0dc3acf48..a5cfeeb6ea12 100644 --- a/warehouse/legacy/api/xmlrpc/views.py +++ b/warehouse/legacy/api/xmlrpc/views.py @@ -492,7 +492,7 @@ def changelog_since_serial(request, serial: int): e.name, e.version, int(e.submitted_date.replace(tzinfo=datetime.timezone.utc).timestamp()), - e.action, + _clean_for_xml(e.action), e.id, ) for e in entries
Qiskit__qiskit-1371
Revise the documentation TOC subsection handling ### What is the expected enhancement? An issue raised during #1254: > Also Is it possible in the template to show the subsection when that section is viewed. The new fancy design currently does not show subsections in the left-side TOC pane (except for the autodocs). Ideally it should show the subsections and allow collapsing them, with sensible defaults for usability.
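A minimal sketch of the `conf.py` side of such a change, consistent with the modified configuration that appears later in this record; it assumes the theme honors `html_sidebars` and that a custom `_templates/globaltoc.html` sidebar template renders `toctree(maxdepth=4, collapse=False, includehidden=True)` — the template name and directory are conventions of this setup, not something the issue itself mandates:

```python
# doc/conf.py (sketch): ask Sphinx to look for custom sidebar templates
# next to the documentation sources ...
templates_path = ['_templates']

# ... and render a global TOC (including nested subsections) on every page
# by routing all pages through the custom globaltoc.html sidebar template.
html_sidebars = {
    '**': ['globaltoc.html'],
}
```

The TOC depth and collapse behaviour would still need tuning against the Material Design theme's drawer markup.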
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Qiskit documentation build configuration file, created by\n# sphinx-quickstart on Tue Jul 25 18:13:28 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom qiskit import __version__\nsys.path.insert(0, os.path.abspath('.'))\n\n# Imported manually, as otherwise it will not be fully imported.\nimport qiskit.extensions.simulator\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.doctest',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages']\n# Napoleon settings\nnapoleon_google_docstring = True\nnapoleon_numpy_docstring = False\nnapoleon_include_init_with_doc = True\nnapoleon_include_private_with_doc = False\nnapoleon_include_special_with_doc = False\nnapoleon_use_admonition_for_examples = False\nnapoleon_use_admonition_for_notes = False\nnapoleon_use_admonition_for_references = False\nnapoleon_use_ivar = False\nnapoleon_use_param = True\nnapoleon_use_rtype = True\n\nautoclass_content = 'both'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Qiskit Terra'\ncopyright = '2017-2018 IBM'\nauthor = 'IBM'\n\n# Add description\nhtml_context = {\n 'description': 'Qiskit Terra'\n}\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = __version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store',\n '_autodoc/modules.rst', 'de', 'ja']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\n# html_theme = 'bizstyle'\n# html_theme = agogo\n\nhtml_theme = 'sphinx_materialdesign_theme' # use the theme in subdir 'theme'\nhtml_theme_path = ['./'] # make sphinx search for themes in current dir\n\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n # Specify a list of menu in Header.\n # Tuples forms:\n # ('Name', 'external url or path of pages in the document', boolean, 'icon name')\n #\n # Third argument:\n # True indicates an external link.\n # False indicates path of pages in the document.\n #\n # Fourth argument:\n # Specify the icon name.\n # For details see link.\n # https://material.io/icons/\n 'header_links' : [\n ('Home', 'index', False, 'home'),\n (\"ExternalLink\", \"http://example.com\", True, 'launch'),\n (\"NoIconLink\", \"http://example.com\", True, ''),\n (\"GitHub\", \"https://github.com/myyasuda/sphinx_materialdesign_theme\", True, 'link')\n ],\n\n # Customize css colors.\n # For details see link.\n # https://getmdl.io/customize/index.html\n #\n # Values: amber, blue, brown, cyan deep_orange, deep_purple, green, grey, indigo, light_blue,\n # light_green, lime, orange, pink, purple, red, teal, yellow(Default: indigo)\n 'primary_color': 'blue',\n # Values: Same as primary_color. (Default: pink)\n 'accent_color': 'indigo',\n\n # Customize layout.\n # For details see link.\n # https://getmdl.io/components/index.html#layout-section\n 'fixed_drawer': True,\n 'fixed_header': False,\n 'header_waterfall': True,\n 'header_scroll': False,\n\n # Render title in header.\n # Values: True, False (Default: False)\n 'show_header_title': False,\n # Render title in drawer.\n # Values: True, False (Default: True)\n 'show_drawer_title': True,\n # Render footer.\n}\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['./theme/static/']\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = 'theme/static/qiskit-terra-logo.png'\n\nhtml_favicon = 'theme/static/favicon.ico'\n\nhtml_last_updated_fmt = '%Y/%m/%d'\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Qiskitdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Qiskit.tex', 'Qiskit Documentation',\n '''Jim Challenger, Andrew Cross, Ismael Faro, Jay Gambetta''', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'qiskit', 'Qiskit Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Qiskit Terra', 'Qiskit Terra Documentation',\n author, 'Qiskit', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n# Avoid a warning and treat the docstrings of the QasmLexer tokens as verbatim,\n# as PLY uses docstring as a way to define the patterns the token matches.\ndef remove_module_docstring(app, what, name, obj, options, lines):\n if name.startswith('qiskit.qasm._qasmlexer.QasmLexer.t_') and lines:\n lines[0] = u'Token matching: ``%s``' % lines[0]\n\n\ndef setup(app):\n app.connect('autodoc-process-docstring', remove_module_docstring)\n", "path": "doc/conf.py" } ]
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Qiskit documentation build configuration file, created by\n# sphinx-quickstart on Tue Jul 25 18:13:28 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom qiskit import __version__\nsys.path.insert(0, os.path.abspath('.'))\n\n# Imported manually, as otherwise it will not be fully imported.\nimport qiskit.extensions.simulator\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.doctest',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages']\n# Napoleon settings\nnapoleon_google_docstring = True\nnapoleon_numpy_docstring = False\nnapoleon_include_init_with_doc = True\nnapoleon_include_private_with_doc = False\nnapoleon_include_special_with_doc = False\nnapoleon_use_admonition_for_examples = False\nnapoleon_use_admonition_for_notes = False\nnapoleon_use_admonition_for_references = False\nnapoleon_use_ivar = False\nnapoleon_use_param = True\nnapoleon_use_rtype = True\n\nautoclass_content = 'both'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Qiskit Terra'\ncopyright = '2017-2018 IBM'\nauthor = 'IBM'\n\n# Add description\nhtml_context = {\n 'description': 'Qiskit Terra'\n}\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = __version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store',\n '_autodoc/modules.rst', 'de', 'ja']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\n# html_theme = 'bizstyle'\n# html_theme = agogo\n\nhtml_sidebars = {\n '**': ['globaltoc.html']\n}\n\n\nhtml_theme = 'sphinx_materialdesign_theme' # use the theme in subdir 'theme'\nhtml_theme_path = ['./'] # make sphinx search for themes in current dir\n\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n # Specify a list of menu in Header.\n # Tuples forms:\n # ('Name', 'external url or path of pages in the document', boolean, 'icon name')\n #\n # Third argument:\n # True indicates an external link.\n # False indicates path of pages in the document.\n #\n # Fourth argument:\n # Specify the icon name.\n # For details see link.\n # https://material.io/icons/\n 'header_links' : [\n ('Home', 'index', False, 'home'),\n (\"ExternalLink\", \"http://example.com\", True, 'launch'),\n (\"NoIconLink\", \"http://example.com\", True, ''),\n (\"GitHub\", \"https://github.com/myyasuda/sphinx_materialdesign_theme\", True, 'link')\n ],\n\n # Customize css colors.\n # For details see link.\n # https://getmdl.io/customize/index.html\n #\n # Values: amber, blue, brown, cyan deep_orange, deep_purple, green, grey, indigo, light_blue,\n # light_green, lime, orange, pink, purple, red, teal, yellow(Default: indigo)\n 'primary_color': 'blue',\n # Values: Same as primary_color. (Default: pink)\n 'accent_color': 'indigo',\n\n # Customize layout.\n # For details see link.\n # https://getmdl.io/components/index.html#layout-section\n 'fixed_drawer': True,\n 'fixed_header': False,\n 'header_waterfall': True,\n 'header_scroll': False,\n\n # Render title in header.\n # Values: True, False (Default: False)\n 'show_header_title': False,\n # Render title in drawer.\n # Values: True, False (Default: True)\n 'show_drawer_title': True,\n # Render footer.\n}\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['./theme/static/']\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = 'theme/static/qiskit-terra-logo.png'\n\nhtml_favicon = 'theme/static/favicon.ico'\n\nhtml_last_updated_fmt = '%Y/%m/%d'\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Qiskitdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Qiskit.tex', 'Qiskit Documentation',\n '''Jim Challenger, Andrew Cross, Ismael Faro, Jay Gambetta''', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'qiskit', 'Qiskit Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Qiskit Terra', 'Qiskit Terra Documentation',\n author, 'Qiskit', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n# Avoid a warning and treat the docstrings of the QasmLexer tokens as verbatim,\n# as PLY uses docstring as a way to define the patterns the token matches.\ndef remove_module_docstring(app, what, name, obj, options, lines):\n if name.startswith('qiskit.qasm._qasmlexer.QasmLexer.t_') and lines:\n lines[0] = u'Token matching: ``%s``' % lines[0]\n\n\ndef setup(app):\n app.connect('autodoc-process-docstring', remove_module_docstring)\n", "path": "doc/conf.py" } ]
diff --git a/doc/_templates/drawer.html b/doc/_templates/drawer.html new file mode 100644 index 000000000000..6e08c4e7fcfc --- /dev/null +++ b/doc/_templates/drawer.html @@ -0,0 +1,21 @@ +<header class="mdl-layout__drawer"> + {% if theme_show_drawer_title|tobool %} + <!-- Title --> + <span class="mdl-layout-title"> + <a class="title" href="{{ pathto(master_doc) }}"> + {%- if logo %} + <img class="logo" src="{{ pathto('_static/' + logo, 1) }}" alt="{{ project }}"/> + {%- else %} + <span class="title-text"> + {{ project }} + </span> + {%- endif %} + </a> + </span> + {% endif %} + {%- if sidebars != None %} + {%- for sidebartemplate in sidebars %} + {%- include sidebartemplate %} + {%- endfor %} +{%- endif %} +</header> \ No newline at end of file diff --git a/doc/_templates/globaltoc.html b/doc/_templates/globaltoc.html new file mode 100644 index 000000000000..5eb3e1dec18b --- /dev/null +++ b/doc/_templates/globaltoc.html @@ -0,0 +1,15 @@ +{% block menu %} +<div class="globaltoc"> + <span class="mdl-layout-title toc">{{ _('Table Of Contents') }}</span> + {% set toctree = toctree(maxdepth=4, collapse=False, includehidden=True, titles_only=False) %} + {% if toctree %} + {% set lines = toctree.split('\n') %} + <nav class="mdl-navigation"> + {{ toctree }} + </nav> + {% else %} + <!-- Local TOC --> + <nav class="mdl-navigation">{{ toc }}</nav> + {% endif %} + </div> +{% endblock %} \ No newline at end of file diff --git a/doc/conf.py b/doc/conf.py index 277eeef14e9b..6f9ba3a7b534 100755 --- a/doc/conf.py +++ b/doc/conf.py @@ -117,6 +117,11 @@ # html_theme = 'bizstyle' # html_theme = agogo +html_sidebars = { + '**': ['globaltoc.html'] +} + + html_theme = 'sphinx_materialdesign_theme' # use the theme in subdir 'theme' html_theme_path = ['./'] # make sphinx search for themes in current dir diff --git a/doc/theme/static/theme.css b/doc/theme/static/theme.css index 8e08a11be9e8..f274052e22db 100644 --- a/doc/theme/static/theme.css +++ b/doc/theme/static/theme.css @@ -223,7 +223,7 @@ a.reference.external:hover{ font-weight: 200; } .active>span.link-wrapper.title{ - margin-left: 43px !important; + margin-left: 29px !important; height: 32px !important; width: 100% !important; } diff --git a/doc/theme/static/themeExt.js b/doc/theme/static/themeExt.js index f3aa2866b1e5..e17715b15144 100644 --- a/doc/theme/static/themeExt.js +++ b/doc/theme/static/themeExt.js @@ -22,10 +22,10 @@ $(function() { if(text.length > 2) { $link[0].text = text[text.length - 1]; } - const isCurrent = $li.hasClass('current') && !$link.hasClass('current'); + const isCurrent = $li.hasClass('current'); const isActive = $li.hasClass('current') && $link.hasClass('mdl-color-text--primary') const $ul = $li.children('ul'); - if ($ul.hasClass('simple') || ($li.hasClass('toctree-l1') && $ul.length === 0)) { + if ($ul.hasClass('simple') || $ul.length === 0) { $linkWrapper.addClass('simple'); } $li.append($div.append($linkWrapper.append($link))); @@ -47,7 +47,7 @@ $(function() { $ul.attr('id', ulId); $ul.addClass('collapse sublist'); $linkWrapper.addClass('title'); - $linkWrapper.children('a').addClass('ibm-type-b-tight semibold'); + $linkWrapper.children('a').addClass('ibm-type-b-tight'); $div.addClass('title'); $li.append($div.append( $linkWrapper.append( @@ -70,8 +70,13 @@ $(function() { } function collapse() { - $('.mdl-layout__drawer nav .item .title').click(function() { + $('.mdl-layout__drawer nav .item .link-wrapper').click(function() { const $toggle = $(this).children('span .nav-toggle').children('a'); + if($toggle.href !== '#'){ + 
const currentActive = $('.mdl-layout__drawer nav .item.active'); + currentActive.removeClass('active'); + $('.current.mdl-color-text--primary' ).removeClass('mdl-color-text--primary') + } $(this).toggleClass('sectionActive'); const id = $toggle.attr('data-toggle'); $(`ul${id}`).toggleClass('show').animate({height: "toggle", opacity: "toggle"});
systemd__mkosi-1793
Build fails if host does not have /etc/gshadow From my research, `/etc/gshadow` seems to be entirely optional. Both openSUSE and NixOS do not create `/etc/gshadow`. This code expects gshadow to exist on the host and bind mounts `/dev/null` -> `/etc/gshadow` in bubblewrap if the path does not exist in the sysroot we are building: https://github.com/systemd/mkosi/blob/9ae416df714cc87efad876b076ca10b677ed7036/mkosi/run.py#L338-L353 If `/etc/gshadow` does not exist on the host, bubblewrap first needs to create it before bind mounting it. This fails, since we do not have permissions to write to `/etc`. A possible fix is to add `--tmpfs /etc` to the bwrap arguments and add all paths from the host as bind mounts (except for `/etc/passwd`, `/etc/group`, `/etc/shadow`, `/etc/gshadow`). This way, we have permissions to create the mount for `/dev/null` -> `/etc/gshadow`. Alternatively, we can maybe skip the bind mount if the file doesn't exist on the host? Would you be interested in a PR that implements either the tmpfs mount for `/etc` or one that skips the mount if the file doesn't exist on the host?
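For illustration, a minimal sketch of the second alternative (skip the `/dev/null` mask when the host lacks the file), expressed against the `finalize_passwd_mounts()` helper shown in the file contents below. The simplified `list[str]` return type is an assumption made here for self-containment, and this is not necessarily the approach the maintainers adopted:

```python
from pathlib import Path


def finalize_passwd_mounts(root: Path) -> list[str]:
    """Sketch of the 'skip the mount if the host lacks the file' idea."""
    options: list[str] = []
    for f in ("passwd", "group", "shadow", "gshadow"):
        p = root / "etc" / f
        if p.exists():
            # Use the file from the image tree we are building.
            options += ["--bind", str(p), f"/etc/{f}"]
        elif Path("/etc", f).exists():
            # Mask the host's copy with /dev/null, but only when the host
            # actually has one; otherwise bubblewrap would first have to
            # create /etc/<f> on the host, which fails without write
            # access to /etc.
            options += ["--bind", "/dev/null", f"/etc/{f}"]
    return options
```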
[ { "content": "# SPDX-License-Identifier: LGPL-2.1+\n\nimport asyncio\nimport asyncio.tasks\nimport ctypes\nimport ctypes.util\nimport enum\nimport fcntl\nimport logging\nimport os\nimport pwd\nimport queue\nimport shlex\nimport signal\nimport subprocess\nimport sys\nimport tempfile\nimport textwrap\nimport threading\nfrom pathlib import Path\nfrom types import TracebackType\nfrom typing import Any, Awaitable, Mapping, Optional, Sequence, Tuple, Type\n\nfrom mkosi.log import ARG_DEBUG, ARG_DEBUG_SHELL, die\nfrom mkosi.types import _FILE, CompletedProcess, PathString, Popen\nfrom mkosi.util import InvokingUser, flock, make_executable, one_zero\n\nCLONE_NEWNS = 0x00020000\nCLONE_NEWUSER = 0x10000000\n\nSUBRANGE = 65536\n\n\ndef unshare(flags: int) -> None:\n libc_name = ctypes.util.find_library(\"c\")\n if libc_name is None:\n die(\"Could not find libc\")\n libc = ctypes.CDLL(libc_name, use_errno=True)\n\n if libc.unshare(ctypes.c_int(flags)) != 0:\n e = ctypes.get_errno()\n raise OSError(e, os.strerror(e))\n\n\ndef read_subrange(path: Path) -> int:\n uid = str(os.getuid())\n try:\n user = pwd.getpwuid(os.getuid()).pw_name\n except KeyError:\n user = None\n\n for line in path.read_text().splitlines():\n name, start, count = line.split(\":\")\n\n if name == uid or name == user:\n break\n else:\n die(f\"No mapping found for {user or uid} in {path}\")\n\n if int(count) < SUBRANGE:\n die(f\"subuid/subgid range length must be at least {SUBRANGE}, got {count} for {user or uid} from line '{line}'\")\n\n return int(start)\n\n\ndef become_root() -> tuple[int, int]:\n \"\"\"\n Set up a new user namespace mapping using /etc/subuid and /etc/subgid.\n\n The current user will be mapped to root and 65436 will be mapped to the UID/GID of the invoking user.\n The other IDs will be mapped through.\n\n The function returns the UID-GID pair of the invoking user in the namespace (65436, 65436).\n \"\"\"\n if os.getuid() == 0:\n return InvokingUser.uid_gid()\n\n subuid = read_subrange(Path(\"/etc/subuid\"))\n subgid = read_subrange(Path(\"/etc/subgid\"))\n\n pid = os.getpid()\n\n # We map the private UID range configured in /etc/subuid and /etc/subgid into the container using\n # newuidmap and newgidmap. On top of that, we also make sure to map in the user running mkosi so that\n # we can run still chown stuff to that user or run stuff as that user which will make sure any\n # generated files are owned by that user. We don't map to the last user in the range as the last user\n # is sometimes used in tests as a default value and mapping to that user might break those tests.\n newuidmap = [\n \"flock\", \"--exclusive\", \"--no-fork\", \"/etc/subuid\", \"newuidmap\", pid,\n 0, subuid, SUBRANGE - 100,\n SUBRANGE - 100, os.getuid(), 1,\n SUBRANGE - 100 + 1, subuid + SUBRANGE - 100 + 1, 99\n ]\n\n newgidmap = [\n \"flock\", \"--exclusive\", \"--no-fork\", \"/etc/subuid\", \"newgidmap\", pid,\n 0, subgid, SUBRANGE - 100,\n SUBRANGE - 100, os.getgid(), 1,\n SUBRANGE - 100 + 1, subgid + SUBRANGE - 100 + 1, 99\n ]\n\n newuidmap = [str(x) for x in newuidmap]\n newgidmap = [str(x) for x in newgidmap]\n\n # newuidmap and newgidmap have to run from outside the user namespace to be able to assign a uid mapping\n # to the process in the user namespace. The mapping can only be assigned after the user namespace has\n # been unshared. 
To make this work, we first lock /etc/subuid, then spawn the newuidmap and newgidmap\n # processes, which we execute using flock so they don't execute before they can get a lock on /etc/subuid,\n # then we unshare the user namespace and finally we unlock /etc/subuid, which allows the newuidmap and\n # newgidmap processes to execute. we then wait for the processes to finish before continuing.\n with flock(Path(\"/etc/subuid\")) as fd, spawn(newuidmap) as uidmap, spawn(newgidmap) as gidmap:\n unshare(CLONE_NEWUSER)\n fcntl.flock(fd, fcntl.LOCK_UN)\n uidmap.wait()\n gidmap.wait()\n\n # By default, we're root in the user namespace because if we were our current user by default, we\n # wouldn't be able to chown stuff to be owned by root while the reverse is possible.\n os.setresuid(0, 0, 0)\n os.setresgid(0, 0, 0)\n os.setgroups([0])\n\n return SUBRANGE - 100, SUBRANGE - 100\n\n\ndef init_mount_namespace() -> None:\n unshare(CLONE_NEWNS)\n run([\"mount\", \"--make-rslave\", \"/\"])\n\n\ndef foreground(*, new_process_group: bool = True) -> None:\n \"\"\"\n If we're connected to a terminal, put the process in a new process group and make that the foreground\n process group so that only this process receives SIGINT.\n \"\"\"\n STDERR_FILENO = 2\n if os.isatty(STDERR_FILENO):\n if new_process_group:\n os.setpgrp()\n old = signal.signal(signal.SIGTTOU, signal.SIG_IGN)\n os.tcsetpgrp(STDERR_FILENO, os.getpgrp())\n signal.signal(signal.SIGTTOU, old)\n\n\ndef ensure_exc_info() -> Tuple[Type[BaseException], BaseException, TracebackType]:\n exctype, exc, tb = sys.exc_info()\n assert exctype\n assert exc\n assert tb\n return (exctype, exc, tb)\n\n\ndef run(\n cmdline: Sequence[PathString],\n check: bool = True,\n stdin: _FILE = None,\n stdout: _FILE = None,\n stderr: _FILE = None,\n input: Optional[str] = None,\n user: Optional[int] = None,\n group: Optional[int] = None,\n env: Mapping[str, str] = {},\n cwd: Optional[Path] = None,\n log: bool = True,\n) -> CompletedProcess:\n if ARG_DEBUG.get():\n logging.info(f\"+ {' '.join(str(s) for s in cmdline)}\")\n\n cmdline = [os.fspath(x) for x in cmdline]\n\n if not stdout and not stderr:\n # Unless explicit redirection is done, print all subprocess\n # output on stderr, since we do so as well for mkosi's own\n # output.\n stdout = sys.stderr\n\n env = dict(\n PATH=os.environ[\"PATH\"],\n TERM=os.getenv(\"TERM\", \"vt220\"),\n LANG=\"C.UTF-8\",\n ) | env\n\n if ARG_DEBUG.get():\n env[\"SYSTEMD_LOG_LEVEL\"] = \"debug\"\n\n if input is not None:\n assert stdin is None # stdin and input cannot be specified together\n elif stdin is None:\n stdin = subprocess.DEVNULL\n\n try:\n return subprocess.run(\n cmdline,\n check=check,\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n input=input,\n text=True,\n user=user,\n group=group,\n env=env,\n cwd=cwd,\n preexec_fn=foreground,\n )\n except FileNotFoundError:\n die(f\"{cmdline[0]} not found in PATH.\")\n except subprocess.CalledProcessError as e:\n if log:\n logging.error(f\"\\\"{' '.join(str(s) for s in cmdline)}\\\" returned non-zero exit code {e.returncode}.\")\n raise e\n finally:\n foreground(new_process_group=False)\n\n\ndef spawn(\n cmdline: Sequence[PathString],\n stdin: _FILE = None,\n stdout: _FILE = None,\n stderr: _FILE = None,\n user: Optional[int] = None,\n group: Optional[int] = None,\n) -> Popen:\n if ARG_DEBUG.get():\n logging.info(f\"+ {' '.join(str(s) for s in cmdline)}\")\n\n if not stdout and not stderr:\n # Unless explicit redirection is done, print all subprocess\n # output on stderr, since we do 
so as well for mkosi's own\n # output.\n stdout = sys.stderr\n\n try:\n return subprocess.Popen(\n cmdline,\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n text=True,\n user=user,\n group=group,\n preexec_fn=foreground,\n )\n except FileNotFoundError:\n die(f\"{cmdline[0]} not found in PATH.\")\n except subprocess.CalledProcessError as e:\n logging.error(f\"\\\"{' '.join(str(s) for s in cmdline)}\\\" returned non-zero exit code {e.returncode}.\")\n raise e\n\n\n# https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h\nclass Capability(enum.Enum):\n CAP_NET_ADMIN = 12\n\n\ndef have_effective_cap(capability: Capability) -> bool:\n for line in Path(\"/proc/self/status\").read_text().splitlines():\n if line.startswith(\"CapEff:\"):\n hexcap = line.removeprefix(\"CapEff:\").strip()\n break\n else:\n logging.warning(f\"\\\"CapEff:\\\" not found in /proc/self/status, assuming we don't have {capability}\")\n return False\n\n return (int(hexcap, 16) & (1 << capability.value)) != 0\n\n\ndef bwrap(\n cmd: Sequence[PathString],\n *,\n network: bool = False,\n options: Sequence[PathString] = (),\n log: bool = True,\n scripts: Mapping[str, Sequence[PathString]] = {},\n env: Mapping[str, str] = {},\n stdin: _FILE = None,\n input: Optional[str] = None,\n) -> CompletedProcess:\n cmdline: list[PathString] = [\n \"bwrap\",\n \"--dev-bind\", \"/\", \"/\",\n \"--remount-ro\", \"/\",\n \"--ro-bind\", \"/root\", \"/root\",\n \"--ro-bind\", \"/home\", \"/home\",\n \"--ro-bind\", \"/var\", \"/var\",\n \"--ro-bind\", \"/run\", \"/run\",\n \"--bind\", \"/var/tmp\", \"/var/tmp\",\n \"--tmpfs\", \"/tmp\",\n \"--bind\", Path.cwd(), Path.cwd(),\n \"--chdir\", Path.cwd(),\n \"--unshare-pid\",\n \"--unshare-ipc\",\n \"--unshare-cgroup\",\n *([\"--unshare-net\"] if not network and have_effective_cap(Capability.CAP_NET_ADMIN) else []),\n \"--die-with-parent\",\n \"--proc\", \"/proc\",\n \"--dev\", \"/dev\",\n \"--ro-bind\", \"/sys\", \"/sys\",\n \"--setenv\", \"SYSTEMD_OFFLINE\", one_zero(network),\n ]\n\n with tempfile.TemporaryDirectory(prefix=\"mkosi-scripts\") as d:\n\n for name, script in scripts.items():\n # Make sure we don't end up in a recursive loop when we name a script after the binary it execs\n # by removing the scripts directory from the PATH when we execute a script.\n (Path(d) / name).write_text(\n textwrap.dedent(\n f\"\"\"\\\n #!/bin/sh\n PATH=\"$(echo $PATH | tr ':' '\\\\n' | grep -v {Path(d)} | tr '\\\\n' ':')\"\n export PATH\n exec {shlex.join(str(s) for s in script)} \"$@\"\n \"\"\"\n )\n )\n\n make_executable(Path(d) / name)\n\n cmdline += [\n \"--setenv\", \"PATH\", f\"{d}:{os.environ['PATH']}\",\n *options,\n \"sh\", \"-c\", \"chmod 1777 /tmp /dev/shm && exec $0 \\\"$@\\\"\",\n ]\n\n try:\n result = run([*cmdline, *cmd], env=env, log=False, stdin=stdin, input=input)\n except subprocess.CalledProcessError as e:\n if log:\n logging.error(f\"\\\"{' '.join(str(s) for s in cmd)}\\\" returned non-zero exit code {e.returncode}.\")\n if ARG_DEBUG_SHELL.get():\n run([*cmdline, \"sh\"], stdin=sys.stdin, check=False, env=env, log=False)\n raise e\n\n return result\n\n\ndef finalize_passwd_mounts(root: Path) -> list[PathString]:\n \"\"\"\n If passwd or a related file exists in the apivfs directory, bind mount it over the host files while we\n run the command, to make sure that the command we run uses user/group information from the apivfs\n directory instead of from the host. 
If the file doesn't exist yet, mount over /dev/null instead.\n \"\"\"\n options: list[PathString] = []\n\n for f in (\"passwd\", \"group\", \"shadow\", \"gshadow\"):\n p = root / \"etc\" / f\n if p.exists():\n options += [\"--bind\", p, f\"/etc/{f}\"]\n else:\n options += [\"--bind\", \"/dev/null\", f\"/etc/{f}\"]\n\n return options\n\n\ndef apivfs_cmd(root: Path) -> list[PathString]:\n cmdline: list[PathString] = [\n \"bwrap\",\n \"--dev-bind\", \"/\", \"/\",\n \"--chdir\", Path.cwd(),\n \"--tmpfs\", root / \"run\",\n \"--tmpfs\", root / \"tmp\",\n \"--bind\", os.getenv(\"TMPDIR\", \"/var/tmp\"), root / \"var/tmp\",\n \"--proc\", root / \"proc\",\n \"--dev\", root / \"dev\",\n \"--ro-bind\", \"/sys\", root / \"sys\",\n ]\n\n if (root / \"etc/machine-id\").exists():\n # Make sure /etc/machine-id is not overwritten by any package manager post install scripts.\n cmdline += [\"--ro-bind\", root / \"etc/machine-id\", root / \"etc/machine-id\"]\n\n cmdline += finalize_passwd_mounts(root)\n\n chmod = f\"chmod 1777 {root / 'tmp'} {root / 'var/tmp'} {root / 'dev/shm'}\"\n # Make sure anything running in the root directory thinks it's in a container. $container can't always be\n # accessed so we write /run/host/container-manager as well which is always accessible.\n container = f\"mkdir {root}/run/host && echo mkosi >{root}/run/host/container-manager\"\n\n cmdline += [\"sh\", \"-c\", f\"{chmod} && {container} && exec $0 \\\"$@\\\"\"]\n\n return cmdline\n\n\ndef chroot_cmd(root: Path, *, options: Sequence[PathString] = ()) -> list[PathString]:\n cmdline: list[PathString] = [\n \"bwrap\",\n \"--dev-bind\", root, \"/\",\n \"--setenv\", \"container\", \"mkosi\",\n \"--setenv\", \"HOME\", \"/\",\n \"--setenv\", \"PATH\", \"/usr/bin:/usr/sbin\",\n ]\n\n resolve = Path(\"etc/resolv.conf\")\n if (root / resolve).is_symlink():\n # For each component in the target path, bubblewrap will try to create it if it doesn't exist\n # yet. If a component in the path is a dangling symlink, bubblewrap will end up calling\n # mkdir(symlink) which obviously fails if multiple components of the dangling symlink path don't\n # exist yet. 
As a workaround, we resolve the symlink ourselves so that bubblewrap will correctly\n # create all missing components in the target path.\n resolve = resolve.parent / (root / resolve).readlink()\n\n cmdline += [\n \"--ro-bind\", \"/etc/resolv.conf\", Path(\"/\") / resolve,\n *options,\n # No exec here because we need to clean up the /work directory afterwards.\n \"sh\", \"-c\", f\"$0 \\\"$@\\\" && rm -rf {root / 'work'}\",\n ]\n\n return apivfs_cmd(root) + cmdline\n\n\nclass MkosiAsyncioThread(threading.Thread):\n \"\"\"\n The default threading.Thread() is not interruptable, so we make our own version by using the concurrency\n feature in python that is interruptable, namely asyncio.\n\n Additionally, we store any exception that the coroutine raises and re-raise it in join() if no other\n exception was raised before.\n \"\"\"\n\n def __init__(self, target: Awaitable[Any], *args: Any, **kwargs: Any) -> None:\n self.target = target\n self.loop: queue.SimpleQueue[asyncio.AbstractEventLoop] = queue.SimpleQueue()\n self.exc: queue.SimpleQueue[BaseException] = queue.SimpleQueue()\n super().__init__(*args, **kwargs)\n\n def run(self) -> None:\n async def wrapper() -> None:\n self.loop.put(asyncio.get_running_loop())\n await self.target\n\n try:\n asyncio.run(wrapper())\n except asyncio.CancelledError:\n pass\n except BaseException as e:\n self.exc.put(e)\n\n def cancel(self) -> None:\n loop = self.loop.get()\n\n for task in asyncio.tasks.all_tasks(loop):\n loop.call_soon_threadsafe(task.cancel)\n\n def __enter__(self) -> \"MkosiAsyncioThread\":\n self.start()\n return self\n\n def __exit__(\n self,\n type: Optional[Type[BaseException]],\n value: Optional[BaseException],\n traceback: Optional[TracebackType],\n ) -> None:\n self.cancel()\n self.join()\n\n if type is None:\n try:\n raise self.exc.get_nowait()\n except queue.Empty:\n pass\n", "path": "mkosi/run.py" } ]
[ { "content": "# SPDX-License-Identifier: LGPL-2.1+\n\nimport asyncio\nimport asyncio.tasks\nimport ctypes\nimport ctypes.util\nimport enum\nimport fcntl\nimport logging\nimport os\nimport pwd\nimport queue\nimport shlex\nimport signal\nimport subprocess\nimport sys\nimport tempfile\nimport textwrap\nimport threading\nfrom pathlib import Path\nfrom types import TracebackType\nfrom typing import Any, Awaitable, Mapping, Optional, Sequence, Tuple, Type\n\nfrom mkosi.log import ARG_DEBUG, ARG_DEBUG_SHELL, die\nfrom mkosi.types import _FILE, CompletedProcess, PathString, Popen\nfrom mkosi.util import InvokingUser, flock, make_executable, one_zero\n\nCLONE_NEWNS = 0x00020000\nCLONE_NEWUSER = 0x10000000\n\nSUBRANGE = 65536\n\n\ndef unshare(flags: int) -> None:\n libc_name = ctypes.util.find_library(\"c\")\n if libc_name is None:\n die(\"Could not find libc\")\n libc = ctypes.CDLL(libc_name, use_errno=True)\n\n if libc.unshare(ctypes.c_int(flags)) != 0:\n e = ctypes.get_errno()\n raise OSError(e, os.strerror(e))\n\n\ndef read_subrange(path: Path) -> int:\n uid = str(os.getuid())\n try:\n user = pwd.getpwuid(os.getuid()).pw_name\n except KeyError:\n user = None\n\n for line in path.read_text().splitlines():\n name, start, count = line.split(\":\")\n\n if name == uid or name == user:\n break\n else:\n die(f\"No mapping found for {user or uid} in {path}\")\n\n if int(count) < SUBRANGE:\n die(f\"subuid/subgid range length must be at least {SUBRANGE}, got {count} for {user or uid} from line '{line}'\")\n\n return int(start)\n\n\ndef become_root() -> tuple[int, int]:\n \"\"\"\n Set up a new user namespace mapping using /etc/subuid and /etc/subgid.\n\n The current user will be mapped to root and 65436 will be mapped to the UID/GID of the invoking user.\n The other IDs will be mapped through.\n\n The function returns the UID-GID pair of the invoking user in the namespace (65436, 65436).\n \"\"\"\n if os.getuid() == 0:\n return InvokingUser.uid_gid()\n\n subuid = read_subrange(Path(\"/etc/subuid\"))\n subgid = read_subrange(Path(\"/etc/subgid\"))\n\n pid = os.getpid()\n\n # We map the private UID range configured in /etc/subuid and /etc/subgid into the container using\n # newuidmap and newgidmap. On top of that, we also make sure to map in the user running mkosi so that\n # we can run still chown stuff to that user or run stuff as that user which will make sure any\n # generated files are owned by that user. We don't map to the last user in the range as the last user\n # is sometimes used in tests as a default value and mapping to that user might break those tests.\n newuidmap = [\n \"flock\", \"--exclusive\", \"--no-fork\", \"/etc/subuid\", \"newuidmap\", pid,\n 0, subuid, SUBRANGE - 100,\n SUBRANGE - 100, os.getuid(), 1,\n SUBRANGE - 100 + 1, subuid + SUBRANGE - 100 + 1, 99\n ]\n\n newgidmap = [\n \"flock\", \"--exclusive\", \"--no-fork\", \"/etc/subuid\", \"newgidmap\", pid,\n 0, subgid, SUBRANGE - 100,\n SUBRANGE - 100, os.getgid(), 1,\n SUBRANGE - 100 + 1, subgid + SUBRANGE - 100 + 1, 99\n ]\n\n newuidmap = [str(x) for x in newuidmap]\n newgidmap = [str(x) for x in newgidmap]\n\n # newuidmap and newgidmap have to run from outside the user namespace to be able to assign a uid mapping\n # to the process in the user namespace. The mapping can only be assigned after the user namespace has\n # been unshared. 
To make this work, we first lock /etc/subuid, then spawn the newuidmap and newgidmap\n # processes, which we execute using flock so they don't execute before they can get a lock on /etc/subuid,\n # then we unshare the user namespace and finally we unlock /etc/subuid, which allows the newuidmap and\n # newgidmap processes to execute. we then wait for the processes to finish before continuing.\n with flock(Path(\"/etc/subuid\")) as fd, spawn(newuidmap) as uidmap, spawn(newgidmap) as gidmap:\n unshare(CLONE_NEWUSER)\n fcntl.flock(fd, fcntl.LOCK_UN)\n uidmap.wait()\n gidmap.wait()\n\n # By default, we're root in the user namespace because if we were our current user by default, we\n # wouldn't be able to chown stuff to be owned by root while the reverse is possible.\n os.setresuid(0, 0, 0)\n os.setresgid(0, 0, 0)\n os.setgroups([0])\n\n return SUBRANGE - 100, SUBRANGE - 100\n\n\ndef init_mount_namespace() -> None:\n unshare(CLONE_NEWNS)\n run([\"mount\", \"--make-rslave\", \"/\"])\n\n\ndef foreground(*, new_process_group: bool = True) -> None:\n \"\"\"\n If we're connected to a terminal, put the process in a new process group and make that the foreground\n process group so that only this process receives SIGINT.\n \"\"\"\n STDERR_FILENO = 2\n if os.isatty(STDERR_FILENO):\n if new_process_group:\n os.setpgrp()\n old = signal.signal(signal.SIGTTOU, signal.SIG_IGN)\n os.tcsetpgrp(STDERR_FILENO, os.getpgrp())\n signal.signal(signal.SIGTTOU, old)\n\n\ndef ensure_exc_info() -> Tuple[Type[BaseException], BaseException, TracebackType]:\n exctype, exc, tb = sys.exc_info()\n assert exctype\n assert exc\n assert tb\n return (exctype, exc, tb)\n\n\ndef run(\n cmdline: Sequence[PathString],\n check: bool = True,\n stdin: _FILE = None,\n stdout: _FILE = None,\n stderr: _FILE = None,\n input: Optional[str] = None,\n user: Optional[int] = None,\n group: Optional[int] = None,\n env: Mapping[str, str] = {},\n cwd: Optional[Path] = None,\n log: bool = True,\n) -> CompletedProcess:\n if ARG_DEBUG.get():\n logging.info(f\"+ {' '.join(str(s) for s in cmdline)}\")\n\n cmdline = [os.fspath(x) for x in cmdline]\n\n if not stdout and not stderr:\n # Unless explicit redirection is done, print all subprocess\n # output on stderr, since we do so as well for mkosi's own\n # output.\n stdout = sys.stderr\n\n env = dict(\n PATH=os.environ[\"PATH\"],\n TERM=os.getenv(\"TERM\", \"vt220\"),\n LANG=\"C.UTF-8\",\n ) | env\n\n if ARG_DEBUG.get():\n env[\"SYSTEMD_LOG_LEVEL\"] = \"debug\"\n\n if input is not None:\n assert stdin is None # stdin and input cannot be specified together\n elif stdin is None:\n stdin = subprocess.DEVNULL\n\n try:\n return subprocess.run(\n cmdline,\n check=check,\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n input=input,\n text=True,\n user=user,\n group=group,\n env=env,\n cwd=cwd,\n preexec_fn=foreground,\n )\n except FileNotFoundError:\n die(f\"{cmdline[0]} not found in PATH.\")\n except subprocess.CalledProcessError as e:\n if log:\n logging.error(f\"\\\"{' '.join(str(s) for s in cmdline)}\\\" returned non-zero exit code {e.returncode}.\")\n raise e\n finally:\n foreground(new_process_group=False)\n\n\ndef spawn(\n cmdline: Sequence[PathString],\n stdin: _FILE = None,\n stdout: _FILE = None,\n stderr: _FILE = None,\n user: Optional[int] = None,\n group: Optional[int] = None,\n) -> Popen:\n if ARG_DEBUG.get():\n logging.info(f\"+ {' '.join(str(s) for s in cmdline)}\")\n\n if not stdout and not stderr:\n # Unless explicit redirection is done, print all subprocess\n # output on stderr, since we do 
so as well for mkosi's own\n # output.\n stdout = sys.stderr\n\n try:\n return subprocess.Popen(\n cmdline,\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n text=True,\n user=user,\n group=group,\n preexec_fn=foreground,\n )\n except FileNotFoundError:\n die(f\"{cmdline[0]} not found in PATH.\")\n except subprocess.CalledProcessError as e:\n logging.error(f\"\\\"{' '.join(str(s) for s in cmdline)}\\\" returned non-zero exit code {e.returncode}.\")\n raise e\n\n\n# https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h\nclass Capability(enum.Enum):\n CAP_NET_ADMIN = 12\n\n\ndef have_effective_cap(capability: Capability) -> bool:\n for line in Path(\"/proc/self/status\").read_text().splitlines():\n if line.startswith(\"CapEff:\"):\n hexcap = line.removeprefix(\"CapEff:\").strip()\n break\n else:\n logging.warning(f\"\\\"CapEff:\\\" not found in /proc/self/status, assuming we don't have {capability}\")\n return False\n\n return (int(hexcap, 16) & (1 << capability.value)) != 0\n\n\ndef bwrap(\n cmd: Sequence[PathString],\n *,\n network: bool = False,\n options: Sequence[PathString] = (),\n log: bool = True,\n scripts: Mapping[str, Sequence[PathString]] = {},\n env: Mapping[str, str] = {},\n stdin: _FILE = None,\n input: Optional[str] = None,\n) -> CompletedProcess:\n cmdline: list[PathString] = [\n \"bwrap\",\n \"--dev-bind\", \"/\", \"/\",\n \"--remount-ro\", \"/\",\n \"--ro-bind\", \"/root\", \"/root\",\n \"--ro-bind\", \"/home\", \"/home\",\n \"--ro-bind\", \"/var\", \"/var\",\n \"--ro-bind\", \"/run\", \"/run\",\n \"--bind\", \"/var/tmp\", \"/var/tmp\",\n \"--tmpfs\", \"/tmp\",\n \"--bind\", Path.cwd(), Path.cwd(),\n \"--chdir\", Path.cwd(),\n \"--unshare-pid\",\n \"--unshare-ipc\",\n \"--unshare-cgroup\",\n *([\"--unshare-net\"] if not network and have_effective_cap(Capability.CAP_NET_ADMIN) else []),\n \"--die-with-parent\",\n \"--proc\", \"/proc\",\n \"--dev\", \"/dev\",\n \"--ro-bind\", \"/sys\", \"/sys\",\n \"--setenv\", \"SYSTEMD_OFFLINE\", one_zero(network),\n ]\n\n with tempfile.TemporaryDirectory(prefix=\"mkosi-scripts\") as d:\n\n for name, script in scripts.items():\n # Make sure we don't end up in a recursive loop when we name a script after the binary it execs\n # by removing the scripts directory from the PATH when we execute a script.\n (Path(d) / name).write_text(\n textwrap.dedent(\n f\"\"\"\\\n #!/bin/sh\n PATH=\"$(echo $PATH | tr ':' '\\\\n' | grep -v {Path(d)} | tr '\\\\n' ':')\"\n export PATH\n exec {shlex.join(str(s) for s in script)} \"$@\"\n \"\"\"\n )\n )\n\n make_executable(Path(d) / name)\n\n cmdline += [\n \"--setenv\", \"PATH\", f\"{d}:{os.environ['PATH']}\",\n *options,\n \"sh\", \"-c\", \"chmod 1777 /tmp /dev/shm && exec $0 \\\"$@\\\"\",\n ]\n\n try:\n result = run([*cmdline, *cmd], env=env, log=False, stdin=stdin, input=input)\n except subprocess.CalledProcessError as e:\n if log:\n logging.error(f\"\\\"{' '.join(str(s) for s in cmd)}\\\" returned non-zero exit code {e.returncode}.\")\n if ARG_DEBUG_SHELL.get():\n run([*cmdline, \"sh\"], stdin=sys.stdin, check=False, env=env, log=False)\n raise e\n\n return result\n\n\ndef finalize_passwd_mounts(root: Path) -> list[PathString]:\n \"\"\"\n If passwd or a related file exists in the apivfs directory, bind mount it over the host files while we\n run the command, to make sure that the command we run uses user/group information from the apivfs\n directory instead of from the host. 
If the file doesn't exist yet, mount over /dev/null instead.\n \"\"\"\n options: list[PathString] = []\n\n for f in (\"passwd\", \"group\", \"shadow\", \"gshadow\"):\n if not (Path(\"etc\") / f).exists():\n continue\n p = root / \"etc\" / f\n if p.exists():\n options += [\"--bind\", p, f\"/etc/{f}\"]\n else:\n options += [\"--bind\", \"/dev/null\", f\"/etc/{f}\"]\n\n return options\n\n\ndef apivfs_cmd(root: Path) -> list[PathString]:\n cmdline: list[PathString] = [\n \"bwrap\",\n \"--dev-bind\", \"/\", \"/\",\n \"--chdir\", Path.cwd(),\n \"--tmpfs\", root / \"run\",\n \"--tmpfs\", root / \"tmp\",\n \"--bind\", os.getenv(\"TMPDIR\", \"/var/tmp\"), root / \"var/tmp\",\n \"--proc\", root / \"proc\",\n \"--dev\", root / \"dev\",\n \"--ro-bind\", \"/sys\", root / \"sys\",\n ]\n\n if (root / \"etc/machine-id\").exists():\n # Make sure /etc/machine-id is not overwritten by any package manager post install scripts.\n cmdline += [\"--ro-bind\", root / \"etc/machine-id\", root / \"etc/machine-id\"]\n\n cmdline += finalize_passwd_mounts(root)\n\n chmod = f\"chmod 1777 {root / 'tmp'} {root / 'var/tmp'} {root / 'dev/shm'}\"\n # Make sure anything running in the root directory thinks it's in a container. $container can't always be\n # accessed so we write /run/host/container-manager as well which is always accessible.\n container = f\"mkdir {root}/run/host && echo mkosi >{root}/run/host/container-manager\"\n\n cmdline += [\"sh\", \"-c\", f\"{chmod} && {container} && exec $0 \\\"$@\\\"\"]\n\n return cmdline\n\n\ndef chroot_cmd(root: Path, *, options: Sequence[PathString] = ()) -> list[PathString]:\n cmdline: list[PathString] = [\n \"bwrap\",\n \"--dev-bind\", root, \"/\",\n \"--setenv\", \"container\", \"mkosi\",\n \"--setenv\", \"HOME\", \"/\",\n \"--setenv\", \"PATH\", \"/usr/bin:/usr/sbin\",\n ]\n\n resolve = Path(\"etc/resolv.conf\")\n if (root / resolve).is_symlink():\n # For each component in the target path, bubblewrap will try to create it if it doesn't exist\n # yet. If a component in the path is a dangling symlink, bubblewrap will end up calling\n # mkdir(symlink) which obviously fails if multiple components of the dangling symlink path don't\n # exist yet. 
As a workaround, we resolve the symlink ourselves so that bubblewrap will correctly\n # create all missing components in the target path.\n resolve = resolve.parent / (root / resolve).readlink()\n\n cmdline += [\n \"--ro-bind\", \"/etc/resolv.conf\", Path(\"/\") / resolve,\n *options,\n # No exec here because we need to clean up the /work directory afterwards.\n \"sh\", \"-c\", f\"$0 \\\"$@\\\" && rm -rf {root / 'work'}\",\n ]\n\n return apivfs_cmd(root) + cmdline\n\n\nclass MkosiAsyncioThread(threading.Thread):\n \"\"\"\n The default threading.Thread() is not interruptable, so we make our own version by using the concurrency\n feature in python that is interruptable, namely asyncio.\n\n Additionally, we store any exception that the coroutine raises and re-raise it in join() if no other\n exception was raised before.\n \"\"\"\n\n def __init__(self, target: Awaitable[Any], *args: Any, **kwargs: Any) -> None:\n self.target = target\n self.loop: queue.SimpleQueue[asyncio.AbstractEventLoop] = queue.SimpleQueue()\n self.exc: queue.SimpleQueue[BaseException] = queue.SimpleQueue()\n super().__init__(*args, **kwargs)\n\n def run(self) -> None:\n async def wrapper() -> None:\n self.loop.put(asyncio.get_running_loop())\n await self.target\n\n try:\n asyncio.run(wrapper())\n except asyncio.CancelledError:\n pass\n except BaseException as e:\n self.exc.put(e)\n\n def cancel(self) -> None:\n loop = self.loop.get()\n\n for task in asyncio.tasks.all_tasks(loop):\n loop.call_soon_threadsafe(task.cancel)\n\n def __enter__(self) -> \"MkosiAsyncioThread\":\n self.start()\n return self\n\n def __exit__(\n self,\n type: Optional[Type[BaseException]],\n value: Optional[BaseException],\n traceback: Optional[TracebackType],\n ) -> None:\n self.cancel()\n self.join()\n\n if type is None:\n try:\n raise self.exc.get_nowait()\n except queue.Empty:\n pass\n", "path": "mkosi/run.py" } ]
diff --git a/mkosi/run.py b/mkosi/run.py
index e1ed6ca84..96290b5ae 100644
--- a/mkosi/run.py
+++ b/mkosi/run.py
@@ -344,6 +344,8 @@ def finalize_passwd_mounts(root: Path) -> list[PathString]:
     options: list[PathString] = []
 
     for f in ("passwd", "group", "shadow", "gshadow"):
+        if not (Path("etc") / f).exists():
+            continue
         p = root / "etc" / f
         if p.exists():
             options += ["--bind", p, f"/etc/{f}"]
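The hunk above guards the /etc bind mounts: an entry is skipped before any `--bind` option is emitted when its `etc/<name>` reference path does not exist. Below is a minimal, self-contained sketch of that option-building pattern, simplified from `finalize_passwd_mounts()`; the signature, the `reference` parameter, and the use of an absolute `/etc` default are illustrative assumptions (the actual change tests a path relative to the working directory).

```python
# Sketch of the conditional option-building pattern used by
# finalize_passwd_mounts(): skip an entry when its reference path is missing,
# bind the file from the image root when present, otherwise mask it with
# /dev/null. Signature and return type simplified for illustration.
from pathlib import Path


def passwd_mount_options(root: Path, reference: Path = Path("/etc")) -> list[str]:
    options: list[str] = []
    for f in ("passwd", "group", "shadow", "gshadow"):
        if not (reference / f).exists():
            continue
        p = root / "etc" / f
        if p.exists():
            options += ["--bind", str(p), f"/etc/{f}"]
        else:
            options += ["--bind", "/dev/null", f"/etc/{f}"]
    return options


print(passwd_mount_options(Path("/tmp/buildroot")))  # hypothetical image root
```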
mne-tools__mne-python-4664
UserWarning in decoding_rsa example
Running this example, I get the following warning

decoding_rsa.py:94: RuntimeWarning: More events than colors available. You should pass a list of unique colors.
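The warning is raised by the example's `mne.viz.plot_events` call once the number of distinct event codes exceeds the default colour cycle; the patch later in this record simply removes that call. As an alternative, here is a hedged sketch of supplying one colour per event id — the `color=` keyword and the dict form are assumptions about the mne plotting API of that era, and the events array is a stand-in, not data from the example.

```python
# Hedged sketch: derive one distinct colour per event id with matplotlib and
# hand the mapping to the plotting call. The color= keyword is an assumption
# about mne.viz.plot_events; the upstream fix in this record drops the call.
import numpy as np
import matplotlib.pyplot as plt
import mne

# Stand-in events array with rows of (sample, previous value, event id).
events = np.array([[100 * (i + 1), 0, i + 1] for i in range(24)])

unique_ids = np.unique(events[:, 2])
cmap = plt.get_cmap("rainbow", len(unique_ids))
colors = {int(event_id): cmap(i) for i, event_id in enumerate(unique_ids)}

mne.viz.plot_events(events, sfreq=1000., color=colors)
```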
[ { "content": "\"\"\"\n\n.. _rsa_noplot:\n\n====================================\nRepresentational Similarity Analysis\n====================================\n\nRepresentational Similarity Analysis is used to perform summary statistics\non supervised classifications where the number of classes is relatively high.\nIt consists in characterizing the structure of the confusion matrix to infer\nthe similarity between brain responses and serves as a proxy for characterizing\nthe space of mental representations [1]_ [2]_ [3]_.\n\nIn this example, we perform RSA on responses to 24 object images (among\na list of 92 images). Subjects were presented with images of human, animal\nand inanimate objects [4]_. Here we use the 24 unique images of faces\nand body parts.\n\n.. note:: this example will download a very large (~6GB) file, so we will not\n build the images below.\n\nReferences\n----------\n\n.. [1] Shepard, R. \"Multidimensional scaling, tree-fitting, and clustering.\"\n Science 210.4468 (1980): 390-398.\n.. [2] Laakso, A. & Cottrell, G.. \"Content and cluster analysis:\n assessing representational similarity in neural systems.\" Philosophical\n psychology 13.1 (2000): 47-76.\n.. [3] Kriegeskorte, N., Marieke, M., & Bandettini. P. \"Representational\n similarity analysis-connecting the branches of systems neuroscience.\"\n Frontiers in systems neuroscience 2 (2008): 4.\n.. [4] Cichy, R. M., Pantazis, D., & Oliva, A. \"Resolving human object\n recognition in space and time.\" Nature neuroscience (2014): 17(3),\n 455-462.\n\"\"\"\n\n# Authors: Jean-Remi King <[email protected]>\n# Jaakko Leppakangas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\nfrom pandas import read_csv\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.manifold import MDS\n\nimport mne\nfrom mne.io import read_raw_fif, concatenate_raws\nfrom mne.datasets import visual_92_categories\n\nprint(__doc__)\n\ndata_path = visual_92_categories.data_path()\n\n# Define stimulus - trigger mapping\nfname = op.join(data_path, 'visual_stimuli.csv')\nconds = read_csv(fname)\nprint(conds.head(5))\n\n##############################################################################\n# Let's restrict the number of conditions to speed up computation\nmax_trigger = 24\nconds = conds[:max_trigger] # take only the first 24 rows\n\n##############################################################################\n# Define stimulus - trigger mapping\nconditions = []\nfor c in conds.values:\n cond_tags = list(c[:2])\n cond_tags += [('not-' if i == 0 else '') + conds.columns[k]\n for k, i in enumerate(c[2:], 2)]\n conditions.append('/'.join(map(str, cond_tags)))\nprint(conditions[:10])\n\n##############################################################################\n# Let's make the event_id dictionary\nevent_id = dict(zip(conditions, conds.trigger + 1))\nevent_id['0/human bodypart/human/not-face/animal/natural']\n\n##############################################################################\n# Read MEG data\nn_runs = 4 # 4 for full data (use less to speed up computations)\nfname = op.join(data_path, 'sample_subject_%i_tsss_mc.fif')\nraws = [read_raw_fif(fname % block) for block in range(n_runs)]\nraw = 
concatenate_raws(raws)\n\nevents = mne.find_events(raw, min_duration=.002)\n\nevents = events[events[:, 2] <= max_trigger]\nmne.viz.plot_events(events, sfreq=raw.info['sfreq'])\n\n##############################################################################\n# Epoch data\npicks = mne.pick_types(raw.info, meg=True)\nepochs = mne.Epochs(raw, events=events, event_id=event_id, baseline=None,\n picks=picks, tmin=-.1, tmax=.500, preload=True)\n\n##############################################################################\n# Let's plot some conditions\nepochs['face'].average().plot()\nepochs['not-face'].average().plot()\n\n##############################################################################\n# Representational Similarity Analysis (RSA) is a neuroimaging-specific\n# appelation to refer to statistics applied to the confusion matrix\n# also referred to as the representational dissimilarity matrices (RDM).\n#\n# Compared to the approach from Cichy et al. we'll use a multiclass\n# classifier (Multinomial Logistic Regression) while the paper uses\n# all pairwise binary classification task to make the RDM.\n# Also we use here the ROC-AUC as performance metric while the\n# paper uses accuracy. Finally here for the sake of time we use\n# RSA on a window of data while Cichy et al. did it for all time\n# instants separately.\n\n# Classify using the average signal in the window 50ms to 300ms\n# to focus the classifier on the time interval with best SNR.\nclf = make_pipeline(StandardScaler(),\n LogisticRegression(C=1, solver='lbfgs'))\nX = epochs.copy().crop(0.05, 0.3).get_data().mean(axis=2)\ny = epochs.events[:, 2]\n\nclasses = set(y)\ncv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)\n\n# Compute confusion matrix for each cross-validation fold\ny_pred = np.zeros((len(y), len(classes)))\nfor train, test in cv.split(X, y):\n # Fit\n clf.fit(X[train], y[train])\n # Probabilistic prediction (necessary for ROC-AUC scoring metric)\n y_pred[test] = clf.predict_proba(X[test])\n\n##############################################################################\n# Compute confusion matrix using ROC-AUC\nconfusion = np.zeros((len(classes), len(classes)))\nfor ii, train_class in enumerate(classes):\n for jj in range(ii, len(classes)):\n confusion[ii, jj] = roc_auc_score(y == train_class, y_pred[:, jj])\n confusion[jj, ii] = confusion[ii, jj]\n\n##############################################################################\n# Plot\nlabels = [''] * 5 + ['face'] + [''] * 11 + ['bodypart'] + [''] * 6\nfig, ax = plt.subplots(1)\nim = ax.matshow(confusion, cmap='RdBu_r', clim=[0.3, 0.7])\nax.set_yticks(range(len(classes)))\nax.set_yticklabels(labels)\nax.set_xticks(range(len(classes)))\nax.set_xticklabels(labels, rotation=40, ha='left')\nax.axhline(11.5, color='k')\nax.axvline(11.5, color='k')\nplt.colorbar(im)\nplt.tight_layout()\nplt.show()\n\n##############################################################################\n# Confusion matrix related to mental representations have been historically\n# summarized with dimensionality reduction using multi-dimensional scaling [1].\n# See how the face samples cluster together.\nfig, ax = plt.subplots(1)\nmds = MDS(2, random_state=0, dissimilarity='precomputed')\nchance = 0.5\nsummary = mds.fit_transform(chance - confusion)\ncmap = plt.get_cmap('rainbow')\ncolors = ['r', 'b']\nnames = list(conds['condition'].values)\nfor color, name in zip(colors, set(names)):\n sel = np.where([this_name == name for this_name in names])[0]\n size = 500 if name == 'human 
face' else 100\n ax.scatter(summary[sel, 0], summary[sel, 1], s=size,\n facecolors=color, label=name, edgecolors='k')\nax.axis('off')\nax.legend(loc='lower right', scatterpoints=1, ncol=2)\nplt.tight_layout()\nplt.show()\n", "path": "examples/decoding/decoding_rsa.py" } ]
[ { "content": "\"\"\"\n\n.. _rsa_noplot:\n\n====================================\nRepresentational Similarity Analysis\n====================================\n\nRepresentational Similarity Analysis is used to perform summary statistics\non supervised classifications where the number of classes is relatively high.\nIt consists in characterizing the structure of the confusion matrix to infer\nthe similarity between brain responses and serves as a proxy for characterizing\nthe space of mental representations [1]_ [2]_ [3]_.\n\nIn this example, we perform RSA on responses to 24 object images (among\na list of 92 images). Subjects were presented with images of human, animal\nand inanimate objects [4]_. Here we use the 24 unique images of faces\nand body parts.\n\n.. note:: this example will download a very large (~6GB) file, so we will not\n build the images below.\n\nReferences\n----------\n\n.. [1] Shepard, R. \"Multidimensional scaling, tree-fitting, and clustering.\"\n Science 210.4468 (1980): 390-398.\n.. [2] Laakso, A. & Cottrell, G.. \"Content and cluster analysis:\n assessing representational similarity in neural systems.\" Philosophical\n psychology 13.1 (2000): 47-76.\n.. [3] Kriegeskorte, N., Marieke, M., & Bandettini. P. \"Representational\n similarity analysis-connecting the branches of systems neuroscience.\"\n Frontiers in systems neuroscience 2 (2008): 4.\n.. [4] Cichy, R. M., Pantazis, D., & Oliva, A. \"Resolving human object\n recognition in space and time.\" Nature neuroscience (2014): 17(3),\n 455-462.\n\"\"\"\n\n# Authors: Jean-Remi King <[email protected]>\n# Jaakko Leppakangas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\nfrom pandas import read_csv\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.manifold import MDS\n\nimport mne\nfrom mne.io import read_raw_fif, concatenate_raws\nfrom mne.datasets import visual_92_categories\n\nprint(__doc__)\n\ndata_path = visual_92_categories.data_path()\n\n# Define stimulus - trigger mapping\nfname = op.join(data_path, 'visual_stimuli.csv')\nconds = read_csv(fname)\nprint(conds.head(5))\n\n##############################################################################\n# Let's restrict the number of conditions to speed up computation\nmax_trigger = 24\nconds = conds[:max_trigger] # take only the first 24 rows\n\n##############################################################################\n# Define stimulus - trigger mapping\nconditions = []\nfor c in conds.values:\n cond_tags = list(c[:2])\n cond_tags += [('not-' if i == 0 else '') + conds.columns[k]\n for k, i in enumerate(c[2:], 2)]\n conditions.append('/'.join(map(str, cond_tags)))\nprint(conditions[:10])\n\n##############################################################################\n# Let's make the event_id dictionary\nevent_id = dict(zip(conditions, conds.trigger + 1))\nevent_id['0/human bodypart/human/not-face/animal/natural']\n\n##############################################################################\n# Read MEG data\nn_runs = 4 # 4 for full data (use less to speed up computations)\nfname = op.join(data_path, 'sample_subject_%i_tsss_mc.fif')\nraws = [read_raw_fif(fname % block) for block in range(n_runs)]\nraw = 
concatenate_raws(raws)\n\nevents = mne.find_events(raw, min_duration=.002)\n\nevents = events[events[:, 2] <= max_trigger]\n\n##############################################################################\n# Epoch data\npicks = mne.pick_types(raw.info, meg=True)\nepochs = mne.Epochs(raw, events=events, event_id=event_id, baseline=None,\n picks=picks, tmin=-.1, tmax=.500, preload=True)\n\n##############################################################################\n# Let's plot some conditions\nepochs['face'].average().plot()\nepochs['not-face'].average().plot()\n\n##############################################################################\n# Representational Similarity Analysis (RSA) is a neuroimaging-specific\n# appelation to refer to statistics applied to the confusion matrix\n# also referred to as the representational dissimilarity matrices (RDM).\n#\n# Compared to the approach from Cichy et al. we'll use a multiclass\n# classifier (Multinomial Logistic Regression) while the paper uses\n# all pairwise binary classification task to make the RDM.\n# Also we use here the ROC-AUC as performance metric while the\n# paper uses accuracy. Finally here for the sake of time we use\n# RSA on a window of data while Cichy et al. did it for all time\n# instants separately.\n\n# Classify using the average signal in the window 50ms to 300ms\n# to focus the classifier on the time interval with best SNR.\nclf = make_pipeline(StandardScaler(),\n LogisticRegression(C=1, solver='lbfgs'))\nX = epochs.copy().crop(0.05, 0.3).get_data().mean(axis=2)\ny = epochs.events[:, 2]\n\nclasses = set(y)\ncv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)\n\n# Compute confusion matrix for each cross-validation fold\ny_pred = np.zeros((len(y), len(classes)))\nfor train, test in cv.split(X, y):\n # Fit\n clf.fit(X[train], y[train])\n # Probabilistic prediction (necessary for ROC-AUC scoring metric)\n y_pred[test] = clf.predict_proba(X[test])\n\n##############################################################################\n# Compute confusion matrix using ROC-AUC\nconfusion = np.zeros((len(classes), len(classes)))\nfor ii, train_class in enumerate(classes):\n for jj in range(ii, len(classes)):\n confusion[ii, jj] = roc_auc_score(y == train_class, y_pred[:, jj])\n confusion[jj, ii] = confusion[ii, jj]\n\n##############################################################################\n# Plot\nlabels = [''] * 5 + ['face'] + [''] * 11 + ['bodypart'] + [''] * 6\nfig, ax = plt.subplots(1)\nim = ax.matshow(confusion, cmap='RdBu_r', clim=[0.3, 0.7])\nax.set_yticks(range(len(classes)))\nax.set_yticklabels(labels)\nax.set_xticks(range(len(classes)))\nax.set_xticklabels(labels, rotation=40, ha='left')\nax.axhline(11.5, color='k')\nax.axvline(11.5, color='k')\nplt.colorbar(im)\nplt.tight_layout()\nplt.show()\n\n##############################################################################\n# Confusion matrix related to mental representations have been historically\n# summarized with dimensionality reduction using multi-dimensional scaling [1].\n# See how the face samples cluster together.\nfig, ax = plt.subplots(1)\nmds = MDS(2, random_state=0, dissimilarity='precomputed')\nchance = 0.5\nsummary = mds.fit_transform(chance - confusion)\ncmap = plt.get_cmap('rainbow')\ncolors = ['r', 'b']\nnames = list(conds['condition'].values)\nfor color, name in zip(colors, set(names)):\n sel = np.where([this_name == name for this_name in names])[0]\n size = 500 if name == 'human face' else 100\n ax.scatter(summary[sel, 0], 
summary[sel, 1], s=size,\n facecolors=color, label=name, edgecolors='k')\nax.axis('off')\nax.legend(loc='lower right', scatterpoints=1, ncol=2)\nplt.tight_layout()\nplt.show()\n", "path": "examples/decoding/decoding_rsa.py" } ]
diff --git a/examples/decoding/decoding_rsa.py b/examples/decoding/decoding_rsa.py
index 4d4925e7cbf..6ccf1ffe60c 100644
--- a/examples/decoding/decoding_rsa.py
+++ b/examples/decoding/decoding_rsa.py
@@ -97,7 +97,6 @@
 events = mne.find_events(raw, min_duration=.002)
 
 events = events[events[:, 2] <= max_trigger]
-mne.viz.plot_events(events, sfreq=raw.info['sfreq'])
 
 ##############################################################################
 # Epoch data
ESMCI__cime-3605
NLCOMP fails with python3 because dictionaries no longer support `has_key`
When using python3, I get:
```
$ ./case.cmpgen_namelists
Comparing namelists with baselines 'lilac_0703a'
Generating namelists to baselines 'lilac_0703b'
Exception during namelist operations: 'dict' object has no attribute 'has_key'
Traceback (most recent call last):
  File "/Users/sacks/ctsm/ctsm3/cime/scripts/Tools/../../scripts/lib/CIME/case/case_cmpgen_namelists.py", line 123, in case_cmpgen_namelists
    success, output = _do_full_nl_comp(self, test_name, compare_name, baseline_root)
  File "/Users/sacks/ctsm/ctsm3/cime/scripts/Tools/../../scripts/lib/CIME/case/case_cmpgen_namelists.py", line 45, in _do_full_nl_comp
    success, current_comments = compare_runconfigfiles(baseline_counterpart, item, test)
  File "/Users/sacks/ctsm/ctsm3/cime/scripts/Tools/../../scripts/lib/CIME/simple_compare.py", line 171, in compare_runconfigfiles
    comments = findDiff(gold_dict, compare_dict, case=case)
  File "/Users/sacks/ctsm/ctsm3/cime/scripts/Tools/../../scripts/lib/CIME/simple_compare.py", line 215, in findDiff
    if not d2.has_key(k):
AttributeError: 'dict' object has no attribute 'has_key'
```
I have a fix incoming.
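The AttributeError is the standard Python 3 symptom of `dict.has_key()`, which was removed in Python 3; the membership operator is the portable replacement, and it is the change the diff later in this record makes. A minimal sketch of the difference — the dictionary contents below are illustrative only, not values from the real runconfig comparison:

```python
# dict.has_key() exists only on Python 2; `in` works on both major versions.
d2 = {"component": "datm", "ninst": "1"}  # illustrative values only
k = "stop_option"

# Python 2 only -- raises AttributeError under Python 3:
#     if not d2.has_key(k):
#         ...

# Portable form:
if k not in d2:
    print(k + " as key not in d2")
```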
[ { "content": "import os, re\n\nfrom CIME.utils import expect\n\n###############################################################################\ndef _normalize_string_value(value, case):\n###############################################################################\n \"\"\"\n Some of the strings are inherently prone to diffs, like file\n paths, etc. This function attempts to normalize that data so that\n it will not cause diffs.\n \"\"\"\n # Any occurance of case must be normalized because test-ids might not match\n if (case is not None):\n case_re = re.compile(r'{}[.]([GC])[.]([^./\\s]+)'.format(case))\n value = case_re.sub(\"{}.ACTION.TESTID\".format(case), value)\n\n if (\"/\" in value):\n # File path, just return the basename\n return os.path.basename(value)\n elif (\"username\" in value):\n return ''\n elif (\".log.\" in value):\n # Remove the part that's prone to diff\n components = value.split(\".\")\n return os.path.basename(\".\".join(components[0:-1]))\n else:\n return value\n\n###############################################################################\ndef _skip_comments_and_whitespace(lines, idx):\n###############################################################################\n \"\"\"\n Starting at idx, return next valid idx of lines that contains real data\n \"\"\"\n if (idx == len(lines)):\n return idx\n\n comment_re = re.compile(r'^[#!]')\n\n lines_slice = lines[idx:]\n for line in lines_slice:\n line = line.strip()\n if (comment_re.match(line) is not None or line == \"\"):\n idx += 1\n else:\n return idx\n\n return idx\n\n###############################################################################\ndef _compare_data(gold_lines, comp_lines, case, offset_method=False):\n###############################################################################\n \"\"\"\n >>> teststr = '''\n ... data1\n ... data2 data3\n ... data4 data5 data6\n ...\n ... # Comment\n ... data7 data8 data9 data10\n ... '''\n >>> _compare_data(teststr.splitlines(), teststr.splitlines(), None)\n ('', 0)\n\n >>> teststr2 = '''\n ... data1\n ... data2 data30\n ... data4 data5 data6\n ... data7 data8 data9 data10\n ... data00\n ... '''\n >>> results,_ = _compare_data(teststr.splitlines(), teststr2.splitlines(), None)\n >>> print(results)\n Inequivalent lines data2 data3 != data2 data30\n NORMALIZED: data2 data3 != data2 data30\n Found extra lines\n data00\n <BLANKLINE>\n >>> teststr3 = '''\n ... data1\n ... data4 data5 data6\n ... data7 data8 data9 data10\n ... data00\n ... 
'''\n >>> results,_ = _compare_data(teststr3.splitlines(), teststr2.splitlines(), None, offset_method=True)\n >>> print(results)\n Inequivalent lines data4 data5 data6 != data2 data30\n NORMALIZED: data4 data5 data6 != data2 data30\n <BLANKLINE>\n \"\"\"\n comments = \"\"\n cnt = 0\n gidx, cidx = 0, 0\n gnum, cnum = len(gold_lines), len(comp_lines)\n while (gidx < gnum or cidx < cnum):\n gidx = _skip_comments_and_whitespace(gold_lines, gidx)\n cidx = _skip_comments_and_whitespace(comp_lines, cidx)\n\n if (gidx == gnum):\n if (cidx == cnum):\n return comments, cnt\n else:\n comments += \"Found extra lines\\n\"\n comments += \"\\n\".join(comp_lines[cidx:]) + \"\\n\"\n return comments, cnt\n elif (cidx == cnum):\n comments += \"Missing lines\\n\"\n comments += \"\\n\".join(gold_lines[gidx:1]) + \"\\n\"\n return comments, cnt\n\n gold_value = gold_lines[gidx].strip()\n gold_value = gold_value.replace('\"',\"'\")\n comp_value = comp_lines[cidx].strip()\n comp_value = comp_value.replace('\"',\"'\")\n\n norm_gold_value = _normalize_string_value(gold_value, case)\n norm_comp_value = _normalize_string_value(comp_value, case)\n\n if (norm_gold_value != norm_comp_value):\n comments += \"Inequivalent lines {} != {}\\n\".format(gold_value, comp_value)\n comments += \" NORMALIZED: {} != {}\\n\".format(norm_gold_value, norm_comp_value)\n cnt += 1\n if offset_method and (norm_gold_value != norm_comp_value):\n if gnum > cnum:\n gidx += 1\n else:\n cidx += 1\n else:\n gidx += 1\n cidx += 1\n\n return comments, cnt\n\n###############################################################################\ndef compare_files(gold_file, compare_file, case=None):\n###############################################################################\n \"\"\"\n Returns true if files are the same, comments are returned too:\n (success, comments)\n \"\"\"\n expect(os.path.exists(gold_file), \"File not found: {}\".format(gold_file))\n expect(os.path.exists(compare_file), \"File not found: {}\".format(compare_file))\n\n comments, cnt = _compare_data(open(gold_file, \"r\").readlines(),\n open(compare_file, \"r\").readlines(), case)\n\n if cnt > 0:\n comments2, cnt2 = _compare_data(open(gold_file, \"r\").readlines(),\n open(compare_file, \"r\").readlines(),\n case, offset_method=True)\n if cnt2 < cnt:\n comments = comments2\n\n return comments == \"\", comments\n\n###############################################################################\ndef compare_runconfigfiles(gold_file, compare_file, case=None):\n###############################################################################\n \"\"\"\n Returns true if files are the same, comments are returned too:\n (success, comments)\n \"\"\"\n expect(os.path.exists(gold_file), \"File not found: {}\".format(gold_file))\n expect(os.path.exists(compare_file), \"File not found: {}\".format(compare_file))\n\n #create dictionary's of the runconfig files and compare them\n gold_dict = _parse_runconfig(gold_file)\n compare_dict = _parse_runconfig(compare_file)\n\n comments = findDiff(gold_dict, compare_dict, case=case)\n comments = comments.replace(\" d1\", \" \" + gold_file)\n comments = comments.replace(\" d2\", \" \" + compare_file)\n # this picks up the case that an entry in compare is not in gold\n if comments == \"\":\n comments = findDiff(compare_dict, gold_dict, case=case)\n comments = comments.replace(\" d2\", \" \" + gold_file)\n comments = comments.replace(\" d1\", \" \" + compare_file)\n\n return comments == \"\", comments\n\ndef _parse_runconfig(filename):\n runconfig = {}\n 
inrunseq = False\n insubsection = None\n subsection_re = re.compile(r'\\s*(\\S+)::')\n group_re = re.compile(r'\\s*(\\S+)\\s*:\\s*(\\S+)')\n var_re = re.compile(r'\\s*(\\S+)\\s*=\\s*(\\S+)')\n with open(filename, \"r\") as fd:\n for line in fd:\n # remove comments\n line = line.split('#')[0]\n subsection_match = subsection_re.match(line)\n group_match = group_re.match(line)\n var_match = var_re.match(line)\n if re.match(r'\\s*runSeq\\s*::', line):\n runconfig['runSeq'] = []\n inrunseq = True\n elif re.match(r'\\s*::\\s*', line):\n inrunseq = False\n elif inrunseq:\n runconfig['runSeq'].append(line)\n elif subsection_match:\n insubsection = subsection_match.group(1)\n runconfig[insubsection] = {}\n elif group_match:\n runconfig[group_match.group(1)] = group_match.group(2)\n elif insubsection and var_match:\n runconfig[insubsection][var_match.group(1)] = var_match.group(2)\n return runconfig\n\ndef findDiff(d1, d2, path=\"\", case=None):\n comment = \"\"\n for k in d1.keys():\n if not d2.has_key(k):\n comment += path + \":\\n\"\n comment += k + \" as key not in d2\\n\"\n else:\n if type(d1[k]) is dict:\n if path == \"\":\n path = k\n else:\n path = path + \"->\" + k\n comment += findDiff(d1[k],d2[k], path=path, case=case)\n else:\n if case in d1[k]:\n pass\n elif \"username\" in k:\n pass\n elif \"logfile\" in k:\n pass\n elif d1[k] != d2[k]:\n comment += path+\":\\n\"\n comment += \" - {} : {}\\n\".format(k,d1[k])\n comment += \" + {} : {}\\n\".format(k,d2[k])\n return comment\n", "path": "scripts/lib/CIME/simple_compare.py" } ]
[ { "content": "import os, re\n\nfrom CIME.utils import expect\n\n###############################################################################\ndef _normalize_string_value(value, case):\n###############################################################################\n \"\"\"\n Some of the strings are inherently prone to diffs, like file\n paths, etc. This function attempts to normalize that data so that\n it will not cause diffs.\n \"\"\"\n # Any occurance of case must be normalized because test-ids might not match\n if (case is not None):\n case_re = re.compile(r'{}[.]([GC])[.]([^./\\s]+)'.format(case))\n value = case_re.sub(\"{}.ACTION.TESTID\".format(case), value)\n\n if (\"/\" in value):\n # File path, just return the basename\n return os.path.basename(value)\n elif (\"username\" in value):\n return ''\n elif (\".log.\" in value):\n # Remove the part that's prone to diff\n components = value.split(\".\")\n return os.path.basename(\".\".join(components[0:-1]))\n else:\n return value\n\n###############################################################################\ndef _skip_comments_and_whitespace(lines, idx):\n###############################################################################\n \"\"\"\n Starting at idx, return next valid idx of lines that contains real data\n \"\"\"\n if (idx == len(lines)):\n return idx\n\n comment_re = re.compile(r'^[#!]')\n\n lines_slice = lines[idx:]\n for line in lines_slice:\n line = line.strip()\n if (comment_re.match(line) is not None or line == \"\"):\n idx += 1\n else:\n return idx\n\n return idx\n\n###############################################################################\ndef _compare_data(gold_lines, comp_lines, case, offset_method=False):\n###############################################################################\n \"\"\"\n >>> teststr = '''\n ... data1\n ... data2 data3\n ... data4 data5 data6\n ...\n ... # Comment\n ... data7 data8 data9 data10\n ... '''\n >>> _compare_data(teststr.splitlines(), teststr.splitlines(), None)\n ('', 0)\n\n >>> teststr2 = '''\n ... data1\n ... data2 data30\n ... data4 data5 data6\n ... data7 data8 data9 data10\n ... data00\n ... '''\n >>> results,_ = _compare_data(teststr.splitlines(), teststr2.splitlines(), None)\n >>> print(results)\n Inequivalent lines data2 data3 != data2 data30\n NORMALIZED: data2 data3 != data2 data30\n Found extra lines\n data00\n <BLANKLINE>\n >>> teststr3 = '''\n ... data1\n ... data4 data5 data6\n ... data7 data8 data9 data10\n ... data00\n ... 
'''\n >>> results,_ = _compare_data(teststr3.splitlines(), teststr2.splitlines(), None, offset_method=True)\n >>> print(results)\n Inequivalent lines data4 data5 data6 != data2 data30\n NORMALIZED: data4 data5 data6 != data2 data30\n <BLANKLINE>\n \"\"\"\n comments = \"\"\n cnt = 0\n gidx, cidx = 0, 0\n gnum, cnum = len(gold_lines), len(comp_lines)\n while (gidx < gnum or cidx < cnum):\n gidx = _skip_comments_and_whitespace(gold_lines, gidx)\n cidx = _skip_comments_and_whitespace(comp_lines, cidx)\n\n if (gidx == gnum):\n if (cidx == cnum):\n return comments, cnt\n else:\n comments += \"Found extra lines\\n\"\n comments += \"\\n\".join(comp_lines[cidx:]) + \"\\n\"\n return comments, cnt\n elif (cidx == cnum):\n comments += \"Missing lines\\n\"\n comments += \"\\n\".join(gold_lines[gidx:1]) + \"\\n\"\n return comments, cnt\n\n gold_value = gold_lines[gidx].strip()\n gold_value = gold_value.replace('\"',\"'\")\n comp_value = comp_lines[cidx].strip()\n comp_value = comp_value.replace('\"',\"'\")\n\n norm_gold_value = _normalize_string_value(gold_value, case)\n norm_comp_value = _normalize_string_value(comp_value, case)\n\n if (norm_gold_value != norm_comp_value):\n comments += \"Inequivalent lines {} != {}\\n\".format(gold_value, comp_value)\n comments += \" NORMALIZED: {} != {}\\n\".format(norm_gold_value, norm_comp_value)\n cnt += 1\n if offset_method and (norm_gold_value != norm_comp_value):\n if gnum > cnum:\n gidx += 1\n else:\n cidx += 1\n else:\n gidx += 1\n cidx += 1\n\n return comments, cnt\n\n###############################################################################\ndef compare_files(gold_file, compare_file, case=None):\n###############################################################################\n \"\"\"\n Returns true if files are the same, comments are returned too:\n (success, comments)\n \"\"\"\n expect(os.path.exists(gold_file), \"File not found: {}\".format(gold_file))\n expect(os.path.exists(compare_file), \"File not found: {}\".format(compare_file))\n\n comments, cnt = _compare_data(open(gold_file, \"r\").readlines(),\n open(compare_file, \"r\").readlines(), case)\n\n if cnt > 0:\n comments2, cnt2 = _compare_data(open(gold_file, \"r\").readlines(),\n open(compare_file, \"r\").readlines(),\n case, offset_method=True)\n if cnt2 < cnt:\n comments = comments2\n\n return comments == \"\", comments\n\n###############################################################################\ndef compare_runconfigfiles(gold_file, compare_file, case=None):\n###############################################################################\n \"\"\"\n Returns true if files are the same, comments are returned too:\n (success, comments)\n \"\"\"\n expect(os.path.exists(gold_file), \"File not found: {}\".format(gold_file))\n expect(os.path.exists(compare_file), \"File not found: {}\".format(compare_file))\n\n #create dictionary's of the runconfig files and compare them\n gold_dict = _parse_runconfig(gold_file)\n compare_dict = _parse_runconfig(compare_file)\n\n comments = findDiff(gold_dict, compare_dict, case=case)\n comments = comments.replace(\" d1\", \" \" + gold_file)\n comments = comments.replace(\" d2\", \" \" + compare_file)\n # this picks up the case that an entry in compare is not in gold\n if comments == \"\":\n comments = findDiff(compare_dict, gold_dict, case=case)\n comments = comments.replace(\" d2\", \" \" + gold_file)\n comments = comments.replace(\" d1\", \" \" + compare_file)\n\n return comments == \"\", comments\n\ndef _parse_runconfig(filename):\n runconfig = {}\n 
inrunseq = False\n insubsection = None\n subsection_re = re.compile(r'\\s*(\\S+)::')\n group_re = re.compile(r'\\s*(\\S+)\\s*:\\s*(\\S+)')\n var_re = re.compile(r'\\s*(\\S+)\\s*=\\s*(\\S+)')\n with open(filename, \"r\") as fd:\n for line in fd:\n # remove comments\n line = line.split('#')[0]\n subsection_match = subsection_re.match(line)\n group_match = group_re.match(line)\n var_match = var_re.match(line)\n if re.match(r'\\s*runSeq\\s*::', line):\n runconfig['runSeq'] = []\n inrunseq = True\n elif re.match(r'\\s*::\\s*', line):\n inrunseq = False\n elif inrunseq:\n runconfig['runSeq'].append(line)\n elif subsection_match:\n insubsection = subsection_match.group(1)\n runconfig[insubsection] = {}\n elif group_match:\n runconfig[group_match.group(1)] = group_match.group(2)\n elif insubsection and var_match:\n runconfig[insubsection][var_match.group(1)] = var_match.group(2)\n return runconfig\n\ndef findDiff(d1, d2, path=\"\", case=None):\n comment = \"\"\n for k in d1.keys():\n if not k in d2:\n comment += path + \":\\n\"\n comment += k + \" as key not in d2\\n\"\n else:\n if type(d1[k]) is dict:\n if path == \"\":\n path = k\n else:\n path = path + \"->\" + k\n comment += findDiff(d1[k],d2[k], path=path, case=case)\n else:\n if case in d1[k]:\n pass\n elif \"username\" in k:\n pass\n elif \"logfile\" in k:\n pass\n elif d1[k] != d2[k]:\n comment += path+\":\\n\"\n comment += \" - {} : {}\\n\".format(k,d1[k])\n comment += \" + {} : {}\\n\".format(k,d2[k])\n return comment\n", "path": "scripts/lib/CIME/simple_compare.py" } ]
diff --git a/scripts/lib/CIME/simple_compare.py b/scripts/lib/CIME/simple_compare.py
index 69ef73f04b2..99859f579fb 100644
--- a/scripts/lib/CIME/simple_compare.py
+++ b/scripts/lib/CIME/simple_compare.py
@@ -212,7 +212,7 @@ def _parse_runconfig(filename):
 def findDiff(d1, d2, path="", case=None):
     comment = ""
     for k in d1.keys():
-        if not d2.has_key(k):
+        if not k in d2:
             comment += path + ":\n"
             comment += k + " as key not in d2\n"
         else:
Textualize__textual-1278
Type selectors can't contain a number
It seems that type selectors can't handle types that have a number in their name. For example this:

```python
from textual.app import App, ComposeResult
from textual.widgets import Label

class H1( Label ):
    pass

class NumberError( App[ None ] ):

    CSS = """
    H1 {
        text-style: bold;
        color: yellow;
        background: red;
    }
    """

    def compose( self ) -> ComposeResult:
        yield H1( "This is a very important heading" )

if __name__ == "__main__":
    NumberError().run()
```

throws this error:

```
Error in stylesheet: /Users/davep/develop/python/textual-sandbox/not_all_types.py:NumberError:1:5
╭─────────────────────────────────────────────╮
│ ❱ 1 │                                       │
│   2 │ H1 {                                  │
│   3 │     text-style: bold;                 │
╰─────────────────────────────────────────────╯
 • Expected one of 'combinator child', 'comment start', 'declaration set start', 'new selector', 'pseudo class', 'selector', 'selector class', 'selector id', 'selector universal', or 'whitespace'.
 • Did you forget a semicolon at the end of a line?
```

whereas this works fine:

```python
from textual.app import App, ComposeResult
from textual.widgets import Label

class HOne( Label ):
    pass

class NumberError( App[ None ] ):

    CSS = """
    HOne {
        text-style: bold;
        color: yellow;
        background: red;
    }
    """

    def compose( self ) -> ComposeResult:
        yield HOne( "This is a very important heading" )

if __name__ == "__main__":
    NumberError().run()
```

as does, of course, this:

```python
from textual.app import App, ComposeResult
from textual.widgets import Label

class H1( Label ):
    pass

class NumberError( App[ None ] ):

    CSS = """
    Label {
        text-style: bold;
        color: yellow;
        background: red;
    }
    """

    def compose( self ) -> ComposeResult:
        yield H1( "This is a very important heading" )

if __name__ == "__main__":
    NumberError().run()
```
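The failing piece is the pattern that starts a type selector: in the tokenizer shown in this record's before_files, `selector_start` contains no digit class at all, while the after_files version reuses the `IDENTIFIER` pattern, which allows digits after the leading character. A small sketch of that difference, with both regular expressions copied from the record's tokenize module:

```python
# The first pattern rejects any digit, so "H1" cannot be consumed as a single
# type selector token; the second allows digits after the first character.
import re

old_selector_start = re.compile(r"[a-zA-Z_\-]+")
identifier = re.compile(r"[a-zA-Z_\-][a-zA-Z0-9_\-]*")

for name in ("HOne", "H1", "1H"):
    print(
        name,
        bool(old_selector_start.fullmatch(name)),  # H1 -> False
        bool(identifier.fullmatch(name)),          # H1 -> True, 1H -> False
    )
```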
[ { "content": "from __future__ import annotations\n\nimport re\nfrom pathlib import PurePath\nfrom typing import Iterable\n\nfrom textual.css.tokenizer import Expect, Tokenizer, Token\n\nPERCENT = r\"-?\\d+\\.?\\d*%\"\nDECIMAL = r\"-?\\d+\\.?\\d*\"\nCOMMA = r\"\\s*,\\s*\"\nOPEN_BRACE = r\"\\(\\s*\"\nCLOSE_BRACE = r\"\\s*\\)\"\n\nHEX_COLOR = r\"\\#[0-9a-fA-F]{8}|\\#[0-9a-fA-F]{6}|\\#[0-9a-fA-F]{4}|\\#[0-9a-fA-F]{3}\"\nRGB_COLOR = rf\"rgb{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}|rgba{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}\"\nHSL_COLOR = rf\"hsl{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{CLOSE_BRACE}|hsla{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{COMMA}{DECIMAL}{CLOSE_BRACE}\"\n\nCOMMENT_START = r\"\\/\\*\"\nSCALAR = rf\"{DECIMAL}(?:fr|%|w|h|vw|vh)\"\nDURATION = r\"\\d+\\.?\\d*(?:ms|s)\"\nNUMBER = r\"\\-?\\d+\\.?\\d*\"\nCOLOR = rf\"{HEX_COLOR}|{RGB_COLOR}|{HSL_COLOR}\"\nKEY_VALUE = r\"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\\-\\/]+\"\nTOKEN = \"[a-zA-Z][a-zA-Z0-9_-]*\"\nSTRING = r\"\\\".*?\\\"\"\nVARIABLE_REF = r\"\\$[a-zA-Z0-9_\\-]+\"\n\nIDENTIFIER = r\"[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\"\n\n# Values permitted in variable and rule declarations.\nDECLARATION_VALUES = {\n \"scalar\": SCALAR,\n \"duration\": DURATION,\n \"number\": NUMBER,\n \"color\": COLOR,\n \"key_value\": KEY_VALUE,\n \"token\": TOKEN,\n \"string\": STRING,\n \"variable_ref\": VARIABLE_REF,\n}\n\n# The tokenizers \"expectation\" while at the root/highest level of scope\n# in the CSS file. At this level we might expect to see selectors, comments,\n# variable definitions etc.\nexpect_root_scope = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n selector_start_id=r\"\\#\" + IDENTIFIER,\n selector_start_class=r\"\\.\" + IDENTIFIER,\n selector_start_universal=r\"\\*\",\n selector_start=r\"[a-zA-Z_\\-]+\",\n variable_name=rf\"{VARIABLE_REF}:\",\n).expect_eof(True)\n\n# After a variable declaration e.g. \"$warning-text: TOKENS;\"\n# for tokenizing variable value ------^~~~~~~^\nexpect_variable_name_continue = Expect(\n variable_value_end=r\"\\n|;\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n).expect_eof(True)\n\nexpect_comment_end = Expect(\n comment_end=re.escape(\"*/\"),\n)\n\n# After we come across a selector in CSS e.g. \".my-class\", we may\n# find other selectors, pseudo-classes... e.g. \".my-class :hover\"\nexpect_selector_continue = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n pseudo_class=r\"\\:[a-zA-Z_-]+\",\n selector_id=r\"\\#[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_class=r\"\\.[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_universal=r\"\\*\",\n selector=r\"[a-zA-Z_\\-]+\",\n combinator_child=\">\",\n new_selector=r\",\",\n declaration_set_start=r\"\\{\",\n)\n\n# A rule declaration e.g. \"text: red;\"\n# ^---^\nexpect_declaration = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_solo = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n# The value(s)/content from a rule declaration e.g. 
\"text: red;\"\n# ^---^\nexpect_declaration_content = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_content_solo = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n\nclass TokenizerState:\n \"\"\"State machine for the tokenizer.\n\n Attributes:\n EXPECT: The initial expectation of the tokenizer. Since we start tokenizing\n at the root scope, we might expect to see either a variable or selector, for example.\n STATE_MAP: Maps token names to Expects, defines the sets of valid tokens\n that we'd expect to see next, given the current token. For example, if\n we've just processed a variable declaration name, we next expect to see\n the value of that variable.\n \"\"\"\n\n EXPECT = expect_root_scope\n STATE_MAP = {\n \"variable_name\": expect_variable_name_continue,\n \"variable_value_end\": expect_root_scope,\n \"selector_start\": expect_selector_continue,\n \"selector_start_id\": expect_selector_continue,\n \"selector_start_class\": expect_selector_continue,\n \"selector_start_universal\": expect_selector_continue,\n \"selector_id\": expect_selector_continue,\n \"selector_class\": expect_selector_continue,\n \"selector_universal\": expect_selector_continue,\n \"declaration_set_start\": expect_declaration,\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration,\n \"declaration_set_end\": expect_root_scope,\n }\n\n def __call__(self, code: str, path: str | PurePath) -> Iterable[Token]:\n tokenizer = Tokenizer(code, path=path)\n expect = self.EXPECT\n get_token = tokenizer.get_token\n get_state = self.STATE_MAP.get\n while True:\n token = get_token(expect)\n name = token.name\n if name == \"comment_start\":\n tokenizer.skip_to(expect_comment_end)\n continue\n elif name == \"eof\":\n break\n expect = get_state(name, expect)\n yield token\n\n\nclass DeclarationTokenizerState(TokenizerState):\n EXPECT = expect_declaration_solo\n STATE_MAP = {\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration_solo,\n }\n\n\nclass ValueTokenizerState(TokenizerState):\n EXPECT = expect_declaration_content_solo\n\n\ntokenize = TokenizerState()\ntokenize_declarations = DeclarationTokenizerState()\ntokenize_value = ValueTokenizerState()\n\n\ndef tokenize_values(values: dict[str, str]) -> dict[str, list[Token]]:\n \"\"\"Tokens the values in a dict of strings.\n\n Args:\n values (dict[str, str]): A mapping of CSS variable name on to a value, to be\n added to the CSS context.\n\n Returns:\n dict[str, list[Token]]: A mapping of name on to a list of tokens,\n \"\"\"\n value_tokens = {\n name: list(tokenize_value(value, \"__name__\")) for name, value in values.items()\n }\n return value_tokens\n", "path": "src/textual/css/tokenize.py" } ]
[ { "content": "from __future__ import annotations\n\nimport re\nfrom pathlib import PurePath\nfrom typing import Iterable\n\nfrom textual.css.tokenizer import Expect, Tokenizer, Token\n\nPERCENT = r\"-?\\d+\\.?\\d*%\"\nDECIMAL = r\"-?\\d+\\.?\\d*\"\nCOMMA = r\"\\s*,\\s*\"\nOPEN_BRACE = r\"\\(\\s*\"\nCLOSE_BRACE = r\"\\s*\\)\"\n\nHEX_COLOR = r\"\\#[0-9a-fA-F]{8}|\\#[0-9a-fA-F]{6}|\\#[0-9a-fA-F]{4}|\\#[0-9a-fA-F]{3}\"\nRGB_COLOR = rf\"rgb{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}|rgba{OPEN_BRACE}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{COMMA}{DECIMAL}{CLOSE_BRACE}\"\nHSL_COLOR = rf\"hsl{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{CLOSE_BRACE}|hsla{OPEN_BRACE}{DECIMAL}{COMMA}{PERCENT}{COMMA}{PERCENT}{COMMA}{DECIMAL}{CLOSE_BRACE}\"\n\nCOMMENT_START = r\"\\/\\*\"\nSCALAR = rf\"{DECIMAL}(?:fr|%|w|h|vw|vh)\"\nDURATION = r\"\\d+\\.?\\d*(?:ms|s)\"\nNUMBER = r\"\\-?\\d+\\.?\\d*\"\nCOLOR = rf\"{HEX_COLOR}|{RGB_COLOR}|{HSL_COLOR}\"\nKEY_VALUE = r\"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\\-\\/]+\"\nTOKEN = \"[a-zA-Z][a-zA-Z0-9_-]*\"\nSTRING = r\"\\\".*?\\\"\"\nVARIABLE_REF = r\"\\$[a-zA-Z0-9_\\-]+\"\n\nIDENTIFIER = r\"[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\"\n\n# Values permitted in variable and rule declarations.\nDECLARATION_VALUES = {\n \"scalar\": SCALAR,\n \"duration\": DURATION,\n \"number\": NUMBER,\n \"color\": COLOR,\n \"key_value\": KEY_VALUE,\n \"token\": TOKEN,\n \"string\": STRING,\n \"variable_ref\": VARIABLE_REF,\n}\n\n# The tokenizers \"expectation\" while at the root/highest level of scope\n# in the CSS file. At this level we might expect to see selectors, comments,\n# variable definitions etc.\nexpect_root_scope = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n selector_start_id=r\"\\#\" + IDENTIFIER,\n selector_start_class=r\"\\.\" + IDENTIFIER,\n selector_start_universal=r\"\\*\",\n selector_start=IDENTIFIER,\n variable_name=rf\"{VARIABLE_REF}:\",\n).expect_eof(True)\n\n# After a variable declaration e.g. \"$warning-text: TOKENS;\"\n# for tokenizing variable value ------^~~~~~~^\nexpect_variable_name_continue = Expect(\n variable_value_end=r\"\\n|;\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n).expect_eof(True)\n\nexpect_comment_end = Expect(\n comment_end=re.escape(\"*/\"),\n)\n\n# After we come across a selector in CSS e.g. \".my-class\", we may\n# find other selectors, pseudo-classes... e.g. \".my-class :hover\"\nexpect_selector_continue = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n pseudo_class=r\"\\:[a-zA-Z_-]+\",\n selector_id=r\"\\#[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_class=r\"\\.[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_universal=r\"\\*\",\n selector=r\"[a-zA-Z_\\-]+\",\n combinator_child=\">\",\n new_selector=r\",\",\n declaration_set_start=r\"\\{\",\n)\n\n# A rule declaration e.g. \"text: red;\"\n# ^---^\nexpect_declaration = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_solo = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n# The value(s)/content from a rule declaration e.g. 
\"text: red;\"\n# ^---^\nexpect_declaration_content = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_content_solo = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n\nclass TokenizerState:\n \"\"\"State machine for the tokenizer.\n\n Attributes:\n EXPECT: The initial expectation of the tokenizer. Since we start tokenizing\n at the root scope, we might expect to see either a variable or selector, for example.\n STATE_MAP: Maps token names to Expects, defines the sets of valid tokens\n that we'd expect to see next, given the current token. For example, if\n we've just processed a variable declaration name, we next expect to see\n the value of that variable.\n \"\"\"\n\n EXPECT = expect_root_scope\n STATE_MAP = {\n \"variable_name\": expect_variable_name_continue,\n \"variable_value_end\": expect_root_scope,\n \"selector_start\": expect_selector_continue,\n \"selector_start_id\": expect_selector_continue,\n \"selector_start_class\": expect_selector_continue,\n \"selector_start_universal\": expect_selector_continue,\n \"selector_id\": expect_selector_continue,\n \"selector_class\": expect_selector_continue,\n \"selector_universal\": expect_selector_continue,\n \"declaration_set_start\": expect_declaration,\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration,\n \"declaration_set_end\": expect_root_scope,\n }\n\n def __call__(self, code: str, path: str | PurePath) -> Iterable[Token]:\n tokenizer = Tokenizer(code, path=path)\n expect = self.EXPECT\n get_token = tokenizer.get_token\n get_state = self.STATE_MAP.get\n while True:\n token = get_token(expect)\n name = token.name\n if name == \"comment_start\":\n tokenizer.skip_to(expect_comment_end)\n continue\n elif name == \"eof\":\n break\n expect = get_state(name, expect)\n yield token\n\n\nclass DeclarationTokenizerState(TokenizerState):\n EXPECT = expect_declaration_solo\n STATE_MAP = {\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration_solo,\n }\n\n\nclass ValueTokenizerState(TokenizerState):\n EXPECT = expect_declaration_content_solo\n\n\ntokenize = TokenizerState()\ntokenize_declarations = DeclarationTokenizerState()\ntokenize_value = ValueTokenizerState()\n\n\ndef tokenize_values(values: dict[str, str]) -> dict[str, list[Token]]:\n \"\"\"Tokens the values in a dict of strings.\n\n Args:\n values (dict[str, str]): A mapping of CSS variable name on to a value, to be\n added to the CSS context.\n\n Returns:\n dict[str, list[Token]]: A mapping of name on to a list of tokens,\n \"\"\"\n value_tokens = {\n name: list(tokenize_value(value, \"__name__\")) for name, value in values.items()\n }\n return value_tokens\n", "path": "src/textual/css/tokenize.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 18c26b55a7..aa06b03c08 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Rebuilt `DirectoryTree` with new `Tree` control. +### Fixed + +- Type selectors can now contain numbers https://github.com/Textualize/textual/issues/1253 + ## [0.5.0] - 2022-11-20 ### Added diff --git a/src/textual/css/tokenize.py b/src/textual/css/tokenize.py index e13820fd6e..a1b84b596a 100644 --- a/src/textual/css/tokenize.py +++ b/src/textual/css/tokenize.py @@ -49,7 +49,7 @@ selector_start_id=r"\#" + IDENTIFIER, selector_start_class=r"\." + IDENTIFIER, selector_start_universal=r"\*", - selector_start=r"[a-zA-Z_\-]+", + selector_start=IDENTIFIER, variable_name=rf"{VARIABLE_REF}:", ).expect_eof(True) diff --git a/tests/test_query.py b/tests/test_query.py index 8cc4de653b..96bc6d405d 100644 --- a/tests/test_query.py +++ b/tests/test_query.py @@ -8,12 +8,15 @@ def test_query(): class View(Widget): pass + class View2(View): + pass + class App(Widget): pass app = App() main_view = View(id="main") - help_view = View(id="help") + help_view = View2(id="help") app._add_child(main_view) app._add_child(help_view) @@ -53,6 +56,7 @@ class App(Widget): assert list(app.query("App")) == [app] assert list(app.query("#main")) == [main_view] assert list(app.query("View#main")) == [main_view] + assert list(app.query("View2#help")) == [help_view] assert list(app.query("#widget1")) == [widget1] assert list(app.query("#Widget1")) == [] # Note case. assert list(app.query("#widget2")) == [widget2]
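The diff above replaces the hand-written `selector_start` pattern with the shared `IDENTIFIER` regex, which allows digits after the first character, so a widget class named `View2` is recognized as a type selector. Below is a quick standalone check of the two patterns (not part of the repository; `re.fullmatch` is used here only to test whole-name matching, while the tokenizer itself applies the patterns incrementally):

```python
import re

OLD_SELECTOR_START = r"[a-zA-Z_\-]+"        # pattern before the fix: no digits allowed
IDENTIFIER = r"[a-zA-Z_\-][a-zA-Z0-9_\-]*"  # pattern after the fix: digits allowed after the first char

# "View2" is rejected by the old pattern but accepted by the new one.
print(re.fullmatch(OLD_SELECTOR_START, "View2"))  # None
print(re.fullmatch(IDENTIFIER, "View2"))          # <re.Match object; match='View2'>
```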
privacyidea__privacyidea-3745
Better label for day password policy The label in the policy for the `daypassword_timestep` should be more helpful. Please replace "text..." with something like "24h". ![image](https://github.com/privacyidea/privacyidea/assets/1908620/2895a46c-3c70-4131-b3bc-eceaa4efbffc)
[ { "content": "# -*- coding: utf-8 -*-\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nimport logging\nimport time\nimport datetime\nfrom privacyidea.lib.tokens.HMAC import HmacOtp\nfrom privacyidea.lib.config import get_from_config\nfrom privacyidea.lib.log import log_with\nfrom privacyidea.lib.tokenclass import TokenClass\nfrom privacyidea.lib.tokens.hotptoken import HotpTokenClass\nfrom privacyidea.lib.decorators import check_token_locked\nfrom privacyidea.lib.policy import ACTION, SCOPE, GROUP, Match\nfrom privacyidea.lib.tokens.totptoken import TotpTokenClass\nfrom privacyidea.lib.utils import determine_logged_in_userparams, parse_time_sec_int\nfrom privacyidea.lib import _\n\nlog = logging.getLogger(__name__)\n\n\nclass DayPasswordTokenClass(TotpTokenClass):\n previous_otp_offset = 0\n\n desc_timestep = _('Specify the time step of the DayPassword token.')\n\n @log_with(log)\n def __init__(self, db_token):\n \"\"\"\n Create a new day password token object from a DB Token object\n\n :param db_token: instance of the orm db object\n :type db_token: orm object\n \"\"\"\n TokenClass.__init__(self, db_token)\n self.set_type(\"daypassword\")\n self.hKeyRequired = True\n\n @staticmethod\n def get_class_type():\n \"\"\"\n return the token type shortname\n\n :return: 'daypassword'\n :rtype: string\n \"\"\"\n return \"daypassword\"\n\n @staticmethod\n def get_class_prefix():\n \"\"\"\n Return the prefix, that is used as a prefix for the serial numbers.\n\n :return: DayPasswordToken\n \"\"\"\n return \"DayPassword\"\n\n @staticmethod\n @log_with(log)\n def get_class_info(key=None, ret='all'):\n \"\"\"\n returns a subtree of the token definition\n\n :param key: subsection identifier\n :type key: string\n :param ret: default return value, if nothing is found\n :type ret: user defined\n :return: subsection if key exists or user defined\n :rtype: dict or scalar\n \"\"\"\n res = {'type': 'daypassword',\n 'title': 'Time based Password',\n 'description': _('DayPassword: A time-based token with a variable timestep and the possibility'\n ' to use the OTP more than once.'),\n 'user': ['enroll'],\n # This tokentype is enrollable in the UI for...\n 'ui_enroll': [SCOPE.ADMIN, SCOPE.USER],\n 'policy': {\n SCOPE.USER: {\n 'timestep': {'type': 'str',\n 'desc': DayPasswordTokenClass.desc_timestep},\n 'hashlib': {'type': 'str',\n 'value': [\"sha1\",\n \"sha256\",\n \"sha512\"],\n 'desc': DayPasswordTokenClass.desc_hash_func},\n 'otplen': {'type': 'int',\n 'value': [6, 8],\n 'desc': DayPasswordTokenClass.desc_otp_len},\n 'force_server_generate': {'type': 'bool',\n 'desc': DayPasswordTokenClass.desc_key_gen}\n },\n SCOPE.ADMIN: {\n 'timestep': {'type': 'str',\n 'desc': DayPasswordTokenClass.desc_timestep},\n 'hashlib': {'type': 'str',\n 'value': [\"sha1\",\n \"sha256\",\n \"sha512\"],\n 'desc': DayPasswordTokenClass.desc_hash_func},\n 'otplen': {'type': 'int',\n 'value': [6, 8],\n 'desc': 
DayPasswordTokenClass.desc_otp_len},\n 'force_server_generate': {'type': 'bool',\n 'desc': DayPasswordTokenClass.desc_key_gen}\n },\n SCOPE.ENROLL: {\n ACTION.FORCE_APP_PIN: {\n 'type': 'bool',\n 'desc': _('Enforce setting an app pin for the privacyIDEA '\n 'Authenticator App')\n },\n ACTION.MAXTOKENUSER: {\n 'type': 'int',\n 'desc': _(\"The user may only have this maximum \"\n \"number of daypassword tokens assigned.\"),\n 'group': GROUP.TOKEN\n },\n ACTION.MAXACTIVETOKENUSER: {\n 'type': 'int',\n 'desc': _(\"The user may only have this maximum number \"\n \"of active daypassword tokens assigned.\"),\n 'group': GROUP.TOKEN\n }\n\n }\n },\n }\n if key:\n ret = res.get(key, {})\n else:\n if ret == 'all':\n ret = res\n\n return ret\n\n @log_with(log)\n def update(self, param, reset_failcount=True):\n \"\"\"\n This is called during initialization of the token\n to add additional attributes to the token object.\n\n :param param: dict of initialization parameters\n :type param: dict\n\n :return: nothing\n \"\"\"\n HotpTokenClass.update(self, param, reset_failcount=reset_failcount)\n\n timeStep = param.get(\"timeStep\", self.timestep)\n # we support various hashlib methods, but only on create\n # which is effectively set in the update\n hashlibStr = param.get(\"hashlib\", self.hashlib)\n\n self.add_tokeninfo(\"timeStep\", timeStep)\n self.add_tokeninfo(\"hashlib\", hashlibStr)\n\n @property\n def timestep(self):\n timeStepping = parse_time_sec_int(self.get_tokeninfo(\"timeStep\") or\n (get_from_config(\"daypassword.timeStep\")) or \"1d\")\n\n return timeStepping\n\n @property\n def hashlib(self):\n hashlibStr = self.get_tokeninfo(\"hashlib\") or \\\n get_from_config(\"daypassword.hashlib\", 'sha1')\n return hashlibStr\n\n @log_with(log)\n def check_otp_exist(self, otp, options=None, symetric=False,\n inc_counter=True):\n \"\"\"\n checks if the given password value is/are values of this very token at all.\n This is used to autoassign and to determine the serial number of\n a token.\n\n :param otp: the to be verified otp value\n :type otp: string\n :return: counter or -1 if otp does not exist\n :rtype: int\n \"\"\"\n options = options or {}\n res = self.check_otp(otp, options=options)\n\n if inc_counter and res >= 0:\n # As usually the counter is increased in lib.token.checkUserPass,\n # we need to do this manually here:\n self.inc_otp_counter(res)\n return res\n\n @check_token_locked\n def check_otp(self, anOtpVal, counter=None, options=None):\n \"\"\"\n validate the token passwort against a given passwordvalue\n\n :param anOtpVal: the to be verified passwordvalue\n :type anOtpVal: string\n :param counter: the counter state, that should be verified. 
For DayPasswordToken\n this is the unix system time (seconds) divided by 30/60\n :type counter: int\n :param options: the dict, which could contain token specific info\n :type options: dict\n :return: the counter or -1\n :rtype: int\n \"\"\"\n otplen = int(self.token.otplen)\n options = options or {}\n secretHOtp = self.token.get_otpkey()\n oCount = self.get_otp_count()\n inow = int(time.time())\n\n initTime = int(options.get('initTime', -1))\n if initTime != -1:\n server_time = int(initTime)\n else:\n server_time = time.time()\n\n # If we have a counter from the parameter list\n if not counter:\n # No counter, so we take the current token_time\n counter = self._time2counter(server_time,\n timeStepping=self.timestep)\n\n hmac2Otp = HmacOtp(secretHOtp,\n counter,\n otplen,\n self.get_hashlib(self.hashlib))\n res = hmac2Otp.checkOtp(anOtpVal,\n int(1),\n symetric=False)\n\n if res != -1:\n # on success, we have to save the last attempt\n self.set_otp_count(res)\n\n # here we calculate the new drift/shift between the server time\n # and the tokentime\n tokentime = self._counter2time(res, self.timestep)\n tokenDt = datetime.datetime.fromtimestamp(tokentime / 1.0)\n\n nowDt = datetime.datetime.fromtimestamp(inow / 1.0)\n\n lastauth = self._counter2time(oCount, self.timestep)\n lastauthDt = datetime.datetime.fromtimestamp(lastauth / 1.0)\n\n log.debug(\"last auth : {0!r}\".format(lastauthDt))\n log.debug(\"tokentime : {0!r}\".format(tokenDt))\n log.debug(\"now : {0!r}\".format(nowDt))\n log.debug(\"delta : {0!r}\".format((tokentime - inow)))\n\n return res\n\n def get_otp(self, current_time=None, do_truncation=True,\n time_seconds=None, challenge=None):\n \"\"\"\n get the next OTP value\n\n :param current_time: the current time, for which the OTP value\n should be calculated for.\n :type current_time: datetime object\n :param time_seconds: the current time, for which the OTP value\n should be calculated for (date +%s)\n :type: time_seconds: int, unix system time seconds\n :return: next otp value, and PIN, if possible\n :rtype: tuple\n \"\"\"\n otplen = int(self.token.otplen)\n secretHOtp = self.token.get_otpkey()\n\n hmac2Otp = HmacOtp(secretHOtp,\n self.get_otp_count(),\n otplen,\n self.get_hashlib(self.hashlib))\n\n if time_seconds is None:\n time_seconds = time.time()\n if current_time:\n time_seconds = self._time2float(current_time)\n\n counter = int(time_seconds / self.timestep)\n otpval = hmac2Otp.generate(counter=counter,\n inc_counter=False,\n do_truncation=do_truncation,\n challenge=challenge)\n\n pin = self.token.get_pin()\n combined = \"{0!s}{1!s}\".format(otpval, pin)\n if get_from_config(\"PrependPin\") == \"True\":\n combined = \"{0!s}{1!s}\".format(pin, otpval)\n\n return 1, pin, otpval, combined\n\n @log_with(log)\n def get_multi_otp(self, count=0, epoch_start=0, epoch_end=0,\n curTime=None, timestamp=None):\n \"\"\"\n return a dictionary of multiple future OTP values\n of the HOTP/HMAC token\n\n :param count: how many otp values should be returned\n :type count: int\n :param epoch_start: not implemented\n :param epoch_end: not implemented\n :param curTime: Simulate the servertime\n :type curTime: datetime.datetime\n :param timestamp: Simulate the servertime\n :type timestamp: epoch time\n :return: tuple of status: boolean, error: text and the OTP dictionary\n\n \"\"\"\n otp_dict = {\"type\": \"DayPasswordToken\", \"otp\": {}}\n ret = False\n error = \"No count specified\"\n\n otplen = int(self.token.otplen)\n secretHOtp = self.token.get_otpkey()\n\n hmac2Otp = 
HmacOtp(secretHOtp, self.get_otp_count(),\n otplen, self.get_hashlib(self.hashlib))\n\n if curTime:\n # datetime object provided for simulation\n tCounter = self._time2float(curTime)\n elif timestamp:\n # epoch time provided for simulation\n tCounter = int(timestamp)\n else:\n # use the current server time\n tCounter = int(time.time())\n\n counter = int(tCounter / self.timestep)\n\n if count > 0:\n error = \"OK\"\n for i in range(0, count):\n otpval = hmac2Otp.generate(counter=counter + i,\n inc_counter=False)\n timeCounter = (counter + i) * self.timestep\n\n val_time = datetime.datetime. \\\n fromtimestamp(timeCounter).strftime(\"%Y-%m-%d %H:%M:%S\")\n otp_dict[\"otp\"][counter + i] = {'otpval': otpval,\n 'time': val_time}\n ret = True\n\n return ret, error, otp_dict\n\n @staticmethod\n def get_setting_type(key):\n settings = {\"daypassword.hashlib\": \"public\",\n \"daypassword.timeStep\": \"public\"}\n return settings.get(key, \"\")\n\n @classmethod\n def get_default_settings(cls, g, params):\n \"\"\"\n This method returns a dictionary with default settings for token\n enrollment.\n These default settings are defined in SCOPE.USER or SCOPE.ADMIN and are\n daypassword_hashlib, daypassword_timestep and daypassword_otplen.\n If these are set, the user or admin will only be able to enroll tokens\n with these values.\n\n The returned dictionary is added to the parameters of the API call.\n\n :param g: context object, see documentation of ``Match``\n :param params: The call parameters\n :type params: dict\n :return: default parameters\n \"\"\"\n ret = {}\n if not g.logged_in_user:\n return ret\n (role, username, userrealm, adminuser, adminrealm) = determine_logged_in_userparams(g.logged_in_user,\n params)\n hashlib_pol = Match.generic(g, scope=role,\n action=\"daypassword_hashlib\",\n user=username,\n realm=userrealm,\n adminuser=adminuser,\n adminrealm=adminrealm).action_values(unique=True)\n if hashlib_pol:\n ret[\"hashlib\"] = list(hashlib_pol)[0]\n\n timestep_pol = Match.generic(g, scope=role,\n action=\"daypassword_timestep\",\n user=username,\n realm=userrealm,\n adminuser=adminuser,\n adminrealm=adminrealm).action_values(unique=True)\n if timestep_pol:\n ret[\"timeStep\"] = list(timestep_pol)[0]\n\n otplen_pol = Match.generic(g, scope=role,\n action=\"daypassword_otplen\",\n user=username,\n realm=userrealm,\n adminuser=adminuser,\n adminrealm=adminrealm).action_values(unique=True)\n if otplen_pol:\n ret[\"otplen\"] = list(otplen_pol)[0]\n\n return ret\n\n @staticmethod\n def get_import_csv(l):\n \"\"\"\n Read the list from a csv file and return a dictionary, that can be used\n to do a token_init.\n\n :param l: The list of the line of a csv file\n :type l: list\n :return: A dictionary of init params\n \"\"\"\n params = TokenClass.get_import_csv(l)\n # timeStep\n if len(l) >= 5:\n params[\"timeStep\"] = int(l[4].strip())\n else:\n params[\"timeStep\"] = 30\n\n return params\n", "path": "privacyidea/lib/tokens/daypasswordtoken.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nimport logging\nimport time\nimport datetime\nfrom privacyidea.lib.tokens.HMAC import HmacOtp\nfrom privacyidea.lib.config import get_from_config\nfrom privacyidea.lib.log import log_with\nfrom privacyidea.lib.tokenclass import TokenClass\nfrom privacyidea.lib.tokens.hotptoken import HotpTokenClass\nfrom privacyidea.lib.decorators import check_token_locked\nfrom privacyidea.lib.policy import ACTION, SCOPE, GROUP, Match\nfrom privacyidea.lib.tokens.totptoken import TotpTokenClass\nfrom privacyidea.lib.utils import determine_logged_in_userparams, parse_time_sec_int\nfrom privacyidea.lib import _\n\nlog = logging.getLogger(__name__)\n\n\nclass DayPasswordTokenClass(TotpTokenClass):\n previous_otp_offset = 0\n\n desc_timestep = _('Specify the time step of the DayPassword token. For example: \"24h\"')\n\n @log_with(log)\n def __init__(self, db_token):\n \"\"\"\n Create a new day password token object from a DB Token object\n\n :param db_token: instance of the orm db object\n :type db_token: orm object\n \"\"\"\n TokenClass.__init__(self, db_token)\n self.set_type(\"daypassword\")\n self.hKeyRequired = True\n\n @staticmethod\n def get_class_type():\n \"\"\"\n return the token type shortname\n\n :return: 'daypassword'\n :rtype: string\n \"\"\"\n return \"daypassword\"\n\n @staticmethod\n def get_class_prefix():\n \"\"\"\n Return the prefix, that is used as a prefix for the serial numbers.\n\n :return: DayPasswordToken\n \"\"\"\n return \"DayPassword\"\n\n @staticmethod\n @log_with(log)\n def get_class_info(key=None, ret='all'):\n \"\"\"\n returns a subtree of the token definition\n\n :param key: subsection identifier\n :type key: string\n :param ret: default return value, if nothing is found\n :type ret: user defined\n :return: subsection if key exists or user defined\n :rtype: dict or scalar\n \"\"\"\n res = {'type': 'daypassword',\n 'title': 'Time based Password',\n 'description': _('DayPassword: A time-based token with a variable timestep and the possibility'\n ' to use the OTP more than once.'),\n 'user': ['enroll'],\n # This tokentype is enrollable in the UI for...\n 'ui_enroll': [SCOPE.ADMIN, SCOPE.USER],\n 'policy': {\n SCOPE.USER: {\n 'timestep': {'type': 'str',\n 'desc': DayPasswordTokenClass.desc_timestep},\n 'hashlib': {'type': 'str',\n 'value': [\"sha1\",\n \"sha256\",\n \"sha512\"],\n 'desc': DayPasswordTokenClass.desc_hash_func},\n 'otplen': {'type': 'int',\n 'value': [6, 8],\n 'desc': DayPasswordTokenClass.desc_otp_len},\n 'force_server_generate': {'type': 'bool',\n 'desc': DayPasswordTokenClass.desc_key_gen}\n },\n SCOPE.ADMIN: {\n 'timestep': {'type': 'str',\n 'desc': DayPasswordTokenClass.desc_timestep},\n 'hashlib': {'type': 'str',\n 'value': [\"sha1\",\n \"sha256\",\n \"sha512\"],\n 'desc': DayPasswordTokenClass.desc_hash_func},\n 'otplen': {'type': 'int',\n 'value': [6, 8],\n 'desc': 
DayPasswordTokenClass.desc_otp_len},\n 'force_server_generate': {'type': 'bool',\n 'desc': DayPasswordTokenClass.desc_key_gen}\n },\n SCOPE.ENROLL: {\n ACTION.FORCE_APP_PIN: {\n 'type': 'bool',\n 'desc': _('Enforce setting an app pin for the privacyIDEA '\n 'Authenticator App')\n },\n ACTION.MAXTOKENUSER: {\n 'type': 'int',\n 'desc': _(\"The user may only have this maximum \"\n \"number of daypassword tokens assigned.\"),\n 'group': GROUP.TOKEN\n },\n ACTION.MAXACTIVETOKENUSER: {\n 'type': 'int',\n 'desc': _(\"The user may only have this maximum number \"\n \"of active daypassword tokens assigned.\"),\n 'group': GROUP.TOKEN\n }\n\n }\n },\n }\n if key:\n ret = res.get(key, {})\n else:\n if ret == 'all':\n ret = res\n\n return ret\n\n @log_with(log)\n def update(self, param, reset_failcount=True):\n \"\"\"\n This is called during initialization of the token\n to add additional attributes to the token object.\n\n :param param: dict of initialization parameters\n :type param: dict\n\n :return: nothing\n \"\"\"\n HotpTokenClass.update(self, param, reset_failcount=reset_failcount)\n\n timeStep = param.get(\"timeStep\", self.timestep)\n # we support various hashlib methods, but only on create\n # which is effectively set in the update\n hashlibStr = param.get(\"hashlib\", self.hashlib)\n\n self.add_tokeninfo(\"timeStep\", timeStep)\n self.add_tokeninfo(\"hashlib\", hashlibStr)\n\n @property\n def timestep(self):\n timeStepping = parse_time_sec_int(self.get_tokeninfo(\"timeStep\") or\n (get_from_config(\"daypassword.timeStep\")) or \"1d\")\n\n return timeStepping\n\n @property\n def hashlib(self):\n hashlibStr = self.get_tokeninfo(\"hashlib\") or \\\n get_from_config(\"daypassword.hashlib\", 'sha1')\n return hashlibStr\n\n @log_with(log)\n def check_otp_exist(self, otp, options=None, symetric=False,\n inc_counter=True):\n \"\"\"\n checks if the given password value is/are values of this very token at all.\n This is used to autoassign and to determine the serial number of\n a token.\n\n :param otp: the to be verified otp value\n :type otp: string\n :return: counter or -1 if otp does not exist\n :rtype: int\n \"\"\"\n options = options or {}\n res = self.check_otp(otp, options=options)\n\n if inc_counter and res >= 0:\n # As usually the counter is increased in lib.token.checkUserPass,\n # we need to do this manually here:\n self.inc_otp_counter(res)\n return res\n\n @check_token_locked\n def check_otp(self, anOtpVal, counter=None, options=None):\n \"\"\"\n validate the token passwort against a given passwordvalue\n\n :param anOtpVal: the to be verified passwordvalue\n :type anOtpVal: string\n :param counter: the counter state, that should be verified. 
For DayPasswordToken\n this is the unix system time (seconds) divided by 30/60\n :type counter: int\n :param options: the dict, which could contain token specific info\n :type options: dict\n :return: the counter or -1\n :rtype: int\n \"\"\"\n otplen = int(self.token.otplen)\n options = options or {}\n secretHOtp = self.token.get_otpkey()\n oCount = self.get_otp_count()\n inow = int(time.time())\n\n initTime = int(options.get('initTime', -1))\n if initTime != -1:\n server_time = int(initTime)\n else:\n server_time = time.time()\n\n # If we have a counter from the parameter list\n if not counter:\n # No counter, so we take the current token_time\n counter = self._time2counter(server_time,\n timeStepping=self.timestep)\n\n hmac2Otp = HmacOtp(secretHOtp,\n counter,\n otplen,\n self.get_hashlib(self.hashlib))\n res = hmac2Otp.checkOtp(anOtpVal,\n int(1),\n symetric=False)\n\n if res != -1:\n # on success, we have to save the last attempt\n self.set_otp_count(res)\n\n # here we calculate the new drift/shift between the server time\n # and the tokentime\n tokentime = self._counter2time(res, self.timestep)\n tokenDt = datetime.datetime.fromtimestamp(tokentime / 1.0)\n\n nowDt = datetime.datetime.fromtimestamp(inow / 1.0)\n\n lastauth = self._counter2time(oCount, self.timestep)\n lastauthDt = datetime.datetime.fromtimestamp(lastauth / 1.0)\n\n log.debug(\"last auth : {0!r}\".format(lastauthDt))\n log.debug(\"tokentime : {0!r}\".format(tokenDt))\n log.debug(\"now : {0!r}\".format(nowDt))\n log.debug(\"delta : {0!r}\".format((tokentime - inow)))\n\n return res\n\n def get_otp(self, current_time=None, do_truncation=True,\n time_seconds=None, challenge=None):\n \"\"\"\n get the next OTP value\n\n :param current_time: the current time, for which the OTP value\n should be calculated for.\n :type current_time: datetime object\n :param time_seconds: the current time, for which the OTP value\n should be calculated for (date +%s)\n :type: time_seconds: int, unix system time seconds\n :return: next otp value, and PIN, if possible\n :rtype: tuple\n \"\"\"\n otplen = int(self.token.otplen)\n secretHOtp = self.token.get_otpkey()\n\n hmac2Otp = HmacOtp(secretHOtp,\n self.get_otp_count(),\n otplen,\n self.get_hashlib(self.hashlib))\n\n if time_seconds is None:\n time_seconds = time.time()\n if current_time:\n time_seconds = self._time2float(current_time)\n\n counter = int(time_seconds / self.timestep)\n otpval = hmac2Otp.generate(counter=counter,\n inc_counter=False,\n do_truncation=do_truncation,\n challenge=challenge)\n\n pin = self.token.get_pin()\n combined = \"{0!s}{1!s}\".format(otpval, pin)\n if get_from_config(\"PrependPin\") == \"True\":\n combined = \"{0!s}{1!s}\".format(pin, otpval)\n\n return 1, pin, otpval, combined\n\n @log_with(log)\n def get_multi_otp(self, count=0, epoch_start=0, epoch_end=0,\n curTime=None, timestamp=None):\n \"\"\"\n return a dictionary of multiple future OTP values\n of the HOTP/HMAC token\n\n :param count: how many otp values should be returned\n :type count: int\n :param epoch_start: not implemented\n :param epoch_end: not implemented\n :param curTime: Simulate the servertime\n :type curTime: datetime.datetime\n :param timestamp: Simulate the servertime\n :type timestamp: epoch time\n :return: tuple of status: boolean, error: text and the OTP dictionary\n\n \"\"\"\n otp_dict = {\"type\": \"DayPasswordToken\", \"otp\": {}}\n ret = False\n error = \"No count specified\"\n\n otplen = int(self.token.otplen)\n secretHOtp = self.token.get_otpkey()\n\n hmac2Otp = 
HmacOtp(secretHOtp, self.get_otp_count(),\n otplen, self.get_hashlib(self.hashlib))\n\n if curTime:\n # datetime object provided for simulation\n tCounter = self._time2float(curTime)\n elif timestamp:\n # epoch time provided for simulation\n tCounter = int(timestamp)\n else:\n # use the current server time\n tCounter = int(time.time())\n\n counter = int(tCounter / self.timestep)\n\n if count > 0:\n error = \"OK\"\n for i in range(0, count):\n otpval = hmac2Otp.generate(counter=counter + i,\n inc_counter=False)\n timeCounter = (counter + i) * self.timestep\n\n val_time = datetime.datetime. \\\n fromtimestamp(timeCounter).strftime(\"%Y-%m-%d %H:%M:%S\")\n otp_dict[\"otp\"][counter + i] = {'otpval': otpval,\n 'time': val_time}\n ret = True\n\n return ret, error, otp_dict\n\n @staticmethod\n def get_setting_type(key):\n settings = {\"daypassword.hashlib\": \"public\",\n \"daypassword.timeStep\": \"public\"}\n return settings.get(key, \"\")\n\n @classmethod\n def get_default_settings(cls, g, params):\n \"\"\"\n This method returns a dictionary with default settings for token\n enrollment.\n These default settings are defined in SCOPE.USER or SCOPE.ADMIN and are\n daypassword_hashlib, daypassword_timestep and daypassword_otplen.\n If these are set, the user or admin will only be able to enroll tokens\n with these values.\n\n The returned dictionary is added to the parameters of the API call.\n\n :param g: context object, see documentation of ``Match``\n :param params: The call parameters\n :type params: dict\n :return: default parameters\n \"\"\"\n ret = {}\n if not g.logged_in_user:\n return ret\n (role, username, userrealm, adminuser, adminrealm) = determine_logged_in_userparams(g.logged_in_user,\n params)\n hashlib_pol = Match.generic(g, scope=role,\n action=\"daypassword_hashlib\",\n user=username,\n realm=userrealm,\n adminuser=adminuser,\n adminrealm=adminrealm).action_values(unique=True)\n if hashlib_pol:\n ret[\"hashlib\"] = list(hashlib_pol)[0]\n\n timestep_pol = Match.generic(g, scope=role,\n action=\"daypassword_timestep\",\n user=username,\n realm=userrealm,\n adminuser=adminuser,\n adminrealm=adminrealm).action_values(unique=True)\n if timestep_pol:\n ret[\"timeStep\"] = list(timestep_pol)[0]\n\n otplen_pol = Match.generic(g, scope=role,\n action=\"daypassword_otplen\",\n user=username,\n realm=userrealm,\n adminuser=adminuser,\n adminrealm=adminrealm).action_values(unique=True)\n if otplen_pol:\n ret[\"otplen\"] = list(otplen_pol)[0]\n\n return ret\n\n @staticmethod\n def get_import_csv(l):\n \"\"\"\n Read the list from a csv file and return a dictionary, that can be used\n to do a token_init.\n\n :param l: The list of the line of a csv file\n :type l: list\n :return: A dictionary of init params\n \"\"\"\n params = TokenClass.get_import_csv(l)\n # timeStep\n if len(l) >= 5:\n params[\"timeStep\"] = int(l[4].strip())\n else:\n params[\"timeStep\"] = 30\n\n return params\n", "path": "privacyidea/lib/tokens/daypasswordtoken.py" } ]
diff --git a/privacyidea/lib/tokens/daypasswordtoken.py b/privacyidea/lib/tokens/daypasswordtoken.py index 1eecff1cb5..555844a5e9 100644 --- a/privacyidea/lib/tokens/daypasswordtoken.py +++ b/privacyidea/lib/tokens/daypasswordtoken.py @@ -33,7 +33,7 @@ class DayPasswordTokenClass(TotpTokenClass): previous_otp_offset = 0 - desc_timestep = _('Specify the time step of the DayPassword token.') + desc_timestep = _('Specify the time step of the DayPassword token. For example: "24h"') @log_with(log) def __init__(self, db_token):
ipython__ipython-9202
IPythonLexer test failure with pygments 2.1 Jenkins is showing us a test failure on Windows: https://jenkins.jupyter.org/job/windows-multi/_pyversion=3.5,label=windows/504/testReport/ It looks like `$HOME` is unexpectedly being split into two tokens. I suspect it's failing since pygments 2.1 was released. I see the same locally on Linux, but it's not failing on Travis (maybe we're not installing pygments so those tests are skipped?). @abalkin and @chebee7i have done the most significant work on the lexer machinery. Any ideas?
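The accepted fix (see the pr_diff further down in this row) stops hard-coding the bash tokens the test expects and instead asks pygments' own `BashLexer` what it emits for the part of the line after `!`, so the test tracks whatever tokenization the installed pygments version produces. A rough standalone sketch of that idea, assuming IPython and pygments are importable; it mirrors the test change rather than adding anything new:

```python
from pygments.token import Token
from pygments.lexers import BashLexer
from IPython.lib.lexers import IPythonLexer

fragment = "!echo $HOME\n"

# Expect the "!" shell-escape operator, then whatever the installed
# BashLexer currently emits for the rest of the line (this is the part
# whose tokenization changed in pygments 2.1).
expected = [(Token.Operator, "!")]
expected.extend(BashLexer().get_tokens(fragment[1:]))

assert expected == list(IPythonLexer().get_tokens(fragment))
```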
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Setup script for IPython.\n\nUnder Posix environments it works like a typical setup.py script.\nUnder Windows, the command sdist is not supported, since IPython\nrequires utilities which are not available under Windows.\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2008-2011, IPython Development Team.\n# Copyright (c) 2001-2007, Fernando Perez <[email protected]>\n# Copyright (c) 2001, Janko Hauser <[email protected]>\n# Copyright (c) 2001, Nathaniel Gray <[email protected]>\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.rst, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\nimport sys\n\n# This check is also made in IPython/__init__, don't forget to update both when\n# changing Python version requirements.\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: IPython requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\nPY3 = (sys.version_info[0] >= 3)\n\n# At least we're on the python version we need, move on.\n\n#-------------------------------------------------------------------------------\n# Imports\n#-------------------------------------------------------------------------------\n\n# Stdlib imports\nimport os\n\nfrom glob import glob\n\n# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly\n# update it when the contents of directories change.\nif os.path.exists('MANIFEST'): os.remove('MANIFEST')\n\nfrom distutils.core import setup\n\n# Our own imports\nfrom setupbase import target_update\n\nfrom setupbase import (\n setup_args,\n find_packages,\n find_package_data,\n check_package_data_first,\n find_entry_points,\n build_scripts_entrypt,\n find_data_files,\n git_prebuild,\n install_symlinked,\n install_lib_symlink,\n install_scripts_for_symlink,\n unsymlink,\n)\n\nisfile = os.path.isfile\npjoin = os.path.join\n\n#-------------------------------------------------------------------------------\n# Handle OS specific things\n#-------------------------------------------------------------------------------\n\nif os.name in ('nt','dos'):\n os_name = 'windows'\nelse:\n os_name = os.name\n\n# Under Windows, 'sdist' has not been supported. Now that the docs build with\n# Sphinx it might work, but let's not turn it on until someone confirms that it\n# actually works.\nif os_name == 'windows' and 'sdist' in sys.argv:\n print('The sdist command is not available under Windows. Exiting.')\n sys.exit(1)\n\n\n#-------------------------------------------------------------------------------\n# Things related to the IPython documentation\n#-------------------------------------------------------------------------------\n\n# update the manuals when building a source dist\nif len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):\n\n # List of things to be updated. 
Each entry is a triplet of args for\n # target_update()\n to_update = [\n ('docs/man/ipython.1.gz',\n ['docs/man/ipython.1'],\n 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),\n ]\n\n\n [ target_update(*t) for t in to_update ]\n\n#---------------------------------------------------------------------------\n# Find all the packages, package data, and data_files\n#---------------------------------------------------------------------------\n\npackages = find_packages()\npackage_data = find_package_data()\n\ndata_files = find_data_files()\n\nsetup_args['packages'] = packages\nsetup_args['package_data'] = package_data\nsetup_args['data_files'] = data_files\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n# imports here, so they are after setuptools import if there was one\nfrom distutils.command.sdist import sdist\nfrom distutils.command.upload import upload\n\nclass UploadWindowsInstallers(upload):\n\n description = \"Upload Windows installers to PyPI (only used from tools/release_windows.py)\"\n user_options = upload.user_options + [\n ('files=', 'f', 'exe file (or glob) to upload')\n ]\n def initialize_options(self):\n upload.initialize_options(self)\n meta = self.distribution.metadata\n base = '{name}-{version}'.format(\n name=meta.get_name(),\n version=meta.get_version()\n )\n self.files = os.path.join('dist', '%s.*.exe' % base)\n\n def run(self):\n for dist_file in glob(self.files):\n self.upload_file('bdist_wininst', 'any', dist_file)\n\nsetup_args['cmdclass'] = {\n 'build_py': \\\n check_package_data_first(git_prebuild('IPython')),\n 'sdist' : git_prebuild('IPython', sdist),\n 'upload_wininst' : UploadWindowsInstallers,\n 'symlink': install_symlinked,\n 'install_lib_symlink': install_lib_symlink,\n 'install_scripts_sym': install_scripts_for_symlink,\n 'unsymlink': unsymlink,\n}\n\n\n#---------------------------------------------------------------------------\n# Handle scripts, dependencies, and setuptools specific things\n#---------------------------------------------------------------------------\n\n# For some commands, use setuptools. Note that we do NOT list install here!\n# If you want a setuptools-enhanced install, just run 'setupegg.py install'\nneeds_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',\n 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',\n 'egg_info', 'easy_install', 'upload', 'install_egg_info',\n ))\n\nif len(needs_setuptools.intersection(sys.argv)) > 0:\n import setuptools\n\n# This dict is used for passing extra arguments that are setuptools\n# specific to setup\nsetuptools_extra_args = {}\n\n# setuptools requirements\n\nextras_require = dict(\n parallel = ['ipyparallel'],\n qtconsole = ['qtconsole'],\n doc = ['Sphinx>=1.3'],\n test = ['nose>=0.10.1', 'requests', 'testpath'],\n terminal = [],\n kernel = ['ipykernel'],\n nbformat = ['nbformat'],\n notebook = ['notebook', 'ipywidgets'],\n nbconvert = ['nbconvert'],\n)\ninstall_requires = [\n 'setuptools>=18.5',\n 'decorator',\n 'pickleshare',\n 'simplegeneric>0.8',\n 'traitlets',\n]\n\n# Platform-specific dependencies:\n# This is the correct way to specify these,\n# but requires pip >= 6. 
pip < 6 ignores these.\n\nextras_require.update({\n ':sys_platform != \"win32\"': ['pexpect'],\n ':sys_platform == \"darwin\"': ['appnope'],\n ':sys_platform == \"darwin\" and platform_python_implementation == \"CPython\"': ['gnureadline'],\n 'terminal:sys_platform == \"win32\"': ['pyreadline>=2'],\n 'test:python_version == \"2.7\"': ['mock'],\n})\n# FIXME: re-specify above platform dependencies for pip < 6\n# These would result in non-portable bdists.\nif not any(arg.startswith('bdist') for arg in sys.argv):\n if sys.version_info < (3, 3):\n extras_require['test'].append('mock')\n\n if sys.platform == 'darwin':\n install_requires.extend(['appnope'])\n have_readline = False\n try:\n import readline\n except ImportError:\n pass\n else:\n if 'libedit' not in readline.__doc__:\n have_readline = True\n if not have_readline:\n install_requires.extend(['gnureadline'])\n\n if sys.platform.startswith('win'):\n extras_require['terminal'].append('pyreadline>=2.0')\n else:\n install_requires.append('pexpect')\n \n # workaround pypa/setuptools#147, where setuptools misspells\n # platform_python_implementation as python_implementation\n if 'setuptools' in sys.modules:\n for key in list(extras_require):\n if 'platform_python_implementation' in key:\n new_key = key.replace('platform_python_implementation', 'python_implementation')\n extras_require[new_key] = extras_require.pop(key)\n\neverything = set()\nfor key, deps in extras_require.items():\n if ':' not in key:\n everything.update(deps)\nextras_require['all'] = everything\n\nif 'setuptools' in sys.modules:\n setuptools_extra_args['zip_safe'] = False\n setuptools_extra_args['entry_points'] = {\n 'console_scripts': find_entry_points(),\n 'pygments.lexers': [\n 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer',\n 'ipython = IPython.lib.lexers:IPythonLexer',\n 'ipython3 = IPython.lib.lexers:IPython3Lexer',\n ],\n }\n setup_args['extras_require'] = extras_require\n requires = setup_args['install_requires'] = install_requires\n\n # Script to be run by the windows binary installer after the default setup\n # routine, to add shortcuts and similar windows-only things. Windows\n # post-install scripts MUST reside in the scripts/ dir, otherwise distutils\n # doesn't find them.\n if 'bdist_wininst' in sys.argv:\n if len(sys.argv) > 2 and \\\n ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):\n print(\"ERROR: bdist_wininst must be run alone. Exiting.\", file=sys.stderr)\n sys.exit(1)\n setup_args['data_files'].append(\n ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')])\n setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]\n setup_args['options'] = {\"bdist_wininst\":\n {\"install_script\":\n \"ipython_win_post_install.py\"}}\n\nelse:\n # scripts has to be a non-empty list, or install_scripts isn't called\n setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()]\n\n setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt\n\n#---------------------------------------------------------------------------\n# Do the actual setup now\n#---------------------------------------------------------------------------\n\nsetup_args.update(setuptools_extra_args)\n\n\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Setup script for IPython.\n\nUnder Posix environments it works like a typical setup.py script.\nUnder Windows, the command sdist is not supported, since IPython\nrequires utilities which are not available under Windows.\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2008-2011, IPython Development Team.\n# Copyright (c) 2001-2007, Fernando Perez <[email protected]>\n# Copyright (c) 2001, Janko Hauser <[email protected]>\n# Copyright (c) 2001, Nathaniel Gray <[email protected]>\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.rst, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\nimport sys\n\n# This check is also made in IPython/__init__, don't forget to update both when\n# changing Python version requirements.\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: IPython requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\nPY3 = (sys.version_info[0] >= 3)\n\n# At least we're on the python version we need, move on.\n\n#-------------------------------------------------------------------------------\n# Imports\n#-------------------------------------------------------------------------------\n\n# Stdlib imports\nimport os\n\nfrom glob import glob\n\n# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly\n# update it when the contents of directories change.\nif os.path.exists('MANIFEST'): os.remove('MANIFEST')\n\nfrom distutils.core import setup\n\n# Our own imports\nfrom setupbase import target_update\n\nfrom setupbase import (\n setup_args,\n find_packages,\n find_package_data,\n check_package_data_first,\n find_entry_points,\n build_scripts_entrypt,\n find_data_files,\n git_prebuild,\n install_symlinked,\n install_lib_symlink,\n install_scripts_for_symlink,\n unsymlink,\n)\n\nisfile = os.path.isfile\npjoin = os.path.join\n\n#-------------------------------------------------------------------------------\n# Handle OS specific things\n#-------------------------------------------------------------------------------\n\nif os.name in ('nt','dos'):\n os_name = 'windows'\nelse:\n os_name = os.name\n\n# Under Windows, 'sdist' has not been supported. Now that the docs build with\n# Sphinx it might work, but let's not turn it on until someone confirms that it\n# actually works.\nif os_name == 'windows' and 'sdist' in sys.argv:\n print('The sdist command is not available under Windows. Exiting.')\n sys.exit(1)\n\n\n#-------------------------------------------------------------------------------\n# Things related to the IPython documentation\n#-------------------------------------------------------------------------------\n\n# update the manuals when building a source dist\nif len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):\n\n # List of things to be updated. 
Each entry is a triplet of args for\n # target_update()\n to_update = [\n ('docs/man/ipython.1.gz',\n ['docs/man/ipython.1'],\n 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),\n ]\n\n\n [ target_update(*t) for t in to_update ]\n\n#---------------------------------------------------------------------------\n# Find all the packages, package data, and data_files\n#---------------------------------------------------------------------------\n\npackages = find_packages()\npackage_data = find_package_data()\n\ndata_files = find_data_files()\n\nsetup_args['packages'] = packages\nsetup_args['package_data'] = package_data\nsetup_args['data_files'] = data_files\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n# imports here, so they are after setuptools import if there was one\nfrom distutils.command.sdist import sdist\nfrom distutils.command.upload import upload\n\nclass UploadWindowsInstallers(upload):\n\n description = \"Upload Windows installers to PyPI (only used from tools/release_windows.py)\"\n user_options = upload.user_options + [\n ('files=', 'f', 'exe file (or glob) to upload')\n ]\n def initialize_options(self):\n upload.initialize_options(self)\n meta = self.distribution.metadata\n base = '{name}-{version}'.format(\n name=meta.get_name(),\n version=meta.get_version()\n )\n self.files = os.path.join('dist', '%s.*.exe' % base)\n\n def run(self):\n for dist_file in glob(self.files):\n self.upload_file('bdist_wininst', 'any', dist_file)\n\nsetup_args['cmdclass'] = {\n 'build_py': \\\n check_package_data_first(git_prebuild('IPython')),\n 'sdist' : git_prebuild('IPython', sdist),\n 'upload_wininst' : UploadWindowsInstallers,\n 'symlink': install_symlinked,\n 'install_lib_symlink': install_lib_symlink,\n 'install_scripts_sym': install_scripts_for_symlink,\n 'unsymlink': unsymlink,\n}\n\n\n#---------------------------------------------------------------------------\n# Handle scripts, dependencies, and setuptools specific things\n#---------------------------------------------------------------------------\n\n# For some commands, use setuptools. Note that we do NOT list install here!\n# If you want a setuptools-enhanced install, just run 'setupegg.py install'\nneeds_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',\n 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',\n 'egg_info', 'easy_install', 'upload', 'install_egg_info',\n ))\n\nif len(needs_setuptools.intersection(sys.argv)) > 0:\n import setuptools\n\n# This dict is used for passing extra arguments that are setuptools\n# specific to setup\nsetuptools_extra_args = {}\n\n# setuptools requirements\n\nextras_require = dict(\n parallel = ['ipyparallel'],\n qtconsole = ['qtconsole'],\n doc = ['Sphinx>=1.3'],\n test = ['nose>=0.10.1', 'requests', 'testpath', 'pygments'],\n terminal = [],\n kernel = ['ipykernel'],\n nbformat = ['nbformat'],\n notebook = ['notebook', 'ipywidgets'],\n nbconvert = ['nbconvert'],\n)\ninstall_requires = [\n 'setuptools>=18.5',\n 'decorator',\n 'pickleshare',\n 'simplegeneric>0.8',\n 'traitlets',\n]\n\n# Platform-specific dependencies:\n# This is the correct way to specify these,\n# but requires pip >= 6. 
pip < 6 ignores these.\n\nextras_require.update({\n ':sys_platform != \"win32\"': ['pexpect'],\n ':sys_platform == \"darwin\"': ['appnope'],\n ':sys_platform == \"darwin\" and platform_python_implementation == \"CPython\"': ['gnureadline'],\n 'terminal:sys_platform == \"win32\"': ['pyreadline>=2'],\n 'test:python_version == \"2.7\"': ['mock'],\n})\n# FIXME: re-specify above platform dependencies for pip < 6\n# These would result in non-portable bdists.\nif not any(arg.startswith('bdist') for arg in sys.argv):\n if sys.version_info < (3, 3):\n extras_require['test'].append('mock')\n\n if sys.platform == 'darwin':\n install_requires.extend(['appnope'])\n have_readline = False\n try:\n import readline\n except ImportError:\n pass\n else:\n if 'libedit' not in readline.__doc__:\n have_readline = True\n if not have_readline:\n install_requires.extend(['gnureadline'])\n\n if sys.platform.startswith('win'):\n extras_require['terminal'].append('pyreadline>=2.0')\n else:\n install_requires.append('pexpect')\n \n # workaround pypa/setuptools#147, where setuptools misspells\n # platform_python_implementation as python_implementation\n if 'setuptools' in sys.modules:\n for key in list(extras_require):\n if 'platform_python_implementation' in key:\n new_key = key.replace('platform_python_implementation', 'python_implementation')\n extras_require[new_key] = extras_require.pop(key)\n\neverything = set()\nfor key, deps in extras_require.items():\n if ':' not in key:\n everything.update(deps)\nextras_require['all'] = everything\n\nif 'setuptools' in sys.modules:\n setuptools_extra_args['zip_safe'] = False\n setuptools_extra_args['entry_points'] = {\n 'console_scripts': find_entry_points(),\n 'pygments.lexers': [\n 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer',\n 'ipython = IPython.lib.lexers:IPythonLexer',\n 'ipython3 = IPython.lib.lexers:IPython3Lexer',\n ],\n }\n setup_args['extras_require'] = extras_require\n requires = setup_args['install_requires'] = install_requires\n\n # Script to be run by the windows binary installer after the default setup\n # routine, to add shortcuts and similar windows-only things. Windows\n # post-install scripts MUST reside in the scripts/ dir, otherwise distutils\n # doesn't find them.\n if 'bdist_wininst' in sys.argv:\n if len(sys.argv) > 2 and \\\n ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):\n print(\"ERROR: bdist_wininst must be run alone. Exiting.\", file=sys.stderr)\n sys.exit(1)\n setup_args['data_files'].append(\n ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')])\n setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]\n setup_args['options'] = {\"bdist_wininst\":\n {\"install_script\":\n \"ipython_win_post_install.py\"}}\n\nelse:\n # scripts has to be a non-empty list, or install_scripts isn't called\n setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()]\n\n setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt\n\n#---------------------------------------------------------------------------\n# Do the actual setup now\n#---------------------------------------------------------------------------\n\nsetup_args.update(setuptools_extra_args)\n\n\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py" } ]
diff --git a/IPython/lib/tests/test_lexers.py b/IPython/lib/tests/test_lexers.py index 0f795504314..bb2de2e5f8e 100644 --- a/IPython/lib/tests/test_lexers.py +++ b/IPython/lib/tests/test_lexers.py @@ -5,6 +5,7 @@ from unittest import TestCase from pygments.token import Token +from pygments.lexers import BashLexer from .. import lexers @@ -13,16 +14,14 @@ class TestLexers(TestCase): """Collection of lexers tests""" def setUp(self): self.lexer = lexers.IPythonLexer() + self.bash_lexer = BashLexer() def testIPythonLexer(self): fragment = '!echo $HOME\n' tokens = [ (Token.Operator, '!'), - (Token.Name.Builtin, 'echo'), - (Token.Text, ' '), - (Token.Name.Variable, '$HOME'), - (Token.Text, '\n'), ] + tokens.extend(self.bash_lexer.get_tokens(fragment[1:])) self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) fragment_2 = '!' + fragment diff --git a/setup.py b/setup.py index 39233e72bf0..45fb2fdc074 100755 --- a/setup.py +++ b/setup.py @@ -182,7 +182,7 @@ def run(self): parallel = ['ipyparallel'], qtconsole = ['qtconsole'], doc = ['Sphinx>=1.3'], - test = ['nose>=0.10.1', 'requests', 'testpath'], + test = ['nose>=0.10.1', 'requests', 'testpath', 'pygments'], terminal = [], kernel = ['ipykernel'], nbformat = ['nbformat'],
sanic-org__sanic-2183
Request streaming results in a phantom 503 When streaming a request body, you end up with a phantom 503 response. To the client, everything looks fine. The data is transmitted, and a response is received OK.
```
[2021-07-05 22:45:47 +0300] - (sanic.access)[INFO][127.0.0.1:34264]: POST http://localhost:9999/upload 201 4
[2021-07-05 22:45:47 +0300] - (sanic.access)[INFO][127.0.0.1:34264]: POST http://localhost:9999/upload 503 666
[2021-07-05 22:45:47 +0300] [686804] [ERROR] Connection lost before response written @ ('127.0.0.1', 34264) <Request: POST /upload>
```
But there is an extra 503 that is caused by a task cancel while waiting on `receive_more`. This appears to be caused by leaving one extra CRLF in the buffer.
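For context on the "one extra CRLF" remark: a chunked upload ends with a zero-length chunk, and every piece of that framing is CRLF-delimited. The sketch below is only an illustration of the wire format and of the report's claim, with hypothetical buffer contents; it is not taken from Sanic's parser (the relevant handler code follows in the file dump below):

```python
# What a client sends for a small chunked POST body ("hello"):
wire = b"5\r\nhello\r\n" + b"0\r\n\r\n"
#       size, data, CRLFs          zero-size chunk that terminates the body

# Per the report, one CRLF of that framing is still sitting in the
# connection's receive buffer once the body has been consumed:
leftover = b"\r\n"

# With a non-empty buffer the keep-alive loop in http1() does not idle;
# it starts parsing a "next request" that never arrives, and the task is
# later cancelled while awaiting receive_more(), which plausibly surfaces
# as the second, phantom 503 seen in the access log above.
assert wire.endswith(b"0\r\n\r\n") and leftover in wire
```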
[ { "content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Optional\n\n\nif TYPE_CHECKING:\n from sanic.request import Request\n from sanic.response import BaseHTTPResponse\n\nfrom asyncio import CancelledError, sleep\nfrom enum import Enum\n\nfrom sanic.compat import Header\nfrom sanic.exceptions import (\n HeaderExpectationFailed,\n InvalidUsage,\n PayloadTooLarge,\n ServerError,\n ServiceUnavailable,\n)\nfrom sanic.headers import format_http1_response\nfrom sanic.helpers import has_message_body\nfrom sanic.log import access_logger, error_logger, logger\n\n\nclass Stage(Enum):\n \"\"\"\n Enum for representing the stage of the request/response cycle\n\n | ``IDLE`` Waiting for request\n | ``REQUEST`` Request headers being received\n | ``HANDLER`` Headers done, handler running\n | ``RESPONSE`` Response headers sent, body in progress\n | ``FAILED`` Unrecoverable state (error while sending response)\n |\n \"\"\"\n\n IDLE = 0 # Waiting for request\n REQUEST = 1 # Request headers being received\n HANDLER = 3 # Headers done, handler running\n RESPONSE = 4 # Response headers sent, body in progress\n FAILED = 100 # Unrecoverable state (error while sending response)\n\n\nHTTP_CONTINUE = b\"HTTP/1.1 100 Continue\\r\\n\\r\\n\"\n\n\nclass Http:\n \"\"\"\n Internal helper for managing the HTTP request/response cycle\n\n :raises ServerError:\n :raises PayloadTooLarge:\n :raises Exception:\n :raises InvalidUsage:\n :raises HeaderExpectationFailed:\n :raises RuntimeError:\n :raises ServerError:\n :raises ServerError:\n :raises InvalidUsage:\n :raises InvalidUsage:\n :raises InvalidUsage:\n :raises PayloadTooLarge:\n :raises RuntimeError:\n \"\"\"\n\n HEADER_CEILING = 16_384\n HEADER_MAX_SIZE = 0\n\n __slots__ = [\n \"_send\",\n \"_receive_more\",\n \"recv_buffer\",\n \"protocol\",\n \"expecting_continue\",\n \"stage\",\n \"keep_alive\",\n \"head_only\",\n \"request\",\n \"exception\",\n \"url\",\n \"request_body\",\n \"request_bytes\",\n \"request_bytes_left\",\n \"request_max_size\",\n \"response\",\n \"response_func\",\n \"response_size\",\n \"response_bytes_left\",\n \"upgrade_websocket\",\n ]\n\n def __init__(self, protocol):\n self._send = protocol.send\n self._receive_more = protocol.receive_more\n self.recv_buffer = protocol.recv_buffer\n self.protocol = protocol\n self.expecting_continue: bool = False\n self.stage: Stage = Stage.IDLE\n self.request_body = None\n self.request_bytes = None\n self.request_bytes_left = None\n self.request_max_size = protocol.request_max_size\n self.keep_alive = True\n self.head_only = None\n self.request: Request = None\n self.response: BaseHTTPResponse = None\n self.exception = None\n self.url = None\n self.upgrade_websocket = False\n\n def __bool__(self):\n \"\"\"Test if request handling is in progress\"\"\"\n return self.stage in (Stage.HANDLER, Stage.RESPONSE)\n\n async def http1(self):\n \"\"\"\n HTTP 1.1 connection handler\n \"\"\"\n while True: # As long as connection stays keep-alive\n try:\n # Receive and handle a request\n self.stage = Stage.REQUEST\n self.response_func = self.http1_response_header\n\n await self.http1_request_header()\n\n self.request.conn_info = self.protocol.conn_info\n await self.protocol.request_handler(self.request)\n\n # Handler finished, response should've been sent\n if self.stage is Stage.HANDLER and not self.upgrade_websocket:\n raise ServerError(\"Handler produced no response\")\n\n if self.stage is Stage.RESPONSE:\n await self.response.send(end_stream=True)\n except CancelledError:\n # Write an 
appropriate response before exiting\n e = self.exception or ServiceUnavailable(\"Cancelled\")\n self.exception = None\n self.keep_alive = False\n await self.error_response(e)\n except Exception as e:\n # Write an error response\n await self.error_response(e)\n\n # Try to consume any remaining request body\n if self.request_body:\n if self.response and 200 <= self.response.status < 300:\n error_logger.error(f\"{self.request} body not consumed.\")\n\n try:\n async for _ in self:\n pass\n except PayloadTooLarge:\n # We won't read the body and that may cause httpx and\n # tests to fail. This little delay allows clients to push\n # a small request into network buffers before we close the\n # socket, so that they are then able to read the response.\n await sleep(0.001)\n self.keep_alive = False\n\n # Exit and disconnect if no more requests can be taken\n if self.stage is not Stage.IDLE or not self.keep_alive:\n break\n\n # Wait for next request\n if not self.recv_buffer:\n await self._receive_more()\n\n async def http1_request_header(self):\n \"\"\"\n Receive and parse request header into self.request.\n \"\"\"\n # Receive until full header is in buffer\n buf = self.recv_buffer\n pos = 0\n\n while True:\n pos = buf.find(b\"\\r\\n\\r\\n\", pos)\n if pos != -1:\n break\n\n pos = max(0, len(buf) - 3)\n if pos >= self.HEADER_MAX_SIZE:\n break\n\n await self._receive_more()\n\n if pos >= self.HEADER_MAX_SIZE:\n raise PayloadTooLarge(\"Request header exceeds the size limit\")\n\n # Parse header content\n try:\n head = buf[:pos]\n raw_headers = head.decode(errors=\"surrogateescape\")\n reqline, *split_headers = raw_headers.split(\"\\r\\n\")\n method, self.url, protocol = reqline.split(\" \")\n\n if protocol == \"HTTP/1.1\":\n self.keep_alive = True\n elif protocol == \"HTTP/1.0\":\n self.keep_alive = False\n else:\n raise Exception # Raise a Bad Request on try-except\n\n self.head_only = method.upper() == \"HEAD\"\n request_body = False\n headers = []\n\n for name, value in (h.split(\":\", 1) for h in split_headers):\n name, value = h = name.lower(), value.lstrip()\n\n if name in (\"content-length\", \"transfer-encoding\"):\n request_body = True\n elif name == \"connection\":\n self.keep_alive = value.lower() == \"keep-alive\"\n\n headers.append(h)\n except Exception:\n raise InvalidUsage(\"Bad Request\")\n\n headers_instance = Header(headers)\n self.upgrade_websocket = (\n headers_instance.getone(\"upgrade\", \"\").lower() == \"websocket\"\n )\n\n # Prepare a Request object\n request = self.protocol.request_class(\n url_bytes=self.url.encode(),\n headers=headers_instance,\n head=bytes(head),\n version=protocol[5:],\n method=method,\n transport=self.protocol.transport,\n app=self.protocol.app,\n )\n\n # Prepare for request body\n self.request_bytes_left = self.request_bytes = 0\n if request_body:\n headers = request.headers\n expect = headers.getone(\"expect\", None)\n\n if expect is not None:\n if expect.lower() == \"100-continue\":\n self.expecting_continue = True\n else:\n raise HeaderExpectationFailed(f\"Unknown Expect: {expect}\")\n\n if headers.getone(\"transfer-encoding\", None) == \"chunked\":\n self.request_body = \"chunked\"\n pos -= 2 # One CRLF stays in buffer\n else:\n self.request_body = True\n self.request_bytes_left = self.request_bytes = int(\n headers[\"content-length\"]\n )\n\n # Remove header and its trailing CRLF\n del buf[: pos + 4]\n self.stage = Stage.HANDLER\n self.request, request.stream = request, self\n self.protocol.state[\"requests_count\"] += 1\n\n async def 
http1_response_header(\n self, data: bytes, end_stream: bool\n ) -> None:\n res = self.response\n\n # Compatibility with simple response body\n if not data and getattr(res, \"body\", None):\n data, end_stream = res.body, True # type: ignore\n\n size = len(data)\n headers = res.headers\n status = res.status\n self.response_size = size\n\n if not isinstance(status, int) or status < 200:\n raise RuntimeError(f\"Invalid response status {status!r}\")\n\n if not has_message_body(status):\n # Header-only response status\n self.response_func = None\n if (\n data\n or not end_stream\n or \"content-length\" in headers\n or \"transfer-encoding\" in headers\n ):\n data, size, end_stream = b\"\", 0, True\n headers.pop(\"content-length\", None)\n headers.pop(\"transfer-encoding\", None)\n logger.warning(\n f\"Message body set in response on {self.request.path}. \"\n f\"A {status} response may only have headers, no body.\"\n )\n elif self.head_only and \"content-length\" in headers:\n self.response_func = None\n elif end_stream:\n # Non-streaming response (all in one block)\n headers[\"content-length\"] = size\n self.response_func = None\n elif \"content-length\" in headers:\n # Streaming response with size known in advance\n self.response_bytes_left = int(headers[\"content-length\"]) - size\n self.response_func = self.http1_response_normal\n else:\n # Length not known, use chunked encoding\n headers[\"transfer-encoding\"] = \"chunked\"\n data = b\"%x\\r\\n%b\\r\\n\" % (size, data) if size else b\"\"\n self.response_func = self.http1_response_chunked\n\n if self.head_only:\n # Head request: don't send body\n data = b\"\"\n self.response_func = self.head_response_ignored\n\n headers[\"connection\"] = \"keep-alive\" if self.keep_alive else \"close\"\n ret = format_http1_response(status, res.processed_headers)\n if data:\n ret += data\n\n # Send a 100-continue if expected and not Expectation Failed\n if self.expecting_continue:\n self.expecting_continue = False\n if status != 417:\n ret = HTTP_CONTINUE + ret\n\n # Send response\n if self.protocol.access_log:\n self.log_response()\n\n await self._send(ret)\n self.stage = Stage.IDLE if end_stream else Stage.RESPONSE\n\n def head_response_ignored(self, data: bytes, end_stream: bool) -> None:\n \"\"\"\n HEAD response: body data silently ignored.\n \"\"\"\n if end_stream:\n self.response_func = None\n self.stage = Stage.IDLE\n\n async def http1_response_chunked(\n self, data: bytes, end_stream: bool\n ) -> None:\n \"\"\"\n Format a part of response body in chunked encoding.\n \"\"\"\n # Chunked encoding\n size = len(data)\n if end_stream:\n await self._send(\n b\"%x\\r\\n%b\\r\\n0\\r\\n\\r\\n\" % (size, data)\n if size\n else b\"0\\r\\n\\r\\n\"\n )\n self.response_func = None\n self.stage = Stage.IDLE\n elif size:\n await self._send(b\"%x\\r\\n%b\\r\\n\" % (size, data))\n\n async def http1_response_normal(\n self, data: bytes, end_stream: bool\n ) -> None:\n \"\"\"\n Format / keep track of non-chunked response.\n \"\"\"\n bytes_left = self.response_bytes_left - len(data)\n if bytes_left <= 0:\n if bytes_left < 0:\n raise ServerError(\"Response was bigger than content-length\")\n\n await self._send(data)\n self.response_func = None\n self.stage = Stage.IDLE\n else:\n if end_stream:\n raise ServerError(\"Response was smaller than content-length\")\n\n await self._send(data)\n self.response_bytes_left = bytes_left\n\n async def error_response(self, exception: Exception) -> None:\n \"\"\"\n Handle response when exception encountered\n \"\"\"\n # Disconnect after an 
error if in any other state than handler\n if self.stage is not Stage.HANDLER:\n self.keep_alive = False\n\n # Request failure? Respond but then disconnect\n if self.stage is Stage.REQUEST:\n self.stage = Stage.HANDLER\n\n # From request and handler states we can respond, otherwise be silent\n if self.stage is Stage.HANDLER:\n app = self.protocol.app\n\n if self.request is None:\n self.create_empty_request()\n\n await app.handle_exception(self.request, exception)\n\n def create_empty_request(self) -> None:\n \"\"\"\n Current error handling code needs a request object that won't exist\n if an error occurred during before a request was received. Create a\n bogus response for error handling use.\n \"\"\"\n\n # FIXME: Avoid this by refactoring error handling and response code\n self.request = self.protocol.request_class(\n url_bytes=self.url.encode() if self.url else b\"*\",\n headers=Header({}),\n version=\"1.1\",\n method=\"NONE\",\n transport=self.protocol.transport,\n app=self.protocol.app,\n )\n self.request.stream = self\n\n def log_response(self) -> None:\n \"\"\"\n Helper method provided to enable the logging of responses in case if\n the :attr:`HttpProtocol.access_log` is enabled.\n \"\"\"\n req, res = self.request, self.response\n extra = {\n \"status\": getattr(res, \"status\", 0),\n \"byte\": getattr(\n self, \"response_bytes_left\", getattr(self, \"response_size\", -1)\n ),\n \"host\": \"UNKNOWN\",\n \"request\": \"nil\",\n }\n if req is not None:\n if req.ip:\n extra[\"host\"] = f\"{req.ip}:{req.port}\"\n extra[\"request\"] = f\"{req.method} {req.url}\"\n access_logger.info(\"\", extra=extra)\n\n # Request methods\n\n async def __aiter__(self):\n \"\"\"\n Async iterate over request body.\n \"\"\"\n while self.request_body:\n data = await self.read()\n\n if data:\n yield data\n\n async def read(self) -> Optional[bytes]:\n \"\"\"\n Read some bytes of request body.\n \"\"\"\n\n # Send a 100-continue if needed\n if self.expecting_continue:\n self.expecting_continue = False\n await self._send(HTTP_CONTINUE)\n\n # Receive request body chunk\n buf = self.recv_buffer\n if self.request_bytes_left == 0 and self.request_body == \"chunked\":\n # Process a chunk header: \\r\\n<size>[;<chunk extensions>]\\r\\n\n while True:\n pos = buf.find(b\"\\r\\n\", 3)\n\n if pos != -1:\n break\n\n if len(buf) > 64:\n self.keep_alive = False\n raise InvalidUsage(\"Bad chunked encoding\")\n\n await self._receive_more()\n\n try:\n size = int(buf[2:pos].split(b\";\", 1)[0].decode(), 16)\n except Exception:\n self.keep_alive = False\n raise InvalidUsage(\"Bad chunked encoding\")\n\n del buf[: pos + 2]\n\n if size <= 0:\n self.request_body = None\n\n if size < 0:\n self.keep_alive = False\n raise InvalidUsage(\"Bad chunked encoding\")\n\n return None\n\n self.request_bytes_left = size\n self.request_bytes += size\n\n # Request size limit\n if self.request_bytes > self.request_max_size:\n self.keep_alive = False\n raise PayloadTooLarge(\"Request body exceeds the size limit\")\n\n # End of request body?\n if not self.request_bytes_left:\n self.request_body = None\n return None\n\n # At this point we are good to read/return up to request_bytes_left\n if not buf:\n await self._receive_more()\n\n data = bytes(buf[: self.request_bytes_left])\n size = len(data)\n\n del buf[:size]\n\n self.request_bytes_left -= size\n\n return data\n\n # Response methods\n\n def respond(self, response: BaseHTTPResponse) -> BaseHTTPResponse:\n \"\"\"\n Initiate new streaming response.\n\n Nothing is sent until the first send() call on 
the returned object, and\n calling this function multiple times will just alter the response to be\n given.\n \"\"\"\n if self.stage is not Stage.HANDLER:\n self.stage = Stage.FAILED\n raise RuntimeError(\"Response already started\")\n\n self.response, response.stream = response, self\n return response\n\n @property\n def send(self):\n return self.response_func\n\n @classmethod\n def set_header_max_size(cls, *sizes: int):\n cls.HEADER_MAX_SIZE = min(\n *sizes,\n cls.HEADER_CEILING,\n )\n", "path": "sanic/http.py" } ]
[ { "content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Optional\n\n\nif TYPE_CHECKING:\n from sanic.request import Request\n from sanic.response import BaseHTTPResponse\n\nfrom asyncio import CancelledError, sleep\nfrom enum import Enum\n\nfrom sanic.compat import Header\nfrom sanic.exceptions import (\n HeaderExpectationFailed,\n InvalidUsage,\n PayloadTooLarge,\n ServerError,\n ServiceUnavailable,\n)\nfrom sanic.headers import format_http1_response\nfrom sanic.helpers import has_message_body\nfrom sanic.log import access_logger, error_logger, logger\n\n\nclass Stage(Enum):\n \"\"\"\n Enum for representing the stage of the request/response cycle\n\n | ``IDLE`` Waiting for request\n | ``REQUEST`` Request headers being received\n | ``HANDLER`` Headers done, handler running\n | ``RESPONSE`` Response headers sent, body in progress\n | ``FAILED`` Unrecoverable state (error while sending response)\n |\n \"\"\"\n\n IDLE = 0 # Waiting for request\n REQUEST = 1 # Request headers being received\n HANDLER = 3 # Headers done, handler running\n RESPONSE = 4 # Response headers sent, body in progress\n FAILED = 100 # Unrecoverable state (error while sending response)\n\n\nHTTP_CONTINUE = b\"HTTP/1.1 100 Continue\\r\\n\\r\\n\"\n\n\nclass Http:\n \"\"\"\n Internal helper for managing the HTTP request/response cycle\n\n :raises ServerError:\n :raises PayloadTooLarge:\n :raises Exception:\n :raises InvalidUsage:\n :raises HeaderExpectationFailed:\n :raises RuntimeError:\n :raises ServerError:\n :raises ServerError:\n :raises InvalidUsage:\n :raises InvalidUsage:\n :raises InvalidUsage:\n :raises PayloadTooLarge:\n :raises RuntimeError:\n \"\"\"\n\n HEADER_CEILING = 16_384\n HEADER_MAX_SIZE = 0\n\n __slots__ = [\n \"_send\",\n \"_receive_more\",\n \"recv_buffer\",\n \"protocol\",\n \"expecting_continue\",\n \"stage\",\n \"keep_alive\",\n \"head_only\",\n \"request\",\n \"exception\",\n \"url\",\n \"request_body\",\n \"request_bytes\",\n \"request_bytes_left\",\n \"request_max_size\",\n \"response\",\n \"response_func\",\n \"response_size\",\n \"response_bytes_left\",\n \"upgrade_websocket\",\n ]\n\n def __init__(self, protocol):\n self._send = protocol.send\n self._receive_more = protocol.receive_more\n self.recv_buffer = protocol.recv_buffer\n self.protocol = protocol\n self.expecting_continue: bool = False\n self.stage: Stage = Stage.IDLE\n self.request_body = None\n self.request_bytes = None\n self.request_bytes_left = None\n self.request_max_size = protocol.request_max_size\n self.keep_alive = True\n self.head_only = None\n self.request: Request = None\n self.response: BaseHTTPResponse = None\n self.exception = None\n self.url = None\n self.upgrade_websocket = False\n\n def __bool__(self):\n \"\"\"Test if request handling is in progress\"\"\"\n return self.stage in (Stage.HANDLER, Stage.RESPONSE)\n\n async def http1(self):\n \"\"\"\n HTTP 1.1 connection handler\n \"\"\"\n while True: # As long as connection stays keep-alive\n try:\n # Receive and handle a request\n self.stage = Stage.REQUEST\n self.response_func = self.http1_response_header\n\n await self.http1_request_header()\n\n self.request.conn_info = self.protocol.conn_info\n await self.protocol.request_handler(self.request)\n\n # Handler finished, response should've been sent\n if self.stage is Stage.HANDLER and not self.upgrade_websocket:\n raise ServerError(\"Handler produced no response\")\n\n if self.stage is Stage.RESPONSE:\n await self.response.send(end_stream=True)\n except CancelledError:\n # Write an 
appropriate response before exiting\n e = self.exception or ServiceUnavailable(\"Cancelled\")\n self.exception = None\n self.keep_alive = False\n await self.error_response(e)\n except Exception as e:\n # Write an error response\n await self.error_response(e)\n\n # Try to consume any remaining request body\n if self.request_body:\n if self.response and 200 <= self.response.status < 300:\n error_logger.error(f\"{self.request} body not consumed.\")\n\n try:\n async for _ in self:\n pass\n except PayloadTooLarge:\n # We won't read the body and that may cause httpx and\n # tests to fail. This little delay allows clients to push\n # a small request into network buffers before we close the\n # socket, so that they are then able to read the response.\n await sleep(0.001)\n self.keep_alive = False\n\n # Exit and disconnect if no more requests can be taken\n if self.stage is not Stage.IDLE or not self.keep_alive:\n break\n\n # Wait for next request\n if not self.recv_buffer:\n await self._receive_more()\n\n async def http1_request_header(self):\n \"\"\"\n Receive and parse request header into self.request.\n \"\"\"\n # Receive until full header is in buffer\n buf = self.recv_buffer\n pos = 0\n\n while True:\n pos = buf.find(b\"\\r\\n\\r\\n\", pos)\n if pos != -1:\n break\n\n pos = max(0, len(buf) - 3)\n if pos >= self.HEADER_MAX_SIZE:\n break\n\n await self._receive_more()\n\n if pos >= self.HEADER_MAX_SIZE:\n raise PayloadTooLarge(\"Request header exceeds the size limit\")\n\n # Parse header content\n try:\n head = buf[:pos]\n raw_headers = head.decode(errors=\"surrogateescape\")\n reqline, *split_headers = raw_headers.split(\"\\r\\n\")\n method, self.url, protocol = reqline.split(\" \")\n\n if protocol == \"HTTP/1.1\":\n self.keep_alive = True\n elif protocol == \"HTTP/1.0\":\n self.keep_alive = False\n else:\n raise Exception # Raise a Bad Request on try-except\n\n self.head_only = method.upper() == \"HEAD\"\n request_body = False\n headers = []\n\n for name, value in (h.split(\":\", 1) for h in split_headers):\n name, value = h = name.lower(), value.lstrip()\n\n if name in (\"content-length\", \"transfer-encoding\"):\n request_body = True\n elif name == \"connection\":\n self.keep_alive = value.lower() == \"keep-alive\"\n\n headers.append(h)\n except Exception:\n raise InvalidUsage(\"Bad Request\")\n\n headers_instance = Header(headers)\n self.upgrade_websocket = (\n headers_instance.getone(\"upgrade\", \"\").lower() == \"websocket\"\n )\n\n # Prepare a Request object\n request = self.protocol.request_class(\n url_bytes=self.url.encode(),\n headers=headers_instance,\n head=bytes(head),\n version=protocol[5:],\n method=method,\n transport=self.protocol.transport,\n app=self.protocol.app,\n )\n\n # Prepare for request body\n self.request_bytes_left = self.request_bytes = 0\n if request_body:\n headers = request.headers\n expect = headers.getone(\"expect\", None)\n\n if expect is not None:\n if expect.lower() == \"100-continue\":\n self.expecting_continue = True\n else:\n raise HeaderExpectationFailed(f\"Unknown Expect: {expect}\")\n\n if headers.getone(\"transfer-encoding\", None) == \"chunked\":\n self.request_body = \"chunked\"\n pos -= 2 # One CRLF stays in buffer\n else:\n self.request_body = True\n self.request_bytes_left = self.request_bytes = int(\n headers[\"content-length\"]\n )\n\n # Remove header and its trailing CRLF\n del buf[: pos + 4]\n self.stage = Stage.HANDLER\n self.request, request.stream = request, self\n self.protocol.state[\"requests_count\"] += 1\n\n async def 
http1_response_header(\n self, data: bytes, end_stream: bool\n ) -> None:\n res = self.response\n\n # Compatibility with simple response body\n if not data and getattr(res, \"body\", None):\n data, end_stream = res.body, True # type: ignore\n\n size = len(data)\n headers = res.headers\n status = res.status\n self.response_size = size\n\n if not isinstance(status, int) or status < 200:\n raise RuntimeError(f\"Invalid response status {status!r}\")\n\n if not has_message_body(status):\n # Header-only response status\n self.response_func = None\n if (\n data\n or not end_stream\n or \"content-length\" in headers\n or \"transfer-encoding\" in headers\n ):\n data, size, end_stream = b\"\", 0, True\n headers.pop(\"content-length\", None)\n headers.pop(\"transfer-encoding\", None)\n logger.warning(\n f\"Message body set in response on {self.request.path}. \"\n f\"A {status} response may only have headers, no body.\"\n )\n elif self.head_only and \"content-length\" in headers:\n self.response_func = None\n elif end_stream:\n # Non-streaming response (all in one block)\n headers[\"content-length\"] = size\n self.response_func = None\n elif \"content-length\" in headers:\n # Streaming response with size known in advance\n self.response_bytes_left = int(headers[\"content-length\"]) - size\n self.response_func = self.http1_response_normal\n else:\n # Length not known, use chunked encoding\n headers[\"transfer-encoding\"] = \"chunked\"\n data = b\"%x\\r\\n%b\\r\\n\" % (size, data) if size else b\"\"\n self.response_func = self.http1_response_chunked\n\n if self.head_only:\n # Head request: don't send body\n data = b\"\"\n self.response_func = self.head_response_ignored\n\n headers[\"connection\"] = \"keep-alive\" if self.keep_alive else \"close\"\n ret = format_http1_response(status, res.processed_headers)\n if data:\n ret += data\n\n # Send a 100-continue if expected and not Expectation Failed\n if self.expecting_continue:\n self.expecting_continue = False\n if status != 417:\n ret = HTTP_CONTINUE + ret\n\n # Send response\n if self.protocol.access_log:\n self.log_response()\n\n await self._send(ret)\n self.stage = Stage.IDLE if end_stream else Stage.RESPONSE\n\n def head_response_ignored(self, data: bytes, end_stream: bool) -> None:\n \"\"\"\n HEAD response: body data silently ignored.\n \"\"\"\n if end_stream:\n self.response_func = None\n self.stage = Stage.IDLE\n\n async def http1_response_chunked(\n self, data: bytes, end_stream: bool\n ) -> None:\n \"\"\"\n Format a part of response body in chunked encoding.\n \"\"\"\n # Chunked encoding\n size = len(data)\n if end_stream:\n await self._send(\n b\"%x\\r\\n%b\\r\\n0\\r\\n\\r\\n\" % (size, data)\n if size\n else b\"0\\r\\n\\r\\n\"\n )\n self.response_func = None\n self.stage = Stage.IDLE\n elif size:\n await self._send(b\"%x\\r\\n%b\\r\\n\" % (size, data))\n\n async def http1_response_normal(\n self, data: bytes, end_stream: bool\n ) -> None:\n \"\"\"\n Format / keep track of non-chunked response.\n \"\"\"\n bytes_left = self.response_bytes_left - len(data)\n if bytes_left <= 0:\n if bytes_left < 0:\n raise ServerError(\"Response was bigger than content-length\")\n\n await self._send(data)\n self.response_func = None\n self.stage = Stage.IDLE\n else:\n if end_stream:\n raise ServerError(\"Response was smaller than content-length\")\n\n await self._send(data)\n self.response_bytes_left = bytes_left\n\n async def error_response(self, exception: Exception) -> None:\n \"\"\"\n Handle response when exception encountered\n \"\"\"\n # Disconnect after an 
error if in any other state than handler\n if self.stage is not Stage.HANDLER:\n self.keep_alive = False\n\n # Request failure? Respond but then disconnect\n if self.stage is Stage.REQUEST:\n self.stage = Stage.HANDLER\n\n # From request and handler states we can respond, otherwise be silent\n if self.stage is Stage.HANDLER:\n app = self.protocol.app\n\n if self.request is None:\n self.create_empty_request()\n\n await app.handle_exception(self.request, exception)\n\n def create_empty_request(self) -> None:\n \"\"\"\n Current error handling code needs a request object that won't exist\n if an error occurred during before a request was received. Create a\n bogus response for error handling use.\n \"\"\"\n\n # FIXME: Avoid this by refactoring error handling and response code\n self.request = self.protocol.request_class(\n url_bytes=self.url.encode() if self.url else b\"*\",\n headers=Header({}),\n version=\"1.1\",\n method=\"NONE\",\n transport=self.protocol.transport,\n app=self.protocol.app,\n )\n self.request.stream = self\n\n def log_response(self) -> None:\n \"\"\"\n Helper method provided to enable the logging of responses in case if\n the :attr:`HttpProtocol.access_log` is enabled.\n \"\"\"\n req, res = self.request, self.response\n extra = {\n \"status\": getattr(res, \"status\", 0),\n \"byte\": getattr(\n self, \"response_bytes_left\", getattr(self, \"response_size\", -1)\n ),\n \"host\": \"UNKNOWN\",\n \"request\": \"nil\",\n }\n if req is not None:\n if req.ip:\n extra[\"host\"] = f\"{req.ip}:{req.port}\"\n extra[\"request\"] = f\"{req.method} {req.url}\"\n access_logger.info(\"\", extra=extra)\n\n # Request methods\n\n async def __aiter__(self):\n \"\"\"\n Async iterate over request body.\n \"\"\"\n while self.request_body:\n data = await self.read()\n\n if data:\n yield data\n\n async def read(self) -> Optional[bytes]:\n \"\"\"\n Read some bytes of request body.\n \"\"\"\n\n # Send a 100-continue if needed\n if self.expecting_continue:\n self.expecting_continue = False\n await self._send(HTTP_CONTINUE)\n\n # Receive request body chunk\n buf = self.recv_buffer\n if self.request_bytes_left == 0 and self.request_body == \"chunked\":\n # Process a chunk header: \\r\\n<size>[;<chunk extensions>]\\r\\n\n while True:\n pos = buf.find(b\"\\r\\n\", 3)\n\n if pos != -1:\n break\n\n if len(buf) > 64:\n self.keep_alive = False\n raise InvalidUsage(\"Bad chunked encoding\")\n\n await self._receive_more()\n\n try:\n size = int(buf[2:pos].split(b\";\", 1)[0].decode(), 16)\n except Exception:\n self.keep_alive = False\n raise InvalidUsage(\"Bad chunked encoding\")\n\n del buf[: pos + 2]\n\n if size <= 0:\n self.request_body = None\n # Because we are leaving one CRLF in the buffer, we manually\n # reset the buffer here\n self.recv_buffer = bytearray()\n\n if size < 0:\n self.keep_alive = False\n raise InvalidUsage(\"Bad chunked encoding\")\n\n return None\n\n self.request_bytes_left = size\n self.request_bytes += size\n\n # Request size limit\n if self.request_bytes > self.request_max_size:\n self.keep_alive = False\n raise PayloadTooLarge(\"Request body exceeds the size limit\")\n\n # End of request body?\n if not self.request_bytes_left:\n self.request_body = None\n return None\n\n # At this point we are good to read/return up to request_bytes_left\n if not buf:\n await self._receive_more()\n\n data = bytes(buf[: self.request_bytes_left])\n size = len(data)\n\n del buf[:size]\n\n self.request_bytes_left -= size\n\n return data\n\n # Response methods\n\n def respond(self, response: 
BaseHTTPResponse) -> BaseHTTPResponse:\n \"\"\"\n Initiate new streaming response.\n\n Nothing is sent until the first send() call on the returned object, and\n calling this function multiple times will just alter the response to be\n given.\n \"\"\"\n if self.stage is not Stage.HANDLER:\n self.stage = Stage.FAILED\n raise RuntimeError(\"Response already started\")\n\n self.response, response.stream = response, self\n return response\n\n @property\n def send(self):\n return self.response_func\n\n @classmethod\n def set_header_max_size(cls, *sizes: int):\n cls.HEADER_MAX_SIZE = min(\n *sizes,\n cls.HEADER_CEILING,\n )\n", "path": "sanic/http.py" } ]
diff --git a/sanic/http.py b/sanic/http.py index 80301aff5c..a594e60c5e 100644 --- a/sanic/http.py +++ b/sanic/http.py @@ -490,6 +490,9 @@ async def read(self) -> Optional[bytes]: if size <= 0: self.request_body = None + # Because we are leaving one CRLF in the buffer, we manually + # reset the buffer here + self.recv_buffer = bytearray() if size < 0: self.keep_alive = False
google__pytype-144
Future-proof preconditions.py:_TOKEN_RE
Python 3.7 adds a warning about possible future changes to `re`: https://bugs.python.org/issue30349
A future version of Python will add nested sets, which allow nesting of character sets using `[...]`. Escape the inner `[` in the regex so it doesn't trigger the nested-set warning.
Closes #140.
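A small check of the behaviour in question (assuming Python 3.7 or newer, where `re` emits the nested-set `FutureWarning`); the sample input string is made up for illustration:

```python
# The original pattern opens the character class with an unescaped '[',
# which Python 3.7+ flags as a possible nested set; escaping it keeps
# the same matches and avoids the warning.
import re
import warnings

OLD = r"\s*(?:(?P<literal>[[\]{}])|(?P<word>[a-zA-Z_]\w*))"
NEW = r"\s*(?:(?P<literal>[\[\]{}])|(?P<word>[a-zA-Z_]\w*))"

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    re.compile(OLD)
print([str(w.message) for w in caught])  # expect a "Possible nested set" FutureWarning

token = re.compile(NEW).match("tuple[int or None]")
print(token.group("word"))  # -> 'tuple', same behaviour as before
```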
[ { "content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Preconditions for automatic argument checking.\"\"\"\n\nimport re\n\nfrom pytype import utils\n\n\nclass PreconditionError(ValueError):\n pass\n\n\nclass _Precondition(object):\n \"\"\"Base class for preconditions.\"\"\"\n\n def check(self, value):\n \"\"\"Raise PreconditionError if value does not match condition.\"\"\"\n raise NotImplementedError\n\n def allowed_types(self):\n \"\"\"Returns a set of types or typenames that are allowed.\"\"\"\n raise NotImplementedError\n\n\nclass _ClassNamePrecondition(_Precondition):\n \"\"\"Precondition that expects an instance of a specific class.\"\"\"\n\n def __init__(self, class_name):\n super(_ClassNamePrecondition, self).__init__()\n self._class_name = class_name\n\n def check(self, value):\n actual = type(value).__name__\n if actual != self._class_name:\n raise PreconditionError(\n \"actual=%s, expected=%s\" % (actual, self._class_name))\n\n def allowed_types(self):\n return {self._class_name}\n\n\nclass _IsInstancePrecondition(_Precondition):\n \"\"\"Precondition that expects an instance of a class or subclass.\"\"\"\n\n def __init__(self, cls):\n super(_IsInstancePrecondition, self).__init__()\n self._cls = cls\n\n def check(self, value):\n if not isinstance(value, self._cls):\n raise PreconditionError(\n \"actual=%s, expected_superclass=%s\" % (\n type(value).__name__, self._cls.__name__))\n\n def allowed_types(self):\n return {self._cls}\n\n\n_REGISTERED_CLASSES = {}\n\n\ndef register(cls):\n \"\"\"Register a class object for use in {X} syntax.\"\"\"\n name = cls.__name__\n assert name not in _REGISTERED_CLASSES\n _REGISTERED_CLASSES[name] = _IsInstancePrecondition(cls)\n\n\nclass _TuplePrecondition(_Precondition):\n \"\"\"Precondition that expects a tuple.\"\"\"\n\n def __init__(self, element_condition):\n super(_TuplePrecondition, self).__init__()\n self._element_condition = element_condition\n\n def check(self, value):\n if not isinstance(value, tuple):\n raise PreconditionError(\n \"actual=%s, expected=tuple\" % type(value).__name__)\n for v in value:\n self._element_condition.check(v)\n\n def allowed_types(self):\n return self._element_condition.allowed_types()\n\n\nclass _OrPrecondition(_Precondition):\n \"\"\"Precondition that expects one of various choices to match.\"\"\"\n\n def __init__(self, choices):\n super(_OrPrecondition, self).__init__()\n self._choices = choices\n\n def check(self, value):\n errors = []\n for c in self._choices:\n try:\n c.check(value)\n return\n except PreconditionError as e:\n errors.append(e)\n raise PreconditionError(\n \" or \".join(\"(%s)\" % utils.message(e) for e in errors))\n\n def allowed_types(self):\n allowed = set()\n for c in self._choices:\n allowed |= c.allowed_types()\n return allowed\n\n\nclass CallChecker(object):\n \"\"\"Class that performs argument checks against a collection of conditions.\"\"\"\n\n def __init__(self, condition_pairs):\n \"\"\"Create a checker given 
a sequence of (name, precondition) pairs.\"\"\"\n self._arg_sequence = tuple(condition_pairs)\n self._arg_map = dict(self._arg_sequence)\n\n def check(self, *args, **kwargs):\n \"\"\"Raise PreconditionError if the actual call is invalid.\"\"\"\n # This check is intended to be in addition to an actual call, so an\n # incorrect number of args or undefined kwargs should be caught elsewhere.\n for value, pair in zip(args, self._arg_sequence):\n name, condition = pair\n self._check_arg(condition, name, value)\n for name, value in kwargs.items():\n condition = self._arg_map.get(name)\n self._check_arg(condition, name, value)\n\n def _check_arg(self, condition, name, value):\n if condition:\n try:\n condition.check(value)\n except PreconditionError as e:\n raise PreconditionError(\"argument=%s: %s.\" % (name, utils.message(e)))\n\n def allowed_types(self):\n \"\"\"Determines the types and typenames allowed by calls to the checker.\n\n Returns:\n A set of types and/or typenames (strings). A typename matches\n only that one class while a type matches any subclass of the type.\n \"\"\"\n allowed = set()\n for _, c in self._arg_sequence:\n allowed |= c.allowed_types()\n return allowed\n\n\n# RE to match a single token. Leading whitepace is ignored.\n_TOKEN_RE = re.compile(\n r\"\\s*(?:(?P<literal>[[\\]{}])|(?P<word>[a-zA-Z_]\\w*))\")\n\n# Token codes (aside from literal characters)\n_TOKEN_NAME = 1\n_TOKEN_TUPLE = 2\n_TOKEN_OR = 3\n\n_RESERVED = {\n \"tuple\": _TOKEN_TUPLE,\n \"or\": _TOKEN_OR,\n}\n\n\nclass _Parser(object):\n \"\"\"A parser for precondition specifications.\"\"\"\n\n def __init__(self, spec):\n self._spec = spec.strip() # Must strip trailing whitespace.\n self._pos = 0\n self._pending_token = None\n\n def parse(self):\n \"\"\"Parse the spec and return a precondition.\"\"\"\n cond = self._parse_or()\n self._expect(None)\n return cond\n\n def _peek_token(self):\n \"\"\"Return the token code of the next token (do not consume token).\"\"\"\n if self._pending_token is None:\n self._pending_token = self._pop_token()\n return self._pending_token[0]\n\n def _pop_token(self):\n \"\"\"Consume the next token and return (token_code, token_val).\"\"\"\n if self._pending_token is not None:\n result = self._pending_token\n self._pending_token = None\n return result\n\n if self._pos >= len(self._spec):\n return None, None\n m = _TOKEN_RE.match(self._spec, self._pos)\n if not m:\n raise ValueError(\"Syntax Error\")\n self._pos = m.end()\n literal = m.group(\"literal\")\n if literal:\n return literal, None\n word = m.group(\"word\")\n t = _RESERVED.get(word)\n if t:\n return t, None\n else:\n return _TOKEN_NAME, word\n\n def _expect(self, expected_code):\n \"\"\"Pop the next token, raise a ValueError if the code does not match.\"\"\"\n t, val = self._pop_token() # pylint: disable=unpacking-non-sequence\n if t != expected_code:\n raise ValueError(\"Syntax Error\")\n return val\n\n def _parse_or(self):\n \"\"\"Parse one or more conditions separated by \"or\".\"\"\"\n choices = [self._parse_one()]\n while self._peek_token() == _TOKEN_OR:\n self._pop_token()\n choices.append(self._parse_one())\n if len(choices) == 1:\n return choices[0]\n else:\n return _OrPrecondition(choices)\n\n def _parse_one(self):\n \"\"\"Parse a single condition (not including \"or\").\"\"\"\n t, val = self._pop_token() # pylint: disable=unpacking-non-sequence\n if t == _TOKEN_NAME:\n return _ClassNamePrecondition(val if val != \"None\" else \"NoneType\")\n elif t == \"{\":\n name = self._expect(_TOKEN_NAME)\n self._expect(\"}\")\n 
cond = _REGISTERED_CLASSES.get(name)\n if cond is None:\n raise ValueError(\n \"Class '%s' is not registered for preconditions.\" % name)\n return cond\n elif t == _TOKEN_TUPLE:\n self._expect(\"[\")\n element = self._parse_or()\n self._expect(\"]\")\n return _TuplePrecondition(element)\n raise ValueError(\"Syntax Error\")\n\n\ndef parse(spec):\n \"\"\"Return a _Precondition for the given string.\"\"\"\n return _Parser(spec).parse()\n\n\ndef parse_arg(arg_spec):\n \"\"\"Return (name, precondition) or (name, None) for given argument spec.\"\"\"\n name, _, spec = arg_spec.partition(\":\")\n return name, parse(spec) if spec else None\n", "path": "pytype/pytd/parse/preconditions.py" } ]
[ { "content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Preconditions for automatic argument checking.\"\"\"\n\nimport re\n\nfrom pytype import utils\n\n\nclass PreconditionError(ValueError):\n pass\n\n\nclass _Precondition(object):\n \"\"\"Base class for preconditions.\"\"\"\n\n def check(self, value):\n \"\"\"Raise PreconditionError if value does not match condition.\"\"\"\n raise NotImplementedError\n\n def allowed_types(self):\n \"\"\"Returns a set of types or typenames that are allowed.\"\"\"\n raise NotImplementedError\n\n\nclass _ClassNamePrecondition(_Precondition):\n \"\"\"Precondition that expects an instance of a specific class.\"\"\"\n\n def __init__(self, class_name):\n super(_ClassNamePrecondition, self).__init__()\n self._class_name = class_name\n\n def check(self, value):\n actual = type(value).__name__\n if actual != self._class_name:\n raise PreconditionError(\n \"actual=%s, expected=%s\" % (actual, self._class_name))\n\n def allowed_types(self):\n return {self._class_name}\n\n\nclass _IsInstancePrecondition(_Precondition):\n \"\"\"Precondition that expects an instance of a class or subclass.\"\"\"\n\n def __init__(self, cls):\n super(_IsInstancePrecondition, self).__init__()\n self._cls = cls\n\n def check(self, value):\n if not isinstance(value, self._cls):\n raise PreconditionError(\n \"actual=%s, expected_superclass=%s\" % (\n type(value).__name__, self._cls.__name__))\n\n def allowed_types(self):\n return {self._cls}\n\n\n_REGISTERED_CLASSES = {}\n\n\ndef register(cls):\n \"\"\"Register a class object for use in {X} syntax.\"\"\"\n name = cls.__name__\n assert name not in _REGISTERED_CLASSES\n _REGISTERED_CLASSES[name] = _IsInstancePrecondition(cls)\n\n\nclass _TuplePrecondition(_Precondition):\n \"\"\"Precondition that expects a tuple.\"\"\"\n\n def __init__(self, element_condition):\n super(_TuplePrecondition, self).__init__()\n self._element_condition = element_condition\n\n def check(self, value):\n if not isinstance(value, tuple):\n raise PreconditionError(\n \"actual=%s, expected=tuple\" % type(value).__name__)\n for v in value:\n self._element_condition.check(v)\n\n def allowed_types(self):\n return self._element_condition.allowed_types()\n\n\nclass _OrPrecondition(_Precondition):\n \"\"\"Precondition that expects one of various choices to match.\"\"\"\n\n def __init__(self, choices):\n super(_OrPrecondition, self).__init__()\n self._choices = choices\n\n def check(self, value):\n errors = []\n for c in self._choices:\n try:\n c.check(value)\n return\n except PreconditionError as e:\n errors.append(e)\n raise PreconditionError(\n \" or \".join(\"(%s)\" % utils.message(e) for e in errors))\n\n def allowed_types(self):\n allowed = set()\n for c in self._choices:\n allowed |= c.allowed_types()\n return allowed\n\n\nclass CallChecker(object):\n \"\"\"Class that performs argument checks against a collection of conditions.\"\"\"\n\n def __init__(self, condition_pairs):\n \"\"\"Create a checker given 
a sequence of (name, precondition) pairs.\"\"\"\n self._arg_sequence = tuple(condition_pairs)\n self._arg_map = dict(self._arg_sequence)\n\n def check(self, *args, **kwargs):\n \"\"\"Raise PreconditionError if the actual call is invalid.\"\"\"\n # This check is intended to be in addition to an actual call, so an\n # incorrect number of args or undefined kwargs should be caught elsewhere.\n for value, pair in zip(args, self._arg_sequence):\n name, condition = pair\n self._check_arg(condition, name, value)\n for name, value in kwargs.items():\n condition = self._arg_map.get(name)\n self._check_arg(condition, name, value)\n\n def _check_arg(self, condition, name, value):\n if condition:\n try:\n condition.check(value)\n except PreconditionError as e:\n raise PreconditionError(\"argument=%s: %s.\" % (name, utils.message(e)))\n\n def allowed_types(self):\n \"\"\"Determines the types and typenames allowed by calls to the checker.\n\n Returns:\n A set of types and/or typenames (strings). A typename matches\n only that one class while a type matches any subclass of the type.\n \"\"\"\n allowed = set()\n for _, c in self._arg_sequence:\n allowed |= c.allowed_types()\n return allowed\n\n\n# RE to match a single token. Leading whitepace is ignored.\n_TOKEN_RE = re.compile(\n r\"\\s*(?:(?P<literal>[\\[\\]{}])|(?P<word>[a-zA-Z_]\\w*))\")\n\n# Token codes (aside from literal characters)\n_TOKEN_NAME = 1\n_TOKEN_TUPLE = 2\n_TOKEN_OR = 3\n\n_RESERVED = {\n \"tuple\": _TOKEN_TUPLE,\n \"or\": _TOKEN_OR,\n}\n\n\nclass _Parser(object):\n \"\"\"A parser for precondition specifications.\"\"\"\n\n def __init__(self, spec):\n self._spec = spec.strip() # Must strip trailing whitespace.\n self._pos = 0\n self._pending_token = None\n\n def parse(self):\n \"\"\"Parse the spec and return a precondition.\"\"\"\n cond = self._parse_or()\n self._expect(None)\n return cond\n\n def _peek_token(self):\n \"\"\"Return the token code of the next token (do not consume token).\"\"\"\n if self._pending_token is None:\n self._pending_token = self._pop_token()\n return self._pending_token[0]\n\n def _pop_token(self):\n \"\"\"Consume the next token and return (token_code, token_val).\"\"\"\n if self._pending_token is not None:\n result = self._pending_token\n self._pending_token = None\n return result\n\n if self._pos >= len(self._spec):\n return None, None\n m = _TOKEN_RE.match(self._spec, self._pos)\n if not m:\n raise ValueError(\"Syntax Error\")\n self._pos = m.end()\n literal = m.group(\"literal\")\n if literal:\n return literal, None\n word = m.group(\"word\")\n t = _RESERVED.get(word)\n if t:\n return t, None\n else:\n return _TOKEN_NAME, word\n\n def _expect(self, expected_code):\n \"\"\"Pop the next token, raise a ValueError if the code does not match.\"\"\"\n t, val = self._pop_token() # pylint: disable=unpacking-non-sequence\n if t != expected_code:\n raise ValueError(\"Syntax Error\")\n return val\n\n def _parse_or(self):\n \"\"\"Parse one or more conditions separated by \"or\".\"\"\"\n choices = [self._parse_one()]\n while self._peek_token() == _TOKEN_OR:\n self._pop_token()\n choices.append(self._parse_one())\n if len(choices) == 1:\n return choices[0]\n else:\n return _OrPrecondition(choices)\n\n def _parse_one(self):\n \"\"\"Parse a single condition (not including \"or\").\"\"\"\n t, val = self._pop_token() # pylint: disable=unpacking-non-sequence\n if t == _TOKEN_NAME:\n return _ClassNamePrecondition(val if val != \"None\" else \"NoneType\")\n elif t == \"{\":\n name = self._expect(_TOKEN_NAME)\n 
self._expect(\"}\")\n cond = _REGISTERED_CLASSES.get(name)\n if cond is None:\n raise ValueError(\n \"Class '%s' is not registered for preconditions.\" % name)\n return cond\n elif t == _TOKEN_TUPLE:\n self._expect(\"[\")\n element = self._parse_or()\n self._expect(\"]\")\n return _TuplePrecondition(element)\n raise ValueError(\"Syntax Error\")\n\n\ndef parse(spec):\n \"\"\"Return a _Precondition for the given string.\"\"\"\n return _Parser(spec).parse()\n\n\ndef parse_arg(arg_spec):\n \"\"\"Return (name, precondition) or (name, None) for given argument spec.\"\"\"\n name, _, spec = arg_spec.partition(\":\")\n return name, parse(spec) if spec else None\n", "path": "pytype/pytd/parse/preconditions.py" } ]
diff --git a/pytype/pytd/parse/preconditions.py b/pytype/pytd/parse/preconditions.py index ebd9859e9..70fa62d5e 100644 --- a/pytype/pytd/parse/preconditions.py +++ b/pytype/pytd/parse/preconditions.py @@ -163,7 +163,7 @@ def allowed_types(self): # RE to match a single token. Leading whitepace is ignored. _TOKEN_RE = re.compile( - r"\s*(?:(?P<literal>[[\]{}])|(?P<word>[a-zA-Z_]\w*))") + r"\s*(?:(?P<literal>[\[\]{}])|(?P<word>[a-zA-Z_]\w*))") # Token codes (aside from literal characters) _TOKEN_NAME = 1
netbox-community__netbox-14370
"service" does not preserve the order of ports when editing ### NetBox version v3.6.5 ### Python version 3.8 ### Steps to Reproduce 1. Create a "service", and enter the ports as "9093,9095,9998-9999" 2. View the list of services 3. Edit the service (i.e. click the pencil icon at the end of the service row) ### Expected Behavior Either the ports to remain in the order originally entered, or to be sorted. (I note that the data type in the underlying Postgres column is `integer[]` which is an ordered list) ### Observed Behavior When viewing the table of services (`/ipam/services/`), the ports are shown in order: <img width="304" alt="image" src="https://github.com/netbox-community/netbox/assets/44789/632b5313-7241-45d3-8649-b16fb9c4b6f0"> It also shows the same when viewing the details of an individual service (e.g. `/ipam/services/2/`) However, when editing the service (`/ipam/services/2/edit/`), the ports are in a randomized order: <img width="681" alt="image" src="https://github.com/netbox-community/netbox/assets/44789/494f89cc-80b5-4b48-a27f-498992c159e3"> This matches what's in the database, which in the same randomized order: ``` netbox=# select ports from ipam_service where id=2 ports ----------------------- {9999,9093,9998,9095} (1 row) ```
[ { "content": "import re\n\nfrom django import forms\nfrom django.forms.models import fields_for_model\n\nfrom utilities.choices import unpack_grouped_choices\nfrom utilities.querysets import RestrictedQuerySet\nfrom .constants import *\n\n__all__ = (\n 'add_blank_choice',\n 'expand_alphanumeric_pattern',\n 'expand_ipaddress_pattern',\n 'form_from_model',\n 'get_field_value',\n 'get_selected_values',\n 'parse_alphanumeric_range',\n 'parse_numeric_range',\n 'restrict_form_fields',\n 'parse_csv',\n 'validate_csv',\n)\n\n\ndef parse_numeric_range(string, base=10):\n \"\"\"\n Expand a numeric range (continuous or not) into a decimal or\n hexadecimal list, as specified by the base parameter\n '0-3,5' => [0, 1, 2, 3, 5]\n '2,8-b,d,f' => [2, 8, 9, a, b, d, f]\n \"\"\"\n values = list()\n for dash_range in string.split(','):\n try:\n begin, end = dash_range.split('-')\n except ValueError:\n begin, end = dash_range, dash_range\n try:\n begin, end = int(begin.strip(), base=base), int(end.strip(), base=base) + 1\n except ValueError:\n raise forms.ValidationError(f'Range \"{dash_range}\" is invalid.')\n values.extend(range(begin, end))\n return list(set(values))\n\n\ndef parse_alphanumeric_range(string):\n \"\"\"\n Expand an alphanumeric range (continuous or not) into a list.\n 'a-d,f' => [a, b, c, d, f]\n '0-3,a-d' => [0, 1, 2, 3, a, b, c, d]\n \"\"\"\n values = []\n for dash_range in string.split(','):\n try:\n begin, end = dash_range.split('-')\n vals = begin + end\n # Break out of loop if there's an invalid pattern to return an error\n if (not (vals.isdigit() or vals.isalpha())) or (vals.isalpha() and not (vals.isupper() or vals.islower())):\n return []\n except ValueError:\n begin, end = dash_range, dash_range\n if begin.isdigit() and end.isdigit():\n if int(begin) >= int(end):\n raise forms.ValidationError(f'Range \"{dash_range}\" is invalid.')\n\n for n in list(range(int(begin), int(end) + 1)):\n values.append(n)\n else:\n # Value-based\n if begin == end:\n values.append(begin)\n # Range-based\n else:\n # Not a valid range (more than a single character)\n if not len(begin) == len(end) == 1:\n raise forms.ValidationError(f'Range \"{dash_range}\" is invalid.')\n\n if ord(begin) >= ord(end):\n raise forms.ValidationError(f'Range \"{dash_range}\" is invalid.')\n\n for n in list(range(ord(begin), ord(end) + 1)):\n values.append(chr(n))\n return values\n\n\ndef expand_alphanumeric_pattern(string):\n \"\"\"\n Expand an alphabetic pattern into a list of strings.\n \"\"\"\n lead, pattern, remnant = re.split(ALPHANUMERIC_EXPANSION_PATTERN, string, maxsplit=1)\n parsed_range = parse_alphanumeric_range(pattern)\n for i in parsed_range:\n if re.search(ALPHANUMERIC_EXPANSION_PATTERN, remnant):\n for string in expand_alphanumeric_pattern(remnant):\n yield \"{}{}{}\".format(lead, i, string)\n else:\n yield \"{}{}{}\".format(lead, i, remnant)\n\n\ndef expand_ipaddress_pattern(string, family):\n \"\"\"\n Expand an IP address pattern into a list of strings. Examples:\n '192.0.2.[1,2,100-250]/24' => ['192.0.2.1/24', '192.0.2.2/24', '192.0.2.100/24' ... '192.0.2.250/24']\n '2001:db8:0:[0,fd-ff]::/64' => ['2001:db8:0:0::/64', '2001:db8:0:fd::/64', ... 
'2001:db8:0:ff::/64']\n \"\"\"\n if family not in [4, 6]:\n raise Exception(\"Invalid IP address family: {}\".format(family))\n if family == 4:\n regex = IP4_EXPANSION_PATTERN\n base = 10\n else:\n regex = IP6_EXPANSION_PATTERN\n base = 16\n lead, pattern, remnant = re.split(regex, string, maxsplit=1)\n parsed_range = parse_numeric_range(pattern, base)\n for i in parsed_range:\n if re.search(regex, remnant):\n for string in expand_ipaddress_pattern(remnant, family):\n yield ''.join([lead, format(i, 'x' if family == 6 else 'd'), string])\n else:\n yield ''.join([lead, format(i, 'x' if family == 6 else 'd'), remnant])\n\n\ndef get_field_value(form, field_name):\n \"\"\"\n Return the current bound or initial value associated with a form field, prior to calling\n clean() for the form.\n \"\"\"\n field = form.fields[field_name]\n\n if form.is_bound:\n if data := form.data.get(field_name):\n if field.valid_value(data):\n return data\n\n return form.get_initial_for_field(field, field_name)\n\n\ndef get_selected_values(form, field_name):\n \"\"\"\n Return the list of selected human-friendly values for a form field\n \"\"\"\n if not hasattr(form, 'cleaned_data'):\n form.is_valid()\n filter_data = form.cleaned_data.get(field_name)\n field = form.fields[field_name]\n\n # Non-selection field\n if not hasattr(field, 'choices'):\n return [str(filter_data)]\n\n # Model choice field\n if type(field.choices) is forms.models.ModelChoiceIterator:\n # If this is a single-choice field, wrap its value in a list\n if not hasattr(filter_data, '__iter__'):\n values = [filter_data]\n else:\n values = filter_data\n\n else:\n # Static selection field\n choices = unpack_grouped_choices(field.choices)\n if type(filter_data) not in (list, tuple):\n filter_data = [filter_data] # Ensure filter data is iterable\n values = [\n label for value, label in choices if str(value) in filter_data or None in filter_data\n ]\n\n # If the field has a `null_option` attribute set and it is selected,\n # add it to the field's grouped choices.\n if getattr(field, 'null_option', None) and None in filter_data:\n values.remove(None)\n values.insert(0, field.null_option)\n\n return values\n\n\ndef add_blank_choice(choices):\n \"\"\"\n Add a blank choice to the beginning of a choices list.\n \"\"\"\n return ((None, '---------'),) + tuple(choices)\n\n\ndef form_from_model(model, fields):\n \"\"\"\n Return a Form class with the specified fields derived from a model. This is useful when we need a form to be used\n for creating objects, but want to avoid the model's validation (e.g. for bulk create/edit functions). All fields\n are marked as not required.\n \"\"\"\n form_fields = fields_for_model(model, fields=fields)\n for field in form_fields.values():\n field.required = False\n\n return type('FormFromModel', (forms.Form,), form_fields)\n\n\ndef restrict_form_fields(form, user, action='view'):\n \"\"\"\n Restrict all form fields which reference a RestrictedQuerySet. This ensures that users see only permitted objects\n as available choices.\n \"\"\"\n for field in form.fields.values():\n if hasattr(field, 'queryset') and issubclass(field.queryset.__class__, RestrictedQuerySet):\n field.queryset = field.queryset.restrict(user, action)\n\n\ndef parse_csv(reader):\n \"\"\"\n Parse a csv_reader object into a headers dictionary and a list of records dictionaries. Raise an error\n if the records are formatted incorrectly. 
Return headers and records as a tuple.\n \"\"\"\n records = []\n headers = {}\n\n # Consume the first line of CSV data as column headers. Create a dictionary mapping each header to an optional\n # \"to\" field specifying how the related object is being referenced. For example, importing a Device might use a\n # `site.slug` header, to indicate the related site is being referenced by its slug.\n\n for header in next(reader):\n header = header.strip()\n if '.' in header:\n field, to_field = header.split('.', 1)\n if field in headers:\n raise forms.ValidationError(f'Duplicate or conflicting column header for \"{field}\"')\n headers[field] = to_field\n else:\n if header in headers:\n raise forms.ValidationError(f'Duplicate or conflicting column header for \"{header}\"')\n headers[header] = None\n\n # Parse CSV rows into a list of dictionaries mapped from the column headers.\n for i, row in enumerate(reader, start=1):\n if len(row) != len(headers):\n raise forms.ValidationError(\n f\"Row {i}: Expected {len(headers)} columns but found {len(row)}\"\n )\n row = [col.strip() for col in row]\n record = dict(zip(headers.keys(), row))\n records.append(record)\n\n return headers, records\n\n\ndef validate_csv(headers, fields, required_fields):\n \"\"\"\n Validate that parsed csv data conforms to the object's available fields. Raise validation errors\n if parsed csv data contains invalid headers or does not contain required headers.\n \"\"\"\n # Validate provided column headers\n is_update = False\n for field, to_field in headers.items():\n if field == \"id\":\n is_update = True\n continue\n if field not in fields:\n raise forms.ValidationError(f'Unexpected column header \"{field}\" found.')\n if to_field and not hasattr(fields[field], 'to_field_name'):\n raise forms.ValidationError(f'Column \"{field}\" is not a related object; cannot use dots')\n if to_field and not hasattr(fields[field].queryset.model, to_field):\n raise forms.ValidationError(f'Invalid related object attribute for column \"{field}\": {to_field}')\n\n # Validate required fields (if not an update)\n if not is_update:\n for f in required_fields:\n if f not in headers:\n raise forms.ValidationError(f'Required column header \"{f}\" not found.')\n", "path": "netbox/utilities/forms/utils.py" } ]
[ { "content": "import re\n\nfrom django import forms\nfrom django.forms.models import fields_for_model\n\nfrom utilities.choices import unpack_grouped_choices\nfrom utilities.querysets import RestrictedQuerySet\nfrom .constants import *\n\n__all__ = (\n 'add_blank_choice',\n 'expand_alphanumeric_pattern',\n 'expand_ipaddress_pattern',\n 'form_from_model',\n 'get_field_value',\n 'get_selected_values',\n 'parse_alphanumeric_range',\n 'parse_numeric_range',\n 'restrict_form_fields',\n 'parse_csv',\n 'validate_csv',\n)\n\n\ndef parse_numeric_range(string, base=10):\n \"\"\"\n Expand a numeric range (continuous or not) into a decimal or\n hexadecimal list, as specified by the base parameter\n '0-3,5' => [0, 1, 2, 3, 5]\n '2,8-b,d,f' => [2, 8, 9, a, b, d, f]\n \"\"\"\n values = list()\n for dash_range in string.split(','):\n try:\n begin, end = dash_range.split('-')\n except ValueError:\n begin, end = dash_range, dash_range\n try:\n begin, end = int(begin.strip(), base=base), int(end.strip(), base=base) + 1\n except ValueError:\n raise forms.ValidationError(f'Range \"{dash_range}\" is invalid.')\n values.extend(range(begin, end))\n return sorted(set(values))\n\n\ndef parse_alphanumeric_range(string):\n \"\"\"\n Expand an alphanumeric range (continuous or not) into a list.\n 'a-d,f' => [a, b, c, d, f]\n '0-3,a-d' => [0, 1, 2, 3, a, b, c, d]\n \"\"\"\n values = []\n for dash_range in string.split(','):\n try:\n begin, end = dash_range.split('-')\n vals = begin + end\n # Break out of loop if there's an invalid pattern to return an error\n if (not (vals.isdigit() or vals.isalpha())) or (vals.isalpha() and not (vals.isupper() or vals.islower())):\n return []\n except ValueError:\n begin, end = dash_range, dash_range\n if begin.isdigit() and end.isdigit():\n if int(begin) >= int(end):\n raise forms.ValidationError(f'Range \"{dash_range}\" is invalid.')\n\n for n in list(range(int(begin), int(end) + 1)):\n values.append(n)\n else:\n # Value-based\n if begin == end:\n values.append(begin)\n # Range-based\n else:\n # Not a valid range (more than a single character)\n if not len(begin) == len(end) == 1:\n raise forms.ValidationError(f'Range \"{dash_range}\" is invalid.')\n\n if ord(begin) >= ord(end):\n raise forms.ValidationError(f'Range \"{dash_range}\" is invalid.')\n\n for n in list(range(ord(begin), ord(end) + 1)):\n values.append(chr(n))\n return values\n\n\ndef expand_alphanumeric_pattern(string):\n \"\"\"\n Expand an alphabetic pattern into a list of strings.\n \"\"\"\n lead, pattern, remnant = re.split(ALPHANUMERIC_EXPANSION_PATTERN, string, maxsplit=1)\n parsed_range = parse_alphanumeric_range(pattern)\n for i in parsed_range:\n if re.search(ALPHANUMERIC_EXPANSION_PATTERN, remnant):\n for string in expand_alphanumeric_pattern(remnant):\n yield \"{}{}{}\".format(lead, i, string)\n else:\n yield \"{}{}{}\".format(lead, i, remnant)\n\n\ndef expand_ipaddress_pattern(string, family):\n \"\"\"\n Expand an IP address pattern into a list of strings. Examples:\n '192.0.2.[1,2,100-250]/24' => ['192.0.2.1/24', '192.0.2.2/24', '192.0.2.100/24' ... '192.0.2.250/24']\n '2001:db8:0:[0,fd-ff]::/64' => ['2001:db8:0:0::/64', '2001:db8:0:fd::/64', ... 
'2001:db8:0:ff::/64']\n \"\"\"\n if family not in [4, 6]:\n raise Exception(\"Invalid IP address family: {}\".format(family))\n if family == 4:\n regex = IP4_EXPANSION_PATTERN\n base = 10\n else:\n regex = IP6_EXPANSION_PATTERN\n base = 16\n lead, pattern, remnant = re.split(regex, string, maxsplit=1)\n parsed_range = parse_numeric_range(pattern, base)\n for i in parsed_range:\n if re.search(regex, remnant):\n for string in expand_ipaddress_pattern(remnant, family):\n yield ''.join([lead, format(i, 'x' if family == 6 else 'd'), string])\n else:\n yield ''.join([lead, format(i, 'x' if family == 6 else 'd'), remnant])\n\n\ndef get_field_value(form, field_name):\n \"\"\"\n Return the current bound or initial value associated with a form field, prior to calling\n clean() for the form.\n \"\"\"\n field = form.fields[field_name]\n\n if form.is_bound:\n if data := form.data.get(field_name):\n if field.valid_value(data):\n return data\n\n return form.get_initial_for_field(field, field_name)\n\n\ndef get_selected_values(form, field_name):\n \"\"\"\n Return the list of selected human-friendly values for a form field\n \"\"\"\n if not hasattr(form, 'cleaned_data'):\n form.is_valid()\n filter_data = form.cleaned_data.get(field_name)\n field = form.fields[field_name]\n\n # Non-selection field\n if not hasattr(field, 'choices'):\n return [str(filter_data)]\n\n # Model choice field\n if type(field.choices) is forms.models.ModelChoiceIterator:\n # If this is a single-choice field, wrap its value in a list\n if not hasattr(filter_data, '__iter__'):\n values = [filter_data]\n else:\n values = filter_data\n\n else:\n # Static selection field\n choices = unpack_grouped_choices(field.choices)\n if type(filter_data) not in (list, tuple):\n filter_data = [filter_data] # Ensure filter data is iterable\n values = [\n label for value, label in choices if str(value) in filter_data or None in filter_data\n ]\n\n # If the field has a `null_option` attribute set and it is selected,\n # add it to the field's grouped choices.\n if getattr(field, 'null_option', None) and None in filter_data:\n values.remove(None)\n values.insert(0, field.null_option)\n\n return values\n\n\ndef add_blank_choice(choices):\n \"\"\"\n Add a blank choice to the beginning of a choices list.\n \"\"\"\n return ((None, '---------'),) + tuple(choices)\n\n\ndef form_from_model(model, fields):\n \"\"\"\n Return a Form class with the specified fields derived from a model. This is useful when we need a form to be used\n for creating objects, but want to avoid the model's validation (e.g. for bulk create/edit functions). All fields\n are marked as not required.\n \"\"\"\n form_fields = fields_for_model(model, fields=fields)\n for field in form_fields.values():\n field.required = False\n\n return type('FormFromModel', (forms.Form,), form_fields)\n\n\ndef restrict_form_fields(form, user, action='view'):\n \"\"\"\n Restrict all form fields which reference a RestrictedQuerySet. This ensures that users see only permitted objects\n as available choices.\n \"\"\"\n for field in form.fields.values():\n if hasattr(field, 'queryset') and issubclass(field.queryset.__class__, RestrictedQuerySet):\n field.queryset = field.queryset.restrict(user, action)\n\n\ndef parse_csv(reader):\n \"\"\"\n Parse a csv_reader object into a headers dictionary and a list of records dictionaries. Raise an error\n if the records are formatted incorrectly. 
Return headers and records as a tuple.\n \"\"\"\n records = []\n headers = {}\n\n # Consume the first line of CSV data as column headers. Create a dictionary mapping each header to an optional\n # \"to\" field specifying how the related object is being referenced. For example, importing a Device might use a\n # `site.slug` header, to indicate the related site is being referenced by its slug.\n\n for header in next(reader):\n header = header.strip()\n if '.' in header:\n field, to_field = header.split('.', 1)\n if field in headers:\n raise forms.ValidationError(f'Duplicate or conflicting column header for \"{field}\"')\n headers[field] = to_field\n else:\n if header in headers:\n raise forms.ValidationError(f'Duplicate or conflicting column header for \"{header}\"')\n headers[header] = None\n\n # Parse CSV rows into a list of dictionaries mapped from the column headers.\n for i, row in enumerate(reader, start=1):\n if len(row) != len(headers):\n raise forms.ValidationError(\n f\"Row {i}: Expected {len(headers)} columns but found {len(row)}\"\n )\n row = [col.strip() for col in row]\n record = dict(zip(headers.keys(), row))\n records.append(record)\n\n return headers, records\n\n\ndef validate_csv(headers, fields, required_fields):\n \"\"\"\n Validate that parsed csv data conforms to the object's available fields. Raise validation errors\n if parsed csv data contains invalid headers or does not contain required headers.\n \"\"\"\n # Validate provided column headers\n is_update = False\n for field, to_field in headers.items():\n if field == \"id\":\n is_update = True\n continue\n if field not in fields:\n raise forms.ValidationError(f'Unexpected column header \"{field}\" found.')\n if to_field and not hasattr(fields[field], 'to_field_name'):\n raise forms.ValidationError(f'Column \"{field}\" is not a related object; cannot use dots')\n if to_field and not hasattr(fields[field].queryset.model, to_field):\n raise forms.ValidationError(f'Invalid related object attribute for column \"{field}\": {to_field}')\n\n # Validate required fields (if not an update)\n if not is_update:\n for f in required_fields:\n if f not in headers:\n raise forms.ValidationError(f'Required column header \"{f}\" not found.')\n", "path": "netbox/utilities/forms/utils.py" } ]
diff --git a/netbox/utilities/forms/utils.py b/netbox/utilities/forms/utils.py index 4d737f16321..64864a6c130 100644 --- a/netbox/utilities/forms/utils.py +++ b/netbox/utilities/forms/utils.py @@ -40,7 +40,7 @@ def parse_numeric_range(string, base=10): except ValueError: raise forms.ValidationError(f'Range "{dash_range}" is invalid.') values.extend(range(begin, end)) - return list(set(values)) + return sorted(set(values)) def parse_alphanumeric_range(string):
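For reference, a standalone sketch (not the NetBox implementation itself, and with the form validation omitted) of the numeric-range expansion the patch above touches; it shows what returning `sorted(set(values))` buys over `list(set(values))`: the expanded values come back deduplicated and in ascending order even when the input ranges overlap or arrive out of order.

```python
# Standalone sketch of the range expansion discussed above, not the NetBox
# implementation itself (error handling omitted).
def parse_numeric_range(string, base=10):
    values = []
    for part in string.split(','):
        begin, _, end = part.partition('-')
        end = end or begin
        begin, end = int(begin.strip(), base), int(end.strip(), base)
        values.extend(range(begin, end + 1))
    return sorted(set(values))  # deduplicated and ordered, as in the patch

print(parse_numeric_range('0-3,5'))           # [0, 1, 2, 3, 5]
print(parse_numeric_range('2,8-b,d,f', 16))   # [2, 8, 9, 10, 11, 13, 15]
print(parse_numeric_range('5,0-3,2'))         # [0, 1, 2, 3, 5] despite overlap and ordering
```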
getsentry__sentry-python-1545
Typo in setup.py for 1.9.1 release breaks `conda export` ### How do you use Sentry? Self-hosted/on-premise ### Version 1.9.1 ### Steps to Reproduce This line https://github.com/getsentry/sentry-python/commit/b7c0dc412a1505fff382732f567952c8a9572b60#diff-60f61ab7a8d1910d86d9fda2261620314edcae5894d5aaa236b821c7256badd7R43 `'urllib3>=1.26.11"; python_version >="3.6"',` has a stray `"` after the urllib3 version number. This passes by pip but confuses conda, causing a very opaque error message when you try to run `conda export`: ``` InvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['('] ``` Steps to reproduce: ``` conda create -n test-sentry-sdk python=3.8 conda activate test-sentry-sdk pip install sentry-sdk==1.9.1 conda export ``` ### Expected Result Expect to get a conda environment yaml spec. ### Actual Result ``` InvalidVersionSpec: Invalid version '(>=1.26.11': unable to convert to expression tree: ['('] ```
[ { "content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"1.9.1\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n license=\"BSD\",\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version>=\"3.5\"',\n 'urllib3>=1.26.11\"; python_version >=\"3.6\"',\n \"certifi\",\n ],\n extras_require={\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"django\": [\"django>=1.8\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"celery\": [\"celery>=3\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"rq\": [\"rq>=0.6\"],\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"tornado\": [\"tornado>=5\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"1.9.1\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n license=\"BSD\",\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version>=\"3.5\"',\n 'urllib3>=1.26.11; python_version >=\"3.6\"',\n \"certifi\",\n ],\n extras_require={\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"django\": [\"django>=1.8\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"celery\": [\"celery>=3\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"rq\": [\"rq>=0.6\"],\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"tornado\": [\"tornado>=5\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 3dcb9eb658..8e370c68f2 100644 --- a/setup.py +++ b/setup.py @@ -40,7 +40,7 @@ def get_file_text(file_name): install_requires=[ 'urllib3>=1.25.7; python_version<="3.4"', 'urllib3>=1.26.9; python_version>="3.5"', - 'urllib3>=1.26.11"; python_version >="3.6"', + 'urllib3>=1.26.11; python_version >="3.6"', "certifi", ], extras_require={
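A minimal sanity check that would have flagged the stray quote before release, assuming the `packaging` library is available and that its PEP 508 parser rejects the malformed specifier (pip, as the issue notes, was forgiving enough to let it through):

```python
# Sanity check for install_requires entries; assumes the `packaging` library
# is installed and that its PEP 508 parser rejects the stray quote.
from packaging.requirements import InvalidRequirement, Requirement

candidates = [
    'urllib3>=1.26.11"; python_version >="3.6"',   # the broken 1.9.1 line
    'urllib3>=1.26.11; python_version >="3.6"',    # the fixed line
]

for spec in candidates:
    try:
        req = Requirement(spec)
        print('OK     ', spec, '->', req.name, str(req.specifier), str(req.marker))
    except InvalidRequirement as exc:
        print('INVALID', spec, '->', exc)
```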
wger-project__wger-170
BMI And Calorie Calculator Not Working Using this software in Linux Mint 13. When I enter my data into either the BMI calculator or the calorie estimator nothing happens. I have entered my height in cm and my weight in kgs. The BMI calculator says my BMI = 0. I'd be happy with 10.
[ { "content": "# -*- coding: utf-8 -*-\n\n# This file is part of wger Workout Manager.\n#\n# wger Workout Manager is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# wger Workout Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\nfrom wger.core.models import UserProfile\n\nfrom wger.nutrition.models import IngredientWeightUnit, Ingredient, MealItem\nfrom wger.utils.widgets import Html5NumberInput\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UnitChooserForm(forms.Form):\n '''\n A small form to select an amount and a unit for an ingredient\n '''\n amount = forms.DecimalField(decimal_places=2,\n max_digits=5,\n localize=True)\n unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n\n def __init__(self, *args, **kwargs):\n super(UnitChooserForm, self).__init__(*args, **kwargs)\n\n if len(args) and args[0].get('ingredient'):\n ingredient_id = args[0]['ingredient']\n\n elif kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient_id']\n\n else:\n ingredient_id = -1\n\n self.fields['unit'].queryset = IngredientWeightUnit.objects.filter(\n ingredient_id=ingredient_id).select_related()\n\n\nclass BmiForm(forms.ModelForm):\n weight = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999)\n\n class Meta:\n model = UserProfile\n fields = ('height', )\n\n\nclass BmrForm(forms.ModelForm):\n '''\n Form for the basal metabolic rate\n '''\n weight = forms.DecimalField(widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('age', 'height', 'gender')\n\n\nclass PhysicalActivitiesForm(forms.ModelForm):\n '''\n Form for the additional physical activities\n '''\n class Meta:\n model = UserProfile\n fields = ('sleep_hours',\n 'work_hours',\n 'work_intensity',\n 'sport_hours',\n 'sport_intensity',\n 'freetime_hours',\n 'freetime_intensity')\n\n\nclass DailyCaloriesForm(forms.ModelForm):\n '''\n Form for the total daily calories needed\n '''\n\n base_calories = forms.IntegerField(label=_('Basic caloric intake'),\n help_text=_('Your basic caloric intake as calculated for '\n 'your data'),\n required=False,\n widget=Html5NumberInput())\n additional_calories = forms.IntegerField(label=_('Additional calories'),\n help_text=_('Additional calories to add to the base '\n 'rate (to substract, enter a negative '\n 'number)'),\n initial=0,\n required=False,\n widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('calories',)\n\n\nclass MealItemForm(forms.ModelForm):\n weight_unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n ingredient = forms.ModelChoiceField(queryset=Ingredient.objects.all(),\n widget=forms.HiddenInput)\n\n class Meta:\n model = MealItem\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MealItemForm, self).__init__(*args, **kwargs)\n\n # Get the ingredient_id\n ingredient_id = None\n\n if kwargs.get('instance'):\n ingredient_id = 
kwargs['instance'].ingredient_id\n\n if kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient']\n\n # Filter the available ingredients\n if ingredient_id:\n self.fields['weight_unit'].queryset = \\\n IngredientWeightUnit.objects.filter(ingredient_id=ingredient_id)\n", "path": "wger/nutrition/forms.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n# This file is part of wger Workout Manager.\n#\n# wger Workout Manager is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# wger Workout Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\nfrom wger.core.models import UserProfile\n\nfrom wger.nutrition.models import IngredientWeightUnit, Ingredient, MealItem\nfrom wger.utils.widgets import Html5NumberInput\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UnitChooserForm(forms.Form):\n '''\n A small form to select an amount and a unit for an ingredient\n '''\n amount = forms.DecimalField(decimal_places=2,\n max_digits=5,\n localize=True)\n unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n\n def __init__(self, *args, **kwargs):\n super(UnitChooserForm, self).__init__(*args, **kwargs)\n\n if len(args) and args[0].get('ingredient'):\n ingredient_id = args[0]['ingredient']\n\n elif kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient_id']\n\n else:\n ingredient_id = -1\n\n self.fields['unit'].queryset = IngredientWeightUnit.objects.filter(\n ingredient_id=ingredient_id).select_related()\n\n\nclass BmiForm(forms.ModelForm):\n height = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999,\n label=_('Height (cm)'))\n weight = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999)\n\n class Meta:\n model = UserProfile\n fields = ('height', )\n\n\nclass BmrForm(forms.ModelForm):\n '''\n Form for the basal metabolic rate\n '''\n weight = forms.DecimalField(widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('age', 'height', 'gender')\n\n\nclass PhysicalActivitiesForm(forms.ModelForm):\n '''\n Form for the additional physical activities\n '''\n class Meta:\n model = UserProfile\n fields = ('sleep_hours',\n 'work_hours',\n 'work_intensity',\n 'sport_hours',\n 'sport_intensity',\n 'freetime_hours',\n 'freetime_intensity')\n\n\nclass DailyCaloriesForm(forms.ModelForm):\n '''\n Form for the total daily calories needed\n '''\n\n base_calories = forms.IntegerField(label=_('Basic caloric intake'),\n help_text=_('Your basic caloric intake as calculated for '\n 'your data'),\n required=False,\n widget=Html5NumberInput())\n additional_calories = forms.IntegerField(label=_('Additional calories'),\n help_text=_('Additional calories to add to the base '\n 'rate (to substract, enter a negative '\n 'number)'),\n initial=0,\n required=False,\n widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('calories',)\n\n\nclass MealItemForm(forms.ModelForm):\n weight_unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n ingredient = forms.ModelChoiceField(queryset=Ingredient.objects.all(),\n widget=forms.HiddenInput)\n\n class Meta:\n model = MealItem\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MealItemForm, self).__init__(*args, **kwargs)\n\n # Get 
the ingredient_id\n ingredient_id = None\n\n if kwargs.get('instance'):\n ingredient_id = kwargs['instance'].ingredient_id\n\n if kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient']\n\n # Filter the available ingredients\n if ingredient_id:\n self.fields['weight_unit'].queryset = \\\n IngredientWeightUnit.objects.filter(ingredient_id=ingredient_id)\n", "path": "wger/nutrition/forms.py" } ]
diff --git a/wger/nutrition/forms.py b/wger/nutrition/forms.py index 7f5c6fdfe..27d2dc2ef 100644 --- a/wger/nutrition/forms.py +++ b/wger/nutrition/forms.py @@ -55,6 +55,9 @@ def __init__(self, *args, **kwargs): class BmiForm(forms.ModelForm): + height = forms.DecimalField(widget=Html5NumberInput(), + max_value=999, + label=_('Height (cm)')) weight = forms.DecimalField(widget=Html5NumberInput(), max_value=999)
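For context, a worked example of the arithmetic the calculator is supposed to perform (illustration only, not wger code): BMI is weight in kilograms divided by the square of height in metres, with height entered in centimetres as on the form. If the form never hands a usable height to that calculation, the result is meaningless, which is consistent with the "BMI = 0" symptom in the report.

```python
# Worked example of the arithmetic only (not wger code): BMI = kg / m**2,
# with height entered in centimetres as on the calculator page.
from decimal import Decimal

def bmi(weight_kg, height_cm):
    height_m = Decimal(height_cm) / 100
    return Decimal(weight_kg) / (height_m ** 2)

print(round(bmi('80', '180'), 1))   # Decimal('24.7'), a plausible value rather than 0
```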
xonsh__xonsh-89
setup.py install fails if working directory not location of setup.py C:\Python34>.\python.exe \temp\xonsh-0.1.2\setup.py install ... Traceback (most recent call last): File "\temp\xonsh-0.1.2\setup.py", line 114, in <module> main() File "\temp\xonsh-0.1.2\setup.py", line 50, in main with open('README.rst', 'r') as f: FileNotFoundError: [Errno 2] No such file or directory: 'README.rst'
[ { "content": "#!/usr/bin/env python\n# coding=utf-8\n\"\"\"The xonsh installer.\"\"\"\nfrom __future__ import print_function, unicode_literals\nimport os\nimport sys\ntry:\n from setuptools import setup\n from setuptools.command.sdist import sdist\n from setuptools.command.install import install\n HAVE_SETUPTOOLS = True\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.sdist import sdist as sdist\n from distutils.command.install import install as install\n HAVE_SETUPTOOLS = False\n\nVERSION = '0.1.2'\n\nTABLES = ['xonsh/lexer_table.py', 'xonsh/parser_table.py']\n\ndef clean_tables():\n for f in TABLES:\n if os.path.isfile(f):\n os.remove(f)\n print('Remove ' + f)\n\ndef build_tables():\n print('Building lexer and parser tables.')\n sys.path.insert(0, os.path.dirname(__file__))\n from xonsh.parser import Parser\n Parser(lexer_table='lexer_table', yacc_table='parser_table',\n outputdir='xonsh')\n sys.path.pop(0)\n\nclass xinstall(install):\n def run(self):\n clean_tables()\n build_tables()\n install.run(self)\n\nclass xsdist(sdist):\n def make_release_tree(self, basedir, files):\n clean_tables()\n build_tables()\n sdist.make_release_tree(self, basedir, files)\n\ndef main():\n if sys.version_info[0] < 3:\n sys.exit('xonsh currently requires Python 3.4+')\n print(logo)\n with open('README.rst', 'r') as f:\n readme = f.read()\n skw = dict(\n name='xonsh',\n description='an exotic, usable shell',\n long_description=readme,\n license='BSD',\n version=VERSION,\n author='Anthony Scopatz',\n maintainer='Anthony Scopatz',\n author_email='[email protected]',\n url='https://github.com/scopatz/xonsh',\n platforms='Cross Platform',\n classifiers = ['Programming Language :: Python :: 3'],\n packages=['xonsh'],\n scripts=['scripts/xonsh'],\n cmdclass={'install': xinstall, 'sdist': xsdist},\n )\n if HAVE_SETUPTOOLS:\n skw['setup_requires'] = ['ply']\n skw['install_requires'] = ['ply']\n setup(**skw)\n\nlogo = \"\"\"\n ╓██▄ \n ╙██▀██╕ \n ▐██4Φ█▀█▌ \n ²██▄███▀██^██ \n -███╩▀ \" ╒▄█████▀█ \n ║██▀▀W╤▄▀ ▐║█╘ ╝█ \n ▄m▀%Φ▀▀ ╝*\" ,α█████▓▄,▄▀Γ\"▀╕ \n \"▀██¼\" ▄═╦█╟║█▀ ╓ `^` ,▄ ╢╕ \n ,▀╫M█▐j╓╟▀ ╔▓▄█▀ '║ ╔ ╣║▌ ▀▄ \n ▄m▀▀███╬█╝▀ █▀^ \"ÜM j▐╟╫╨▒ ╙▀≡═╤═m▀╗ \n █æsæ╓ ╕, ,▄Ä ▐'╕H LU ║║╠╫Å^2=⌐ █ \n ▄æ%Å███╠█ª╙▄█▀ $1╙ ║║╟╫╩*T▄ ▌ \n ╙╗%▄,╦██▌█▌█╢M ╕ M║║║║█═⌐ⁿ\"^ ╫ \n ╙╣▀████@█░█ ▌╕╕ ` ▌║▐▐║█D═≈⌐¬ⁿ s ║⌐ \n ╙╬███▓║█` ▌╚ ╕ ╕▌║▐▐╣▌⌐*▒▒Dù` ▐▌ \n ╙╬██╨U█ ╟ $ ▌ ▌▌▐▐▐M█▄═≤⌐% ╓⌐ ▌ \n ║║█▄▌║ ╟ ▌ ▌M▐▐▐M█▀▒▒▒22, ▐▌ \n ███╙^▌ ║ ▌ ⌐M▐▐▐M█≤⌐⌐¬── ▐M \n ║██ ▌╙ ╓ H║ ▌╒ M║▐▐M█\"^^^^^\"ⁿ ║ \n ██╕╙@▓ ╕ ▌║ H' ║▐▐▐█══=.,,, █ \n ╙█▓╔╚╚█ ╠ ▌└╒ ▌▐ ╚║║║▀****ⁿ - ╓▌ \n ╙█▌¼V╚▌ ▌ ╕ ▌ ║╒ ║ ▌▒╠█▀≤≤≤≤≤⌐ █ \n ╙█▌╔█╚▌ ┘ M ▌║ ╫ UUM██J^^\" ▐▌ \n ╙██╙█╙▌ ╕$j ▐⌐▌ ▌║╝╟█Å%%%≈═ █ \n ╙╣█╣█^▌ ╠║▐ ║ ▌▐.DU██^[\"\"ⁿ -╒▌ \n ▀█▄█`▌ ░M▀ ▌▐ Å£╝╝█╜%≈═╓\"\"w ⁿ⌐ █ \n `▀▄▀`▌ ▌█▐⌐║▐UW╖██%≤═░*─ =z ▄Γ \n ╙██╙▄▌█ ▌Å╛╣██╨%╤ƒⁿ= -` ▄┘ \n █▌╢▓▌▌ W £6█╤,\"ⁿ ` ▄≡▀▀▀ \n █\"█▌▌╟Å╓█╓█▀%` ▄▀ \n ╙▌██`▒U▓U█%╗* █ \n ▌╫║ ▌ÅÅ║▀╛¬` `\"█ \n ▌╫ ╫╟ █▄ ~╦%▒╥4^ \n ▌▌ \"M█ `▀╕ X╕\"╗▄▀^ \n █▌ ╓M ╙▀e▀▀^ \n ╙██▄▄▀ \n ^^ \n\"\"\"\n\nif __name__ == '__main__':\n main()\n\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# coding=utf-8\n\"\"\"The xonsh installer.\"\"\"\nfrom __future__ import print_function, unicode_literals\nimport os\nimport sys\ntry:\n from setuptools import setup\n from setuptools.command.sdist import sdist\n from setuptools.command.install import install\n HAVE_SETUPTOOLS = True\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.sdist import sdist as sdist\n from distutils.command.install import install as install\n HAVE_SETUPTOOLS = False\n\nVERSION = '0.1.2'\n\nTABLES = ['xonsh/lexer_table.py', 'xonsh/parser_table.py']\n\ndef clean_tables():\n for f in TABLES:\n if os.path.isfile(f):\n os.remove(f)\n print('Remove ' + f)\n\ndef build_tables():\n print('Building lexer and parser tables.')\n sys.path.insert(0, os.path.dirname(__file__))\n from xonsh.parser import Parser\n Parser(lexer_table='lexer_table', yacc_table='parser_table',\n outputdir='xonsh')\n sys.path.pop(0)\n\nclass xinstall(install):\n def run(self):\n clean_tables()\n build_tables()\n install.run(self)\n\nclass xsdist(sdist):\n def make_release_tree(self, basedir, files):\n clean_tables()\n build_tables()\n sdist.make_release_tree(self, basedir, files)\n\ndef main():\n if sys.version_info[0] < 3:\n sys.exit('xonsh currently requires Python 3.4+')\n print(logo)\n with open(os.path.join(os.path.dirname(__file__), 'README.rst'), 'r') as f:\n readme = f.read()\n skw = dict(\n name='xonsh',\n description='an exotic, usable shell',\n long_description=readme,\n license='BSD',\n version=VERSION,\n author='Anthony Scopatz',\n maintainer='Anthony Scopatz',\n author_email='[email protected]',\n url='https://github.com/scopatz/xonsh',\n platforms='Cross Platform',\n classifiers = ['Programming Language :: Python :: 3'],\n packages=['xonsh'],\n scripts=['scripts/xonsh'],\n cmdclass={'install': xinstall, 'sdist': xsdist},\n )\n if HAVE_SETUPTOOLS:\n skw['setup_requires'] = ['ply']\n skw['install_requires'] = ['ply']\n setup(**skw)\n\nlogo = \"\"\"\n ╓██▄ \n ╙██▀██╕ \n ▐██4Φ█▀█▌ \n ²██▄███▀██^██ \n -███╩▀ \" ╒▄█████▀█ \n ║██▀▀W╤▄▀ ▐║█╘ ╝█ \n ▄m▀%Φ▀▀ ╝*\" ,α█████▓▄,▄▀Γ\"▀╕ \n \"▀██¼\" ▄═╦█╟║█▀ ╓ `^` ,▄ ╢╕ \n ,▀╫M█▐j╓╟▀ ╔▓▄█▀ '║ ╔ ╣║▌ ▀▄ \n ▄m▀▀███╬█╝▀ █▀^ \"ÜM j▐╟╫╨▒ ╙▀≡═╤═m▀╗ \n █æsæ╓ ╕, ,▄Ä ▐'╕H LU ║║╠╫Å^2=⌐ █ \n ▄æ%Å███╠█ª╙▄█▀ $1╙ ║║╟╫╩*T▄ ▌ \n ╙╗%▄,╦██▌█▌█╢M ╕ M║║║║█═⌐ⁿ\"^ ╫ \n ╙╣▀████@█░█ ▌╕╕ ` ▌║▐▐║█D═≈⌐¬ⁿ s ║⌐ \n ╙╬███▓║█` ▌╚ ╕ ╕▌║▐▐╣▌⌐*▒▒Dù` ▐▌ \n ╙╬██╨U█ ╟ $ ▌ ▌▌▐▐▐M█▄═≤⌐% ╓⌐ ▌ \n ║║█▄▌║ ╟ ▌ ▌M▐▐▐M█▀▒▒▒22, ▐▌ \n ███╙^▌ ║ ▌ ⌐M▐▐▐M█≤⌐⌐¬── ▐M \n ║██ ▌╙ ╓ H║ ▌╒ M║▐▐M█\"^^^^^\"ⁿ ║ \n ██╕╙@▓ ╕ ▌║ H' ║▐▐▐█══=.,,, █ \n ╙█▓╔╚╚█ ╠ ▌└╒ ▌▐ ╚║║║▀****ⁿ - ╓▌ \n ╙█▌¼V╚▌ ▌ ╕ ▌ ║╒ ║ ▌▒╠█▀≤≤≤≤≤⌐ █ \n ╙█▌╔█╚▌ ┘ M ▌║ ╫ UUM██J^^\" ▐▌ \n ╙██╙█╙▌ ╕$j ▐⌐▌ ▌║╝╟█Å%%%≈═ █ \n ╙╣█╣█^▌ ╠║▐ ║ ▌▐.DU██^[\"\"ⁿ -╒▌ \n ▀█▄█`▌ ░M▀ ▌▐ Å£╝╝█╜%≈═╓\"\"w ⁿ⌐ █ \n `▀▄▀`▌ ▌█▐⌐║▐UW╖██%≤═░*─ =z ▄Γ \n ╙██╙▄▌█ ▌Å╛╣██╨%╤ƒⁿ= -` ▄┘ \n █▌╢▓▌▌ W £6█╤,\"ⁿ ` ▄≡▀▀▀ \n █\"█▌▌╟Å╓█╓█▀%` ▄▀ \n ╙▌██`▒U▓U█%╗* █ \n ▌╫║ ▌ÅÅ║▀╛¬` `\"█ \n ▌╫ ╫╟ █▄ ~╦%▒╥4^ \n ▌▌ \"M█ `▀╕ X╕\"╗▄▀^ \n █▌ ╓M ╙▀e▀▀^ \n ╙██▄▄▀ \n ^^ \n\"\"\"\n\nif __name__ == '__main__':\n main()\n\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 51089c25aa..563c034eb5 100755 --- a/setup.py +++ b/setup.py @@ -49,7 +49,7 @@ def main(): if sys.version_info[0] < 3: sys.exit('xonsh currently requires Python 3.4+') print(logo) - with open('README.rst', 'r') as f: + with open(os.path.join(os.path.dirname(__file__), 'README.rst'), 'r') as f: readme = f.read() skw = dict( name='xonsh',
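The patch anchors README.rst to the directory that contains setup.py instead of the process working directory. A minimal sketch of that pattern (assuming the data file really sits next to the script), with an equivalent pathlib spelling:

```python
# Minimal sketch of the pattern used in the patch: resolve data files relative
# to the script's own location, so `python /path/to/setup.py install` works
# from any working directory (assumes README.rst sits next to this script).
import os
from pathlib import Path

HERE = os.path.abspath(os.path.dirname(__file__))

with open(os.path.join(HERE, 'README.rst')) as f:
    readme = f.read()

# Equivalent pathlib spelling:
readme = (Path(__file__).resolve().parent / 'README.rst').read_text()
```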
ResonantGeoData__ResonantGeoData-223
_convert_to_cog function doesn't generate COG The current _convert_to_cog function (https://github.com/ResonantGeoData/ResonantGeoData/blob/master/rgd/geodata/models/imagery/subsample.py#L32-L51) doesn't output a COG; it just outputs a tiled tiff file. GDAL has two separate format writers; one for regular TIFF and one for COG. Without the '-of COG' option, the regular tiff writer is invoked. The options available to the two writers differ as well: for COG you can specify a predictor with a string value, for regular tiff you must specify a numeric predictor value. Using lzw compression, I'd recommend the following options: `['-co', 'COMPRESS=LZW', '-co', 'PREDICTOR=YES', '-of', 'COG']`. The default block (tile) size in the COG writer is 512 pixels; some tile servers or consumers seem to prefer 256; if so, add `-co BLOCKSIZE=256`. You can use tiffdump to see that COG files have multiple directories at different resolutions while non-COG files have a single directory.
[ { "content": "\"\"\"Tasks for subsampling images with GDAL.\"\"\"\nimport os\nimport tempfile\n\nfrom celery.utils.log import get_task_logger\nfrom django.conf import settings\nfrom girder_utils.files import field_file_to_local_path\nfrom osgeo import gdal\n\nfrom ..common import ArbitraryFile\nfrom .base import ConvertedImageFile\n\nlogger = get_task_logger(__name__)\n\n\ndef _gdal_translate(source_field, output_field, **kwargs):\n workdir = getattr(settings, 'GEODATA_WORKDIR', None)\n tmpdir = tempfile.mkdtemp(dir=workdir)\n\n with field_file_to_local_path(source_field) as file_path:\n logger.info(f'The image file path: {file_path}')\n output_path = os.path.join(tmpdir, 'subsampled_' + os.path.basename(file_path))\n ds = gdal.Open(str(file_path))\n ds = gdal.Translate(output_path, ds, **kwargs)\n ds = None\n\n output_field.save(os.path.basename(output_path), open(output_path, 'rb'))\n\n return\n\n\ndef convert_to_cog(cog_id):\n \"\"\"Populate ConvertedImageFile with COG file.\"\"\"\n options = [\n '-co',\n 'COMPRESS=LZW',\n '-co',\n 'TILED=YES',\n ]\n cog = ConvertedImageFile.objects.get(id=cog_id)\n cog.converted_file = ArbitraryFile()\n src = cog.source_image.image_file.imagefile.file\n output = cog.converted_file.file\n _gdal_translate(src, output, options=options)\n cog.converted_file.save()\n cog.save(\n update_fields=[\n 'converted_file',\n ]\n )\n return\n", "path": "rgd/geodata/models/imagery/subsample.py" } ]
[ { "content": "\"\"\"Tasks for subsampling images with GDAL.\"\"\"\nimport os\nimport tempfile\n\nfrom celery.utils.log import get_task_logger\nfrom django.conf import settings\nfrom girder_utils.files import field_file_to_local_path\nfrom osgeo import gdal\n\nfrom ..common import ArbitraryFile\nfrom .base import ConvertedImageFile\n\nlogger = get_task_logger(__name__)\n\n\ndef _gdal_translate(source_field, output_field, **kwargs):\n workdir = getattr(settings, 'GEODATA_WORKDIR', None)\n tmpdir = tempfile.mkdtemp(dir=workdir)\n\n with field_file_to_local_path(source_field) as file_path:\n logger.info(f'The image file path: {file_path}')\n output_path = os.path.join(tmpdir, 'subsampled_' + os.path.basename(file_path))\n ds = gdal.Open(str(file_path))\n ds = gdal.Translate(output_path, ds, **kwargs)\n ds = None\n\n output_field.save(os.path.basename(output_path), open(output_path, 'rb'))\n\n return\n\n\ndef convert_to_cog(cog_id):\n \"\"\"Populate ConvertedImageFile with COG file.\"\"\"\n options = [\n '-co',\n 'COMPRESS=LZW',\n '-co',\n 'PREDICTOR=YES',\n '-of',\n 'COG',\n '-co',\n 'BLOCKSIZE=256',\n ]\n cog = ConvertedImageFile.objects.get(id=cog_id)\n cog.converted_file = ArbitraryFile()\n src = cog.source_image.image_file.imagefile.file\n output = cog.converted_file.file\n _gdal_translate(src, output, options=options)\n cog.converted_file.save()\n cog.save(\n update_fields=[\n 'converted_file',\n ]\n )\n return\n", "path": "rgd/geodata/models/imagery/subsample.py" } ]
diff --git a/rgd/geodata/models/imagery/subsample.py b/rgd/geodata/models/imagery/subsample.py index d2fc62a30..23e0fc67b 100644 --- a/rgd/geodata/models/imagery/subsample.py +++ b/rgd/geodata/models/imagery/subsample.py @@ -35,7 +35,11 @@ def convert_to_cog(cog_id): '-co', 'COMPRESS=LZW', '-co', - 'TILED=YES', + 'PREDICTOR=YES', + '-of', + 'COG', + '-co', + 'BLOCKSIZE=256', ] cog = ConvertedImageFile.objects.get(id=cog_id) cog.converted_file = ArbitraryFile()
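A sketch of the conversion with the options recommended in the issue, using placeholder file names and assuming a GDAL build new enough (3.1+) to ship the COG driver; the overview count gives a quick hint that a real COG, rather than just a tiled TIFF, was written:

```python
# Sketch of the COG conversion with the options recommended above; the input
# and output file names are placeholders. Requires GDAL >= 3.1 for the COG driver.
from osgeo import gdal

src = gdal.Open('input.tif')
out = gdal.Translate(
    'output_cog.tif',
    src,
    options=[
        '-of', 'COG',
        '-co', 'COMPRESS=LZW',
        '-co', 'PREDICTOR=YES',
        '-co', 'BLOCKSIZE=256',
    ],
)

# A COG carries internal overviews; a plain tiled TIFF normally reports none.
print('overview levels:', out.GetRasterBand(1).GetOverviewCount())
out = None  # close/flush the output dataset
src = None
```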
holoviz__panel-2555
TQDM.ipynb reference raises exception: AttributeError: Column.value property descriptor does not exist I'm on Panel 0.12.0, Param 1.11.1, Bokeh: 2.3.3 If I run the TQDM.ipynb reference notebook top to bottom and click one of the buttons I get an `AttributeError: Column.value property descriptor does not exist` error. ![image](https://user-images.githubusercontent.com/42288570/126439660-554b5d01-57e6-481b-8069-0e3f689c9599.png)
[ { "content": "import os\nimport sys\n\nfrom math import pi\n\nimport numpy as np\nimport param\n\nfrom bokeh.plotting import figure\nfrom bokeh.models import ColumnDataSource\nfrom tqdm.asyncio import tqdm as _tqdm\n\nfrom ..layout import Column, Row\nfrom ..models import (\n HTML, Progress as _BkProgress, TrendIndicator as _BkTrendIndicator\n)\nfrom ..pane.markup import Str\nfrom ..reactive import SyncableData\nfrom ..util import escape, updating\nfrom ..viewable import Viewable\nfrom .base import Widget\n\nRED = \"#d9534f\"\nGREEN = \"#5cb85c\"\nBLUE = \"#428bca\"\n\nclass Indicator(Widget):\n \"\"\"\n Indicator is a baseclass for widgets which indicate some state.\n \"\"\"\n\n sizing_mode = param.ObjectSelector(default='fixed', objects=[\n 'fixed', 'stretch_width', 'stretch_height', 'stretch_both',\n 'scale_width', 'scale_height', 'scale_both', None])\n\n __abstract = True\n\n\nclass BooleanIndicator(Indicator):\n\n value = param.Boolean(default=False, doc=\"\"\"\n Whether the indicator is active or not.\"\"\")\n\n __abstract = True\n\n\nclass BooleanStatus(BooleanIndicator):\n\n color = param.ObjectSelector(default='dark', objects=[\n 'primary', 'secondary', 'success', 'info', 'danger', 'warning',\n 'light', 'dark'])\n\n height = param.Integer(default=20, doc=\"\"\"\n height of the circle.\"\"\")\n\n width = param.Integer(default=20, doc=\"\"\"\n Width of the circle.\"\"\")\n\n value = param.Boolean(default=False, doc=\"\"\"\n Whether the indicator is active or not.\"\"\")\n\n _rename = {'color': None}\n\n _source_transforms = {'value': None}\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n value = msg.pop('value', None)\n if value is None:\n return msg\n msg['css_classes'] = ['dot-filled', self.color] if value else ['dot']\n return msg\n\n\nclass LoadingSpinner(BooleanIndicator):\n\n bgcolor = param.ObjectSelector(default='light', objects=['dark', 'light'])\n\n color = param.ObjectSelector(default='dark', objects=[\n 'primary', 'secondary', 'success', 'info', 'danger', 'warning',\n 'light', 'dark'])\n\n height = param.Integer(default=125, doc=\"\"\"\n height of the circle.\"\"\")\n\n width = param.Integer(default=125, doc=\"\"\"\n Width of the circle.\"\"\")\n\n value = param.Boolean(default=False, doc=\"\"\"\n Whether the indicator is active or not.\"\"\")\n\n _rename = {'color': None, 'bgcolor': None}\n\n _source_transforms = {'value': None}\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n value = msg.pop('value', None)\n if value is None:\n return msg\n color_cls = f'{self.color}-{self.bgcolor}'\n msg['css_classes'] = ['loader', 'spin', color_cls] if value else ['loader', self.bgcolor]\n return msg\n\n\nclass ValueIndicator(Indicator):\n \"\"\"\n A ValueIndicator provides a visual representation for a numeric\n value.\n \"\"\"\n\n value = param.Number(default=None, allow_None=True)\n\n __abstract = True\n\n\nclass Progress(ValueIndicator):\n\n active = param.Boolean(default=True, doc=\"\"\"\n If no value is set the active property toggles animation of the\n progress bar on and off.\"\"\")\n\n bar_color = param.ObjectSelector(default='success', objects=[\n 'primary', 'secondary', 'success', 'info', 'danger', 'warning',\n 'light', 'dark'])\n\n max = param.Integer(default=100, doc=\"The maximum value of the progress bar.\")\n\n value = param.Integer(default=None, bounds=(-1, None), doc=\"\"\"\n The current value of the progress bar. 
If set to None the progress\n bar will be indeterminate and animate depending on the active\n parameter. If set to -1 the progress bar will be empty.\"\"\")\n\n _rename = {'name': None}\n\n _widget_type = _BkProgress\n\n @param.depends('max', watch=True)\n def _update_value_bounds(self):\n self.param.value.bounds = (-1, self.max)\n\n def __init__(self,**params):\n super().__init__(**params)\n self._update_value_bounds()\n\n\nclass Number(ValueIndicator):\n \"\"\"\n The Number indicator renders the value as text optionally colored\n according to the color thresholds.\n \"\"\"\n\n default_color = param.String(default='black')\n\n colors = param.List(default=None)\n\n format = param.String(default='{value}')\n\n font_size = param.String(default='54pt')\n\n nan_format = param.String(default='-', doc=\"\"\"\n How to format nan values.\"\"\")\n\n title_size = param.String(default='18pt')\n\n _rename = {}\n\n _source_transforms = {\n 'value': None, 'colors': None, 'default_color': None,\n 'font_size': None, 'format': None, 'nan_format': None,\n 'title_size': None\n }\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n font_size = msg.pop('font_size', self.font_size)\n title_font_size = msg.pop('title_size', self.title_size)\n name = msg.pop('name', self.name)\n format = msg.pop('format', self.format)\n value = msg.pop('value', self.value)\n nan_format = msg.pop('nan_format', self.nan_format)\n color = msg.pop('default_color', self.default_color)\n colors = msg.pop('colors', self.colors)\n for val, clr in (colors or [])[::-1]:\n if value is not None and value <= val:\n color = clr\n if value is None:\n value = float('nan')\n value = format.format(value=value).replace('nan', nan_format)\n text = f'<div style=\"font-size: {font_size}; color: {color}\">{value}</div>'\n if self.name:\n title_font_size = msg.pop('title_size', self.title_size)\n text = f'<div style=\"font-size: {title_font_size}; color: {color}\">{name}</div>\\n{text}'\n msg['text'] = escape(text)\n return msg\n\n\nclass String(ValueIndicator):\n \"\"\"\n The String indicator renders a string with a title.\n \"\"\"\n\n default_color = param.String(default='black')\n\n font_size = param.String(default='54pt')\n\n title_size = param.String(default='18pt')\n\n value = param.String(default=None, allow_None=True)\n\n _rename = {}\n\n _source_transforms = {\n 'value': None, 'default_color': None, 'font_size': None, 'title_size': None\n }\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n font_size = msg.pop('font_size', self.font_size)\n title_font_size = msg.pop('title_size', self.title_size)\n name = msg.pop('name', self.name)\n value = msg.pop('value', self.value)\n color = msg.pop('default_color', self.default_color)\n text = f'<div style=\"font-size: {font_size}; color: {color}\">{value}</div>'\n if self.name:\n title_font_size = msg.pop('title_size', self.title_size)\n text = f'<div style=\"font-size: {title_font_size}; color: {color}\">{name}</div>\\n{text}'\n msg['text'] = escape(text)\n return msg\n\n\nclass Gauge(ValueIndicator):\n \"\"\"\n A Gauge represents a value in some range as a position on\n speedometer or gauge. 
It is similar to a Dial but visually a lot\n busier.\n \"\"\"\n\n annulus_width = param.Integer(default=10, doc=\"\"\"\n Width of the gauge annulus.\"\"\")\n\n bounds = param.Range(default=(0, 100), doc=\"\"\"\n The upper and lower bound of the dial.\"\"\")\n\n colors = param.List(default=None, doc=\"\"\"\n Color thresholds for the Gauge, specified as a list of tuples\n of the fractional threshold and the color to switch to.\"\"\")\n\n custom_opts = param.Dict(doc=\"\"\"\n Additional options to pass to the ECharts Gauge definition.\"\"\")\n\n height = param.Integer(default=300, bounds=(0, None))\n\n end_angle = param.Number(default=-45, doc=\"\"\"\n Angle at which the gauge ends.\"\"\")\n\n format = param.String(default='{value}%', doc=\"\"\"\n Formatting string for the value indicator.\"\"\")\n\n num_splits = param.Integer(default=10, doc=\"\"\"\n Number of splits along the gauge.\"\"\")\n\n show_ticks = param.Boolean(default=True, doc=\"\"\"\n Whether to show ticks along the dials.\"\"\")\n\n show_labels = param.Boolean(default=True, doc=\"\"\"\n Whether to show tick labels along the dials.\"\"\")\n\n start_angle = param.Number(default=225, doc=\"\"\"\n Angle at which the gauge starts.\"\"\")\n\n tooltip_format = param.String(default='{b} : {c}%', doc=\"\"\"\n Formatting string for the hover tooltip.\"\"\")\n\n title_size = param.Integer(default=18, doc=\"\"\"\n Size of title font.\"\"\")\n\n value = param.Number(default=25, doc=\"\"\"\n Value to indicate on the gauge a value within the declared bounds.\"\"\")\n\n width = param.Integer(default=300, bounds=(0, None))\n\n _rename = {}\n\n _source_transforms = {\n 'annulus_width': None, 'bounds': None, 'colors': None,\n 'custom_opts': None, 'end_angle': None, 'format': None,\n 'num_splits': None, 'show_ticks': None, 'show_labels': None,\n 'start_angle': None, 'tooltip_format': None, 'title_size': None,\n 'value': None\n }\n\n @property\n def _widget_type(self):\n if 'panel.models.echarts' not in sys.modules:\n from ..models.echarts import ECharts\n else:\n ECharts = getattr(sys.modules['panel.models.echarts'], 'ECharts')\n return ECharts\n\n def __init__(self, **params):\n super().__init__(**params)\n self._update_value_bounds()\n\n @param.depends('bounds', watch=True)\n def _update_value_bounds(self):\n self.param.value.bounds = self.bounds\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n vmin, vmax = msg.pop('bounds', self.bounds)\n msg['data'] = {\n 'tooltip': {\n 'formatter': msg.pop('tooltip_format', self.tooltip_format)\n },\n 'series': [{\n 'name': 'Gauge',\n 'type': 'gauge',\n 'axisTick': {'show': msg.pop('show_ticks', self.show_ticks)},\n 'axisLabel': {'show': msg.pop('show_labels', self.show_labels)},\n 'title': {'fontWeight': 'bold', 'fontSize': msg.pop('title_size', self.title_size)},\n 'splitLine': {'show': True},\n 'radius': '100%',\n 'detail': {'formatter': msg.pop('format', self.format)},\n 'min': vmin,\n 'max': vmax,\n 'startAngle': msg.pop('start_angle', self.start_angle),\n 'endAngle': msg.pop('end_angle', self.end_angle),\n 'splitNumber': msg.pop('num_splits', self.num_splits),\n 'data': [{'value': msg.pop('value', self.value), 'name': self.name}],\n 'axisLine': {\n 'lineStyle': {\n 'width': msg.pop('annulus_width', self.annulus_width),\n }\n }\n }]\n }\n colors = msg.pop('colors', self.colors)\n if colors:\n msg['data']['series'][0]['axisLine']['lineStyle']['color'] = colors\n custom_opts = msg.pop('custom_opts', self.custom_opts)\n if custom_opts:\n gauge = 
msg['data']['series'][0]\n for k, v in custom_opts.items():\n if k not in gauge or not isinstance(gauge[k], dict):\n gauge[k] = v\n else:\n gauge[k].update(v)\n return msg\n\n\nclass Dial(ValueIndicator):\n \"\"\"\n A Dial represents a value in some range as a position on an\n annular dial. It is similar to a Gauge but more minimal visually.\n \"\"\"\n\n annulus_width = param.Number(default=0.2, doc=\"\"\"\n Width of the radial annulus as a fraction of the total.\"\"\")\n\n bounds = param.Range(default=(0, 100), doc=\"\"\"\n The upper and lower bound of the dial.\"\"\")\n\n colors = param.List(default=None, doc=\"\"\"\n Color thresholds for the Dial, specified as a list of tuples\n of the fractional threshold and the color to switch to.\"\"\")\n\n default_color = param.String(default='lightblue', doc=\"\"\"\n Color of the radial annulus if not color thresholds are supplied.\"\"\")\n\n end_angle = param.Number(default=25, doc=\"\"\"\n Angle at which the dial ends.\"\"\")\n\n format = param.String(default='{value}%', doc=\"\"\"\n Formatting string for the value indicator and lower/upper bounds.\"\"\")\n\n height = param.Integer(default=250, bounds=(1, None))\n\n nan_format = param.String(default='-', doc=\"\"\"\n How to format nan values.\"\"\")\n\n needle_color = param.String(default='black', doc=\"\"\"\n Color of the Dial needle.\"\"\")\n\n needle_width = param.Number(default=0.1, doc=\"\"\"\n Radial width of the needle.\"\"\")\n\n start_angle = param.Number(default=-205, doc=\"\"\"\n Angle at which the dial starts.\"\"\")\n\n tick_size = param.String(default=None, doc=\"\"\"\n Font size of the Dial min/max labels.\"\"\")\n\n title_size = param.String(default=None, doc=\"\"\"\n Font size of the Dial title.\"\"\")\n\n unfilled_color = param.String(default='whitesmoke', doc=\"\"\"\n Color of the unfilled region of the Dial.\"\"\")\n\n value_size = param.String(default=None, doc=\"\"\"\n Font size of the Dial value label.\"\"\")\n\n value = param.Number(default=25, allow_None=True, doc=\"\"\"\n Value to indicate on the dial a value within the declared bounds.\"\"\")\n\n width = param.Integer(default=250, bounds=(1, None))\n\n _manual_params = [\n 'value', 'start_angle', 'end_angle', 'bounds',\n 'annulus_width', 'format', 'background', 'needle_width',\n 'tick_size', 'title_size', 'value_size', 'colors',\n 'default_color', 'unfilled_color', 'height',\n 'width', 'nan_format', 'needle_color'\n ]\n\n _data_params = _manual_params\n\n _rename = {'background': 'background_fill_color'}\n\n def __init__(self, **params):\n super().__init__(**params)\n self._update_value_bounds()\n\n @param.depends('bounds', watch=True)\n def _update_value_bounds(self):\n self.param.value.bounds = self.bounds\n\n def _get_data(self):\n vmin, vmax = self.bounds\n value = self.value\n if value is None:\n value = float('nan')\n fraction = (value-vmin)/(vmax-vmin)\n start = (np.radians(360-self.start_angle) - pi % (2*pi)) + pi\n end = (np.radians(360-self.end_angle) - pi % (2*pi)) + pi\n distance = (abs(end-start) % (pi*2))\n if end>start:\n distance = (pi*2)-distance\n radial_fraction = distance*fraction\n angle = start if np.isnan(fraction) else (start-radial_fraction)\n inner_radius = 1-self.annulus_width\n\n color = self.default_color\n for val, clr in (self.colors or [])[::-1]:\n if fraction <= val:\n color = clr\n\n annulus_data = {\n 'starts': np.array([start, angle]),\n 'ends' : np.array([angle, end]),\n 'color': [color, self.unfilled_color],\n 'radius': np.array([inner_radius, inner_radius])\n }\n\n x0s, y0s, x1s, 
y1s, clrs = [], [], [], [], []\n colors = self.colors or []\n for (val, _), (_, clr) in zip(colors[:-1], colors[1:]):\n tangle = start-(distance*val)\n if (vmin + val * (vmax-vmin)) <= value:\n continue\n x0, y0 = np.cos(tangle), np.sin(tangle)\n x1, y1 = x0*inner_radius, y0*inner_radius\n x0s.append(x0)\n y0s.append(y0)\n x1s.append(x1)\n y1s.append(y1)\n clrs.append(clr)\n\n threshold_data = {\n 'x0': x0s, 'y0': y0s, 'x1': x1s, 'y1': y1s, 'color': clrs\n }\n\n center_radius = 1-self.annulus_width/2.\n x, y = np.cos(angle)*center_radius, np.sin(angle)*center_radius\n needle_start = pi+angle-(self.needle_width/2.)\n needle_end = pi+angle+(self.needle_width/2.)\n needle_data = {\n 'x': np.array([x]),\n 'y': np.array([y]),\n 'start': np.array([needle_start]),\n 'end': np.array([needle_end]),\n 'radius': np.array([center_radius])\n }\n\n value = self.format.format(value=value).replace('nan', self.nan_format)\n min_value = self.format.format(value=vmin)\n max_value = self.format.format(value=vmax)\n tminx, tminy = np.cos(start)*center_radius, np.sin(start)*center_radius\n tmaxx, tmaxy = np.cos(end)*center_radius, np.sin(end)*center_radius\n tmin_angle, tmax_angle = start+pi, end+pi % pi\n scale = (self.height/400)\n title_size = self.title_size if self.title_size else '%spt' % (scale*32)\n value_size = self.value_size if self.value_size else '%spt' % (scale*48)\n tick_size = self.tick_size if self.tick_size else '%spt' % (scale*18)\n\n text_data= {\n 'x': np.array([0, 0, tminx, tmaxx]),\n 'y': np.array([-.2, -.5, tminy, tmaxy]),\n 'text': [self.name, value, min_value, max_value],\n 'rot': np.array([0, 0, tmin_angle, tmax_angle]),\n 'size': [title_size, value_size, tick_size, tick_size],\n 'color': ['black', color, 'black', 'black']\n }\n return annulus_data, needle_data, threshold_data, text_data\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n params = self._process_param_change(self._init_params())\n model = figure(\n x_range=(-1,1), y_range=(-1,1), tools=[],\n outline_line_color=None, toolbar_location=None,\n width=self.width, height=self.height, **params\n )\n model.xaxis.visible = False\n model.yaxis.visible = False\n model.grid.visible = False\n\n annulus, needle, threshold, text = self._get_data()\n\n # Draw annulus\n annulus_source = ColumnDataSource(data=annulus, name='annulus_source')\n model.annular_wedge(\n x=0, y=0, inner_radius='radius', outer_radius=1, start_angle='starts',\n end_angle='ends', line_color='gray', color='color', direction='clock',\n source=annulus_source\n )\n\n # Draw needle\n needle_source = ColumnDataSource(data=needle, name='needle_source')\n model.wedge(\n x='x', y='y', radius='radius', start_angle='start', end_angle='end',\n fill_color=self.needle_color, line_color=self.needle_color,\n source=needle_source, name='needle_renderer'\n )\n\n # Draw thresholds\n threshold_source = ColumnDataSource(data=threshold, name='threshold_source')\n model.segment(\n x0='x0', x1='x1', y0='y0', y1='y1', line_color='color', source=threshold_source,\n line_width=2\n )\n\n # Draw labels\n text_source = ColumnDataSource(data=text, name='label_source')\n model.text(\n x='x', y='y', text='text', font_size='size', text_align='center',\n text_color='color', source=text_source, text_baseline='top',\n angle='rot'\n )\n\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _manual_update(self, events, model, doc, root, parent, comm):\n update_data = False\n for event in events:\n if event.name in ('width', 
'height'):\n model.update(**{event.name: event.new})\n if event.name in self._data_params:\n update_data = True\n elif event.name == 'needle_color':\n needle_r = model.select(name='needle_renderer')\n needle_r.glyph.line_color = event.new\n needle_r.glyph.fill_color = event.new\n if not update_data:\n return\n annulus, needle, threshold, labels = self._get_data()\n model.select(name='annulus_source').data.update(annulus)\n model.select(name='needle_source').data.update(needle)\n model.select(name='threshold_source').data.update(threshold)\n model.select(name='label_source').data.update(labels)\n\n\nclass Trend(SyncableData, Indicator):\n \"\"\"\n The Trend indicator enables the user to display a Dashboard KPI Card.\n\n The card can be layout out as:\n\n * a column (text and plot on top of each other) or\n * a row (text and plot after each other)\n\n The text section is responsive and resizes on window resize.\n \"\"\"\n\n data = param.Parameter(doc=\"\"\"\n The plot data declared as a dictionary of arrays or a DataFrame.\"\"\")\n\n layout = param.ObjectSelector(default=\"column\", objects=[\"column\", \"row\"])\n\n plot_x = param.String(default=\"x\", doc=\"\"\"\n The name of the key in the plot_data to use on the x-axis.\"\"\")\n\n plot_y = param.String(default=\"y\", doc=\"\"\"\n The name of the key in the plot_data to use on the y-axis.\"\"\")\n\n plot_color = param.String(default=BLUE, doc=\"\"\"\n The color to use in the plot.\"\"\")\n\n plot_type = param.ObjectSelector(default=\"bar\", objects=[\"line\", \"step\", \"area\", \"bar\"], doc=\"\"\"\n The plot type to render the plot data as.\"\"\")\n\n pos_color = param.String(GREEN, doc=\"\"\"\n The color used to indicate a positive change.\"\"\")\n\n neg_color = param.String(RED, doc=\"\"\"\n The color used to indicate a negative change.\"\"\")\n\n title = param.String(doc=\"\"\"The title or a short description of the card\"\"\")\n\n value = param.Parameter(default='auto', doc=\"\"\"\n The primary value to be displayed.\"\"\")\n\n value_change = param.Parameter(default='auto', doc=\"\"\"\n A secondary value. 
For example the change in percent.\"\"\")\n\n _data_params = ['data']\n\n _manual_params = ['data']\n\n _rename = {'data': None, 'selection': None}\n\n _widget_type = _BkTrendIndicator\n\n def _get_data(self):\n if self.data is None:\n return None, {self.plot_x: [], self.plot_y: []}\n elif isinstance(self.data, dict):\n return self.data, self.data\n return self.data, ColumnDataSource.from_df(self.data)\n\n def _init_params(self):\n props = super()._init_params()\n self._processed, self._data = self._get_data()\n props['source'] = ColumnDataSource(data=self._data)\n return props\n\n def _trigger_auto_values(self):\n trigger = []\n if self.value == 'auto':\n trigger.append('value')\n if self.value_change == 'auto':\n trigger.append('value_change')\n if trigger:\n self.param.trigger(*trigger)\n\n @updating\n def _stream(self, stream, rollover=None):\n self._trigger_auto_values()\n super()._stream(stream, rollover)\n\n def _update_cds(self, *events):\n super()._update_cds(*events)\n self._trigger_auto_values()\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n ys = self._data.get(self.plot_y, [])\n if 'value' in msg and msg['value'] == 'auto':\n if len(ys):\n msg['value'] = ys[-1]\n else:\n msg['value'] = 0\n if 'value_change' in msg and msg['value_change'] == 'auto':\n if len(ys) > 1:\n y1, y2 = self._data.get(self.plot_y)[-2:]\n msg['value_change'] = 0 if y1 == 0 else (y2/y1 - 1)\n else:\n msg['value_change'] = 0\n return msg\n\n\nMARGIN = {\n \"text_pane\": {\"column\": (5, 10, 0, 10), \"row\": (0, 10, 0, 10)},\n \"progress\": {\"column\": (0, 10, 5, 10), \"row\": (12, 10, 0, 10)},\n}\n\n\n\nclass ptqdm(_tqdm):\n\n def __init__(self, *args, **kwargs):\n self._indicator = kwargs.pop('indicator')\n super().__init__(*args, **kwargs)\n\n def display(self, msg=None, pos=None, bar_style=None):\n super().display(msg, pos)\n style = self._indicator.text_pane.style or {}\n color = self.colour or 'black'\n self._indicator.text_pane.style = dict(style, color=color)\n if self.total is not None and self.n is not None:\n self._indicator.max = int(self.total) # Can be numpy.int64\n self._indicator.value = int(self.n)\n self._indicator.text = self._to_text(**self.format_dict)\n return True\n\n def _to_text(self, n, total, **kwargs):\n return self.format_meter(n, total, **{**kwargs, \"ncols\": 0})\n\n def close(self):\n super().close()\n if not self.leave:\n self._indicator.reset()\n return _tqdm\n\n\nclass Tqdm(Indicator):\n\n layout = param.ClassSelector(class_=(Column, Row), precedence=-1, constant=True, doc=\"\"\"\n The layout for the text and progress indicator.\"\"\",)\n\n max = param.Integer(default=100, doc=\"\"\"\n The maximum value of the progress indicator.\"\"\")\n\n progress = param.ClassSelector(class_=Progress, precedence=-1, doc=\"\"\"\n The Progress indicator used to display the progress.\"\"\",)\n\n text = param.String(default='', doc=\"\"\"\n The current tqdm style progress text.\"\"\")\n\n text_pane = param.ClassSelector(class_=Str, precedence=-1, doc=\"\"\"\n The pane to display the text to.\"\"\")\n\n value = param.Integer(default=0, bounds=(0, None), doc=\"\"\"\n The current value of the progress bar. If set to None the progress\n bar will be indeterminate and animate depending on the active\n parameter.\"\"\")\n\n margin = param.Parameter(default=0, doc=\"\"\"\n Allows to create additional space around the component. 
May\n be specified as a two-tuple of the form (vertical, horizontal)\n or a four-tuple (top, right, bottom, left).\"\"\")\n\n width = param.Integer(default=400, bounds=(0, None), doc=\"\"\"\n The width of the component (in pixels). This can be either\n fixed or preferred width, depending on width sizing policy.\"\"\")\n\n write_to_console = param.Boolean(default=False, doc=\"\"\"\n Whether or not to also write to the console.\"\"\")\n\n _layouts = {Row: 'row', Column: 'column'}\n\n def __init__(self, **params):\n layout = params.pop('layout', 'column')\n layout = self._layouts.get(layout, layout) \n if \"text_pane\" not in params:\n sizing_mode = 'stretch_width' if layout == 'column' else 'fixed'\n params[\"text_pane\"] = Str(\n None, min_height=20, min_width=280, sizing_mode=sizing_mode,\n margin=MARGIN[\"text_pane\"][layout],\n )\n if \"progress\" not in params:\n params[\"progress\"] = Progress(\n active=False,\n sizing_mode=\"stretch_width\",\n min_width=100,\n margin=MARGIN[\"progress\"][layout],\n )\n\n layout_params = {p: params.get(p, getattr(self, p)) for p in Viewable.param}\n if layout == 'row' or layout is Row:\n params['layout'] = Row(\n params['progress'], params['text_pane'], **layout_params\n )\n else:\n params['layout'] = Column(\n params['text_pane'], params['progress'], **layout_params\n )\n super().__init__(**params)\n\n self.param.watch(self._update_layout, list(Viewable.param))\n\n if self.value == 0:\n # Hack: to give progress the initial look\n self.progress.max = 100000\n self.progress.value = 1\n else:\n self.progress.max = self.max\n self.progress.value = self.value\n self.text_pane.object = self.text\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n model = self.layout._get_model(doc, root, parent, comm)\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _cleanup(self, root):\n super()._cleanup(root)\n self.layout._cleanup(root)\n\n def _update_layout(self, *events):\n self.layout.param.set_param(**{event.name: event.new for event in events})\n\n @param.depends(\"text\", watch=True)\n def _update_text(self):\n if self.text_pane:\n self.text_pane.object = self.text\n\n @param.depends(\"value\", watch=True)\n def _update_value(self):\n if self.progress:\n self.progress.value = self.value\n\n @param.depends(\"max\", watch=True)\n def _update_max(self):\n if self.progress:\n self.progress.max = self.max\n\n def __call__(self, *args, **kwargs):\n kwargs['indicator'] = self\n if not self.write_to_console:\n f = open(os.devnull, 'w')\n kwargs['file'] = f\n return ptqdm(*args, **kwargs)\n\n __call__.__doc__ = ptqdm.__doc__\n\n def pandas(self, *args, **kwargs):\n kwargs['indicator'] = self\n if not self.write_to_console and 'file' not in kwargs:\n f = open(os.devnull, 'w')\n kwargs['file'] = f\n return ptqdm.pandas(*args, **kwargs)\n\n def reset(self):\n \"\"\"Resets the parameters\"\"\"\n self.value = self.param.value.default\n self.text = self.param.text.default\n", "path": "panel/widgets/indicators.py" } ]
[ { "content": "import os\nimport sys\n\nfrom math import pi\n\nimport numpy as np\nimport param\n\nfrom bokeh.plotting import figure\nfrom bokeh.models import ColumnDataSource\nfrom tqdm.asyncio import tqdm as _tqdm\n\nfrom ..layout import Column, Row\nfrom ..models import (\n HTML, Progress as _BkProgress, TrendIndicator as _BkTrendIndicator\n)\nfrom ..pane.markup import Str\nfrom ..reactive import SyncableData\nfrom ..util import escape, updating\nfrom ..viewable import Viewable\nfrom .base import Widget\n\nRED = \"#d9534f\"\nGREEN = \"#5cb85c\"\nBLUE = \"#428bca\"\n\nclass Indicator(Widget):\n \"\"\"\n Indicator is a baseclass for widgets which indicate some state.\n \"\"\"\n\n sizing_mode = param.ObjectSelector(default='fixed', objects=[\n 'fixed', 'stretch_width', 'stretch_height', 'stretch_both',\n 'scale_width', 'scale_height', 'scale_both', None])\n\n __abstract = True\n\n\nclass BooleanIndicator(Indicator):\n\n value = param.Boolean(default=False, doc=\"\"\"\n Whether the indicator is active or not.\"\"\")\n\n __abstract = True\n\n\nclass BooleanStatus(BooleanIndicator):\n\n color = param.ObjectSelector(default='dark', objects=[\n 'primary', 'secondary', 'success', 'info', 'danger', 'warning',\n 'light', 'dark'])\n\n height = param.Integer(default=20, doc=\"\"\"\n height of the circle.\"\"\")\n\n width = param.Integer(default=20, doc=\"\"\"\n Width of the circle.\"\"\")\n\n value = param.Boolean(default=False, doc=\"\"\"\n Whether the indicator is active or not.\"\"\")\n\n _rename = {'color': None}\n\n _source_transforms = {'value': None}\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n value = msg.pop('value', None)\n if value is None:\n return msg\n msg['css_classes'] = ['dot-filled', self.color] if value else ['dot']\n return msg\n\n\nclass LoadingSpinner(BooleanIndicator):\n\n bgcolor = param.ObjectSelector(default='light', objects=['dark', 'light'])\n\n color = param.ObjectSelector(default='dark', objects=[\n 'primary', 'secondary', 'success', 'info', 'danger', 'warning',\n 'light', 'dark'])\n\n height = param.Integer(default=125, doc=\"\"\"\n height of the circle.\"\"\")\n\n width = param.Integer(default=125, doc=\"\"\"\n Width of the circle.\"\"\")\n\n value = param.Boolean(default=False, doc=\"\"\"\n Whether the indicator is active or not.\"\"\")\n\n _rename = {'color': None, 'bgcolor': None}\n\n _source_transforms = {'value': None}\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n value = msg.pop('value', None)\n if value is None:\n return msg\n color_cls = f'{self.color}-{self.bgcolor}'\n msg['css_classes'] = ['loader', 'spin', color_cls] if value else ['loader', self.bgcolor]\n return msg\n\n\nclass ValueIndicator(Indicator):\n \"\"\"\n A ValueIndicator provides a visual representation for a numeric\n value.\n \"\"\"\n\n value = param.Number(default=None, allow_None=True)\n\n __abstract = True\n\n\nclass Progress(ValueIndicator):\n\n active = param.Boolean(default=True, doc=\"\"\"\n If no value is set the active property toggles animation of the\n progress bar on and off.\"\"\")\n\n bar_color = param.ObjectSelector(default='success', objects=[\n 'primary', 'secondary', 'success', 'info', 'danger', 'warning',\n 'light', 'dark'])\n\n max = param.Integer(default=100, doc=\"The maximum value of the progress bar.\")\n\n value = param.Integer(default=None, bounds=(-1, None), doc=\"\"\"\n The current value of the progress bar. 
If set to None the progress\n bar will be indeterminate and animate depending on the active\n parameter. If set to -1 the progress bar will be empty.\"\"\")\n\n _rename = {'name': None}\n\n _widget_type = _BkProgress\n\n @param.depends('max', watch=True)\n def _update_value_bounds(self):\n self.param.value.bounds = (-1, self.max)\n\n def __init__(self,**params):\n super().__init__(**params)\n self._update_value_bounds()\n\n\nclass Number(ValueIndicator):\n \"\"\"\n The Number indicator renders the value as text optionally colored\n according to the color thresholds.\n \"\"\"\n\n default_color = param.String(default='black')\n\n colors = param.List(default=None)\n\n format = param.String(default='{value}')\n\n font_size = param.String(default='54pt')\n\n nan_format = param.String(default='-', doc=\"\"\"\n How to format nan values.\"\"\")\n\n title_size = param.String(default='18pt')\n\n _rename = {}\n\n _source_transforms = {\n 'value': None, 'colors': None, 'default_color': None,\n 'font_size': None, 'format': None, 'nan_format': None,\n 'title_size': None\n }\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n font_size = msg.pop('font_size', self.font_size)\n title_font_size = msg.pop('title_size', self.title_size)\n name = msg.pop('name', self.name)\n format = msg.pop('format', self.format)\n value = msg.pop('value', self.value)\n nan_format = msg.pop('nan_format', self.nan_format)\n color = msg.pop('default_color', self.default_color)\n colors = msg.pop('colors', self.colors)\n for val, clr in (colors or [])[::-1]:\n if value is not None and value <= val:\n color = clr\n if value is None:\n value = float('nan')\n value = format.format(value=value).replace('nan', nan_format)\n text = f'<div style=\"font-size: {font_size}; color: {color}\">{value}</div>'\n if self.name:\n title_font_size = msg.pop('title_size', self.title_size)\n text = f'<div style=\"font-size: {title_font_size}; color: {color}\">{name}</div>\\n{text}'\n msg['text'] = escape(text)\n return msg\n\n\nclass String(ValueIndicator):\n \"\"\"\n The String indicator renders a string with a title.\n \"\"\"\n\n default_color = param.String(default='black')\n\n font_size = param.String(default='54pt')\n\n title_size = param.String(default='18pt')\n\n value = param.String(default=None, allow_None=True)\n\n _rename = {}\n\n _source_transforms = {\n 'value': None, 'default_color': None, 'font_size': None, 'title_size': None\n }\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n font_size = msg.pop('font_size', self.font_size)\n title_font_size = msg.pop('title_size', self.title_size)\n name = msg.pop('name', self.name)\n value = msg.pop('value', self.value)\n color = msg.pop('default_color', self.default_color)\n text = f'<div style=\"font-size: {font_size}; color: {color}\">{value}</div>'\n if self.name:\n title_font_size = msg.pop('title_size', self.title_size)\n text = f'<div style=\"font-size: {title_font_size}; color: {color}\">{name}</div>\\n{text}'\n msg['text'] = escape(text)\n return msg\n\n\nclass Gauge(ValueIndicator):\n \"\"\"\n A Gauge represents a value in some range as a position on\n speedometer or gauge. 
It is similar to a Dial but visually a lot\n busier.\n \"\"\"\n\n annulus_width = param.Integer(default=10, doc=\"\"\"\n Width of the gauge annulus.\"\"\")\n\n bounds = param.Range(default=(0, 100), doc=\"\"\"\n The upper and lower bound of the dial.\"\"\")\n\n colors = param.List(default=None, doc=\"\"\"\n Color thresholds for the Gauge, specified as a list of tuples\n of the fractional threshold and the color to switch to.\"\"\")\n\n custom_opts = param.Dict(doc=\"\"\"\n Additional options to pass to the ECharts Gauge definition.\"\"\")\n\n height = param.Integer(default=300, bounds=(0, None))\n\n end_angle = param.Number(default=-45, doc=\"\"\"\n Angle at which the gauge ends.\"\"\")\n\n format = param.String(default='{value}%', doc=\"\"\"\n Formatting string for the value indicator.\"\"\")\n\n num_splits = param.Integer(default=10, doc=\"\"\"\n Number of splits along the gauge.\"\"\")\n\n show_ticks = param.Boolean(default=True, doc=\"\"\"\n Whether to show ticks along the dials.\"\"\")\n\n show_labels = param.Boolean(default=True, doc=\"\"\"\n Whether to show tick labels along the dials.\"\"\")\n\n start_angle = param.Number(default=225, doc=\"\"\"\n Angle at which the gauge starts.\"\"\")\n\n tooltip_format = param.String(default='{b} : {c}%', doc=\"\"\"\n Formatting string for the hover tooltip.\"\"\")\n\n title_size = param.Integer(default=18, doc=\"\"\"\n Size of title font.\"\"\")\n\n value = param.Number(default=25, doc=\"\"\"\n Value to indicate on the gauge a value within the declared bounds.\"\"\")\n\n width = param.Integer(default=300, bounds=(0, None))\n\n _rename = {}\n\n _source_transforms = {\n 'annulus_width': None, 'bounds': None, 'colors': None,\n 'custom_opts': None, 'end_angle': None, 'format': None,\n 'num_splits': None, 'show_ticks': None, 'show_labels': None,\n 'start_angle': None, 'tooltip_format': None, 'title_size': None,\n 'value': None\n }\n\n @property\n def _widget_type(self):\n if 'panel.models.echarts' not in sys.modules:\n from ..models.echarts import ECharts\n else:\n ECharts = getattr(sys.modules['panel.models.echarts'], 'ECharts')\n return ECharts\n\n def __init__(self, **params):\n super().__init__(**params)\n self._update_value_bounds()\n\n @param.depends('bounds', watch=True)\n def _update_value_bounds(self):\n self.param.value.bounds = self.bounds\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n vmin, vmax = msg.pop('bounds', self.bounds)\n msg['data'] = {\n 'tooltip': {\n 'formatter': msg.pop('tooltip_format', self.tooltip_format)\n },\n 'series': [{\n 'name': 'Gauge',\n 'type': 'gauge',\n 'axisTick': {'show': msg.pop('show_ticks', self.show_ticks)},\n 'axisLabel': {'show': msg.pop('show_labels', self.show_labels)},\n 'title': {'fontWeight': 'bold', 'fontSize': msg.pop('title_size', self.title_size)},\n 'splitLine': {'show': True},\n 'radius': '100%',\n 'detail': {'formatter': msg.pop('format', self.format)},\n 'min': vmin,\n 'max': vmax,\n 'startAngle': msg.pop('start_angle', self.start_angle),\n 'endAngle': msg.pop('end_angle', self.end_angle),\n 'splitNumber': msg.pop('num_splits', self.num_splits),\n 'data': [{'value': msg.pop('value', self.value), 'name': self.name}],\n 'axisLine': {\n 'lineStyle': {\n 'width': msg.pop('annulus_width', self.annulus_width),\n }\n }\n }]\n }\n colors = msg.pop('colors', self.colors)\n if colors:\n msg['data']['series'][0]['axisLine']['lineStyle']['color'] = colors\n custom_opts = msg.pop('custom_opts', self.custom_opts)\n if custom_opts:\n gauge = 
msg['data']['series'][0]\n for k, v in custom_opts.items():\n if k not in gauge or not isinstance(gauge[k], dict):\n gauge[k] = v\n else:\n gauge[k].update(v)\n return msg\n\n\nclass Dial(ValueIndicator):\n \"\"\"\n A Dial represents a value in some range as a position on an\n annular dial. It is similar to a Gauge but more minimal visually.\n \"\"\"\n\n annulus_width = param.Number(default=0.2, doc=\"\"\"\n Width of the radial annulus as a fraction of the total.\"\"\")\n\n bounds = param.Range(default=(0, 100), doc=\"\"\"\n The upper and lower bound of the dial.\"\"\")\n\n colors = param.List(default=None, doc=\"\"\"\n Color thresholds for the Dial, specified as a list of tuples\n of the fractional threshold and the color to switch to.\"\"\")\n\n default_color = param.String(default='lightblue', doc=\"\"\"\n Color of the radial annulus if not color thresholds are supplied.\"\"\")\n\n end_angle = param.Number(default=25, doc=\"\"\"\n Angle at which the dial ends.\"\"\")\n\n format = param.String(default='{value}%', doc=\"\"\"\n Formatting string for the value indicator and lower/upper bounds.\"\"\")\n\n height = param.Integer(default=250, bounds=(1, None))\n\n nan_format = param.String(default='-', doc=\"\"\"\n How to format nan values.\"\"\")\n\n needle_color = param.String(default='black', doc=\"\"\"\n Color of the Dial needle.\"\"\")\n\n needle_width = param.Number(default=0.1, doc=\"\"\"\n Radial width of the needle.\"\"\")\n\n start_angle = param.Number(default=-205, doc=\"\"\"\n Angle at which the dial starts.\"\"\")\n\n tick_size = param.String(default=None, doc=\"\"\"\n Font size of the Dial min/max labels.\"\"\")\n\n title_size = param.String(default=None, doc=\"\"\"\n Font size of the Dial title.\"\"\")\n\n unfilled_color = param.String(default='whitesmoke', doc=\"\"\"\n Color of the unfilled region of the Dial.\"\"\")\n\n value_size = param.String(default=None, doc=\"\"\"\n Font size of the Dial value label.\"\"\")\n\n value = param.Number(default=25, allow_None=True, doc=\"\"\"\n Value to indicate on the dial a value within the declared bounds.\"\"\")\n\n width = param.Integer(default=250, bounds=(1, None))\n\n _manual_params = [\n 'value', 'start_angle', 'end_angle', 'bounds',\n 'annulus_width', 'format', 'background', 'needle_width',\n 'tick_size', 'title_size', 'value_size', 'colors',\n 'default_color', 'unfilled_color', 'height',\n 'width', 'nan_format', 'needle_color'\n ]\n\n _data_params = _manual_params\n\n _rename = {'background': 'background_fill_color'}\n\n def __init__(self, **params):\n super().__init__(**params)\n self._update_value_bounds()\n\n @param.depends('bounds', watch=True)\n def _update_value_bounds(self):\n self.param.value.bounds = self.bounds\n\n def _get_data(self):\n vmin, vmax = self.bounds\n value = self.value\n if value is None:\n value = float('nan')\n fraction = (value-vmin)/(vmax-vmin)\n start = (np.radians(360-self.start_angle) - pi % (2*pi)) + pi\n end = (np.radians(360-self.end_angle) - pi % (2*pi)) + pi\n distance = (abs(end-start) % (pi*2))\n if end>start:\n distance = (pi*2)-distance\n radial_fraction = distance*fraction\n angle = start if np.isnan(fraction) else (start-radial_fraction)\n inner_radius = 1-self.annulus_width\n\n color = self.default_color\n for val, clr in (self.colors or [])[::-1]:\n if fraction <= val:\n color = clr\n\n annulus_data = {\n 'starts': np.array([start, angle]),\n 'ends' : np.array([angle, end]),\n 'color': [color, self.unfilled_color],\n 'radius': np.array([inner_radius, inner_radius])\n }\n\n x0s, y0s, x1s, 
y1s, clrs = [], [], [], [], []\n colors = self.colors or []\n for (val, _), (_, clr) in zip(colors[:-1], colors[1:]):\n tangle = start-(distance*val)\n if (vmin + val * (vmax-vmin)) <= value:\n continue\n x0, y0 = np.cos(tangle), np.sin(tangle)\n x1, y1 = x0*inner_radius, y0*inner_radius\n x0s.append(x0)\n y0s.append(y0)\n x1s.append(x1)\n y1s.append(y1)\n clrs.append(clr)\n\n threshold_data = {\n 'x0': x0s, 'y0': y0s, 'x1': x1s, 'y1': y1s, 'color': clrs\n }\n\n center_radius = 1-self.annulus_width/2.\n x, y = np.cos(angle)*center_radius, np.sin(angle)*center_radius\n needle_start = pi+angle-(self.needle_width/2.)\n needle_end = pi+angle+(self.needle_width/2.)\n needle_data = {\n 'x': np.array([x]),\n 'y': np.array([y]),\n 'start': np.array([needle_start]),\n 'end': np.array([needle_end]),\n 'radius': np.array([center_radius])\n }\n\n value = self.format.format(value=value).replace('nan', self.nan_format)\n min_value = self.format.format(value=vmin)\n max_value = self.format.format(value=vmax)\n tminx, tminy = np.cos(start)*center_radius, np.sin(start)*center_radius\n tmaxx, tmaxy = np.cos(end)*center_radius, np.sin(end)*center_radius\n tmin_angle, tmax_angle = start+pi, end+pi % pi\n scale = (self.height/400)\n title_size = self.title_size if self.title_size else '%spt' % (scale*32)\n value_size = self.value_size if self.value_size else '%spt' % (scale*48)\n tick_size = self.tick_size if self.tick_size else '%spt' % (scale*18)\n\n text_data= {\n 'x': np.array([0, 0, tminx, tmaxx]),\n 'y': np.array([-.2, -.5, tminy, tmaxy]),\n 'text': [self.name, value, min_value, max_value],\n 'rot': np.array([0, 0, tmin_angle, tmax_angle]),\n 'size': [title_size, value_size, tick_size, tick_size],\n 'color': ['black', color, 'black', 'black']\n }\n return annulus_data, needle_data, threshold_data, text_data\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n params = self._process_param_change(self._init_params())\n model = figure(\n x_range=(-1,1), y_range=(-1,1), tools=[],\n outline_line_color=None, toolbar_location=None,\n width=self.width, height=self.height, **params\n )\n model.xaxis.visible = False\n model.yaxis.visible = False\n model.grid.visible = False\n\n annulus, needle, threshold, text = self._get_data()\n\n # Draw annulus\n annulus_source = ColumnDataSource(data=annulus, name='annulus_source')\n model.annular_wedge(\n x=0, y=0, inner_radius='radius', outer_radius=1, start_angle='starts',\n end_angle='ends', line_color='gray', color='color', direction='clock',\n source=annulus_source\n )\n\n # Draw needle\n needle_source = ColumnDataSource(data=needle, name='needle_source')\n model.wedge(\n x='x', y='y', radius='radius', start_angle='start', end_angle='end',\n fill_color=self.needle_color, line_color=self.needle_color,\n source=needle_source, name='needle_renderer'\n )\n\n # Draw thresholds\n threshold_source = ColumnDataSource(data=threshold, name='threshold_source')\n model.segment(\n x0='x0', x1='x1', y0='y0', y1='y1', line_color='color', source=threshold_source,\n line_width=2\n )\n\n # Draw labels\n text_source = ColumnDataSource(data=text, name='label_source')\n model.text(\n x='x', y='y', text='text', font_size='size', text_align='center',\n text_color='color', source=text_source, text_baseline='top',\n angle='rot'\n )\n\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _manual_update(self, events, model, doc, root, parent, comm):\n update_data = False\n for event in events:\n if event.name in ('width', 
'height'):\n model.update(**{event.name: event.new})\n if event.name in self._data_params:\n update_data = True\n elif event.name == 'needle_color':\n needle_r = model.select(name='needle_renderer')\n needle_r.glyph.line_color = event.new\n needle_r.glyph.fill_color = event.new\n if not update_data:\n return\n annulus, needle, threshold, labels = self._get_data()\n model.select(name='annulus_source').data.update(annulus)\n model.select(name='needle_source').data.update(needle)\n model.select(name='threshold_source').data.update(threshold)\n model.select(name='label_source').data.update(labels)\n\n\nclass Trend(SyncableData, Indicator):\n \"\"\"\n The Trend indicator enables the user to display a Dashboard KPI Card.\n\n The card can be layout out as:\n\n * a column (text and plot on top of each other) or\n * a row (text and plot after each other)\n\n The text section is responsive and resizes on window resize.\n \"\"\"\n\n data = param.Parameter(doc=\"\"\"\n The plot data declared as a dictionary of arrays or a DataFrame.\"\"\")\n\n layout = param.ObjectSelector(default=\"column\", objects=[\"column\", \"row\"])\n\n plot_x = param.String(default=\"x\", doc=\"\"\"\n The name of the key in the plot_data to use on the x-axis.\"\"\")\n\n plot_y = param.String(default=\"y\", doc=\"\"\"\n The name of the key in the plot_data to use on the y-axis.\"\"\")\n\n plot_color = param.String(default=BLUE, doc=\"\"\"\n The color to use in the plot.\"\"\")\n\n plot_type = param.ObjectSelector(default=\"bar\", objects=[\"line\", \"step\", \"area\", \"bar\"], doc=\"\"\"\n The plot type to render the plot data as.\"\"\")\n\n pos_color = param.String(GREEN, doc=\"\"\"\n The color used to indicate a positive change.\"\"\")\n\n neg_color = param.String(RED, doc=\"\"\"\n The color used to indicate a negative change.\"\"\")\n\n title = param.String(doc=\"\"\"The title or a short description of the card\"\"\")\n\n value = param.Parameter(default='auto', doc=\"\"\"\n The primary value to be displayed.\"\"\")\n\n value_change = param.Parameter(default='auto', doc=\"\"\"\n A secondary value. 
For example the change in percent.\"\"\")\n\n _data_params = ['data']\n\n _manual_params = ['data']\n\n _rename = {'data': None, 'selection': None}\n\n _widget_type = _BkTrendIndicator\n\n def _get_data(self):\n if self.data is None:\n return None, {self.plot_x: [], self.plot_y: []}\n elif isinstance(self.data, dict):\n return self.data, self.data\n return self.data, ColumnDataSource.from_df(self.data)\n\n def _init_params(self):\n props = super()._init_params()\n self._processed, self._data = self._get_data()\n props['source'] = ColumnDataSource(data=self._data)\n return props\n\n def _trigger_auto_values(self):\n trigger = []\n if self.value == 'auto':\n trigger.append('value')\n if self.value_change == 'auto':\n trigger.append('value_change')\n if trigger:\n self.param.trigger(*trigger)\n\n @updating\n def _stream(self, stream, rollover=None):\n self._trigger_auto_values()\n super()._stream(stream, rollover)\n\n def _update_cds(self, *events):\n super()._update_cds(*events)\n self._trigger_auto_values()\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n ys = self._data.get(self.plot_y, [])\n if 'value' in msg and msg['value'] == 'auto':\n if len(ys):\n msg['value'] = ys[-1]\n else:\n msg['value'] = 0\n if 'value_change' in msg and msg['value_change'] == 'auto':\n if len(ys) > 1:\n y1, y2 = self._data.get(self.plot_y)[-2:]\n msg['value_change'] = 0 if y1 == 0 else (y2/y1 - 1)\n else:\n msg['value_change'] = 0\n return msg\n\n\nMARGIN = {\n \"text_pane\": {\"column\": (5, 10, 0, 10), \"row\": (0, 10, 0, 10)},\n \"progress\": {\"column\": (0, 10, 5, 10), \"row\": (12, 10, 0, 10)},\n}\n\n\n\nclass ptqdm(_tqdm):\n\n def __init__(self, *args, **kwargs):\n self._indicator = kwargs.pop('indicator')\n super().__init__(*args, **kwargs)\n\n def display(self, msg=None, pos=None, bar_style=None):\n super().display(msg, pos)\n style = self._indicator.text_pane.style or {}\n color = self.colour or 'black'\n self._indicator.text_pane.style = dict(style, color=color)\n if self.total is not None and self.n is not None:\n self._indicator.max = int(self.total) # Can be numpy.int64\n self._indicator.value = int(self.n)\n self._indicator.text = self._to_text(**self.format_dict)\n return True\n\n def _to_text(self, n, total, **kwargs):\n return self.format_meter(n, total, **{**kwargs, \"ncols\": 0})\n\n def close(self):\n super().close()\n if not self.leave:\n self._indicator.reset()\n return _tqdm\n\n\nclass Tqdm(Indicator):\n\n layout = param.ClassSelector(class_=(Column, Row), precedence=-1, constant=True, doc=\"\"\"\n The layout for the text and progress indicator.\"\"\",)\n\n max = param.Integer(default=100, doc=\"\"\"\n The maximum value of the progress indicator.\"\"\")\n\n progress = param.ClassSelector(class_=Progress, precedence=-1, doc=\"\"\"\n The Progress indicator used to display the progress.\"\"\",)\n\n text = param.String(default='', doc=\"\"\"\n The current tqdm style progress text.\"\"\")\n\n text_pane = param.ClassSelector(class_=Str, precedence=-1, doc=\"\"\"\n The pane to display the text to.\"\"\")\n\n value = param.Integer(default=0, bounds=(0, None), doc=\"\"\"\n The current value of the progress bar. If set to None the progress\n bar will be indeterminate and animate depending on the active\n parameter.\"\"\")\n\n margin = param.Parameter(default=0, doc=\"\"\"\n Allows to create additional space around the component. 
May\n be specified as a two-tuple of the form (vertical, horizontal)\n or a four-tuple (top, right, bottom, left).\"\"\")\n\n width = param.Integer(default=400, bounds=(0, None), doc=\"\"\"\n The width of the component (in pixels). This can be either\n fixed or preferred width, depending on width sizing policy.\"\"\")\n\n write_to_console = param.Boolean(default=False, doc=\"\"\"\n Whether or not to also write to the console.\"\"\")\n\n _layouts = {Row: 'row', Column: 'column'}\n\n _rename = {'value': None, 'min': None, 'max': None, 'text': None}\n\n def __init__(self, **params):\n layout = params.pop('layout', 'column')\n layout = self._layouts.get(layout, layout) \n if \"text_pane\" not in params:\n sizing_mode = 'stretch_width' if layout == 'column' else 'fixed'\n params[\"text_pane\"] = Str(\n None, min_height=20, min_width=280, sizing_mode=sizing_mode,\n margin=MARGIN[\"text_pane\"][layout],\n )\n if \"progress\" not in params:\n params[\"progress\"] = Progress(\n active=False,\n sizing_mode=\"stretch_width\",\n min_width=100,\n margin=MARGIN[\"progress\"][layout],\n )\n\n layout_params = {p: params.get(p, getattr(self, p)) for p in Viewable.param}\n if layout == 'row' or layout is Row:\n params['layout'] = Row(\n params['progress'], params['text_pane'], **layout_params\n )\n else:\n params['layout'] = Column(\n params['text_pane'], params['progress'], **layout_params\n )\n super().__init__(**params)\n\n self.param.watch(self._update_layout, list(Viewable.param))\n\n if self.value == 0:\n # Hack: to give progress the initial look\n self.progress.max = 100000\n self.progress.value = 1\n else:\n self.progress.max = self.max\n self.progress.value = self.value\n self.text_pane.object = self.text\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n model = self.layout._get_model(doc, root, parent, comm)\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _cleanup(self, root):\n super()._cleanup(root)\n self.layout._cleanup(root)\n\n def _update_layout(self, *events):\n self.layout.param.set_param(**{event.name: event.new for event in events})\n\n @param.depends(\"text\", watch=True)\n def _update_text(self):\n if self.text_pane:\n self.text_pane.object = self.text\n\n @param.depends(\"value\", watch=True)\n def _update_value(self):\n if self.progress:\n self.progress.value = self.value\n\n @param.depends(\"max\", watch=True)\n def _update_max(self):\n if self.progress:\n self.progress.max = self.max\n\n def __call__(self, *args, **kwargs):\n kwargs['indicator'] = self\n if not self.write_to_console:\n f = open(os.devnull, 'w')\n kwargs['file'] = f\n return ptqdm(*args, **kwargs)\n\n __call__.__doc__ = ptqdm.__doc__\n\n def pandas(self, *args, **kwargs):\n kwargs['indicator'] = self\n if not self.write_to_console and 'file' not in kwargs:\n f = open(os.devnull, 'w')\n kwargs['file'] = f\n return ptqdm.pandas(*args, **kwargs)\n\n def reset(self):\n \"\"\"Resets the parameters\"\"\"\n self.value = self.param.value.default\n self.text = self.param.text.default\n", "path": "panel/widgets/indicators.py" } ]
diff --git a/panel/widgets/indicators.py b/panel/widgets/indicators.py index f3e69e2eda..f737627b38 100644 --- a/panel/widgets/indicators.py +++ b/panel/widgets/indicators.py @@ -749,6 +749,8 @@ class Tqdm(Indicator): _layouts = {Row: 'row', Column: 'column'} + _rename = {'value': None, 'min': None, 'max': None, 'text': None} + def __init__(self, **params): layout = params.pop('layout', 'column') layout = self._layouts.get(layout, layout)
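For context, a minimal usage sketch of the `Tqdm` indicator touched by this diff (not part of the patch; assumes a recent `panel` install). The added `_rename` entries presumably stop `value`, `max`, and `text` from being forwarded to the composed layout's Bokeh model, since `Tqdm` already mirrors them onto its inner `Progress` and `Str` panes.

```python
# Hypothetical usage sketch of panel.widgets.Tqdm; names outside the diff are assumptions.
import time
import panel as pn

pn.extension()

tqdm = pn.widgets.Tqdm()  # value/max/text are synced to the inner Progress/Str panes

def run(event):
    # Calling the indicator wraps an iterable, just like tqdm.tqdm would.
    for _ in tqdm(range(10)):
        time.sleep(0.1)

button = pn.widgets.Button(name="Run")
button.on_click(run)
pn.Column(button, tqdm).servable()
```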
graphql-python__graphene-django-194
graphene 1.3 annotation bug
There seems to be a bug with graphene == 1.3. I have a model whose queryset I annotate with a "total_rent" field, and graphene seems to strip the annotation away somehow. I get this exception:
```
Cannot resolve keyword 'total_rent' into field. Choices are ...
```
When reverting back to graphene == 1.2.1 the error goes away. I wish I could provide a more in-depth report, but my deadlines are too tight at the moment.
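A minimal reproduction sketch of the reported setup (the model, app, and relation names here are assumptions, not taken from the issue): a connection resolver returns a queryset annotated with `total_rent` and ordered by it, which is where the `Cannot resolve keyword` error surfaces once the annotation is lost.

```python
# Hypothetical reproduction; "Property" and its "leases" relation are assumed.
import graphene
from django.db.models import Sum
from graphene import relay
from graphene_django import DjangoObjectType
from graphene_django.fields import DjangoConnectionField

from myapp.models import Property  # assumed Django model


class PropertyType(DjangoObjectType):
    class Meta:
        model = Property
        interfaces = (relay.Node,)


class Query(graphene.ObjectType):
    all_properties = DjangoConnectionField(PropertyType)

    def resolve_all_properties(self, args, context, info):
        # The annotation used for ordering is what gets stripped by the buggy merge.
        return Property.objects.annotate(
            total_rent=Sum('leases__rent')
        ).order_by('total_rent')


schema = graphene.Schema(query=Query)
```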
[ { "content": "from functools import partial\n\nfrom django.db.models.query import QuerySet\n\nfrom graphene.types import Field, List\nfrom graphene.relay import ConnectionField, PageInfo\nfrom graphql_relay.connection.arrayconnection import connection_from_list_slice\n\nfrom .settings import graphene_settings\nfrom .utils import DJANGO_FILTER_INSTALLED, maybe_queryset\n\n\nclass DjangoListField(Field):\n\n def __init__(self, _type, *args, **kwargs):\n super(DjangoListField, self).__init__(List(_type), *args, **kwargs)\n\n @property\n def model(self):\n return self.type.of_type._meta.node._meta.model\n\n @staticmethod\n def list_resolver(resolver, root, args, context, info):\n return maybe_queryset(resolver(root, args, context, info))\n\n def get_resolver(self, parent_resolver):\n return partial(self.list_resolver, parent_resolver)\n\n\nclass DjangoConnectionField(ConnectionField):\n\n def __init__(self, *args, **kwargs):\n self.on = kwargs.pop('on', False)\n self.max_limit = kwargs.pop(\n 'max_limit',\n graphene_settings.RELAY_CONNECTION_MAX_LIMIT\n )\n self.enforce_first_or_last = kwargs.pop(\n 'enforce_first_or_last',\n graphene_settings.RELAY_CONNECTION_ENFORCE_FIRST_OR_LAST\n )\n super(DjangoConnectionField, self).__init__(*args, **kwargs)\n\n @property\n def node_type(self):\n return self.type._meta.node\n\n @property\n def model(self):\n return self.node_type._meta.model\n\n def get_manager(self):\n if self.on:\n return getattr(self.model, self.on)\n else:\n return self.model._default_manager\n\n @classmethod\n def merge_querysets(cls, default_queryset, queryset):\n return default_queryset & queryset\n\n @classmethod\n def connection_resolver(cls, resolver, connection, default_manager, max_limit,\n enforce_first_or_last, root, args, context, info):\n first = args.get('first')\n last = args.get('last')\n\n if enforce_first_or_last:\n assert first or last, (\n 'You must provide a `first` or `last` value to properly paginate the `{}` connection.'\n ).format(info.field_name)\n\n if max_limit:\n if first:\n assert first <= max_limit, (\n 'Requesting {} records on the `{}` connection exceeds the `first` limit of {} records.'\n ).format(first, info.field_name, max_limit)\n args['first'] = min(first, max_limit)\n\n if last:\n assert last <= max_limit, (\n 'Requesting {} records on the `{}` connection exceeds the `last` limit of {} records.'\n ).format(first, info.field_name, max_limit)\n args['last'] = min(last, max_limit)\n\n iterable = resolver(root, args, context, info)\n if iterable is None:\n iterable = default_manager\n iterable = maybe_queryset(iterable)\n if isinstance(iterable, QuerySet):\n if iterable is not default_manager:\n default_queryset = maybe_queryset(default_manager)\n iterable = cls.merge_querysets(default_queryset, iterable)\n _len = iterable.count()\n else:\n _len = len(iterable)\n connection = connection_from_list_slice(\n iterable,\n args,\n slice_start=0,\n list_length=_len,\n list_slice_length=_len,\n connection_type=connection,\n edge_type=connection.Edge,\n pageinfo_type=PageInfo,\n )\n connection.iterable = iterable\n connection.length = _len\n return connection\n\n def get_resolver(self, parent_resolver):\n return partial(\n self.connection_resolver,\n parent_resolver,\n self.type,\n self.get_manager(),\n self.max_limit,\n self.enforce_first_or_last\n )\n\n\ndef get_connection_field(*args, **kwargs):\n if DJANGO_FILTER_INSTALLED:\n from .filter.fields import DjangoFilterConnectionField\n return DjangoFilterConnectionField(*args, **kwargs)\n return 
DjangoConnectionField(*args, **kwargs)\n", "path": "graphene_django/fields.py" } ]
[ { "content": "from functools import partial\n\nfrom django.db.models.query import QuerySet\n\nfrom graphene.types import Field, List\nfrom graphene.relay import ConnectionField, PageInfo\nfrom graphql_relay.connection.arrayconnection import connection_from_list_slice\n\nfrom .settings import graphene_settings\nfrom .utils import DJANGO_FILTER_INSTALLED, maybe_queryset\n\n\nclass DjangoListField(Field):\n\n def __init__(self, _type, *args, **kwargs):\n super(DjangoListField, self).__init__(List(_type), *args, **kwargs)\n\n @property\n def model(self):\n return self.type.of_type._meta.node._meta.model\n\n @staticmethod\n def list_resolver(resolver, root, args, context, info):\n return maybe_queryset(resolver(root, args, context, info))\n\n def get_resolver(self, parent_resolver):\n return partial(self.list_resolver, parent_resolver)\n\n\nclass DjangoConnectionField(ConnectionField):\n\n def __init__(self, *args, **kwargs):\n self.on = kwargs.pop('on', False)\n self.max_limit = kwargs.pop(\n 'max_limit',\n graphene_settings.RELAY_CONNECTION_MAX_LIMIT\n )\n self.enforce_first_or_last = kwargs.pop(\n 'enforce_first_or_last',\n graphene_settings.RELAY_CONNECTION_ENFORCE_FIRST_OR_LAST\n )\n super(DjangoConnectionField, self).__init__(*args, **kwargs)\n\n @property\n def node_type(self):\n return self.type._meta.node\n\n @property\n def model(self):\n return self.node_type._meta.model\n\n def get_manager(self):\n if self.on:\n return getattr(self.model, self.on)\n else:\n return self.model._default_manager\n\n @classmethod\n def merge_querysets(cls, default_queryset, queryset):\n return queryset & default_queryset\n\n @classmethod\n def connection_resolver(cls, resolver, connection, default_manager, max_limit,\n enforce_first_or_last, root, args, context, info):\n first = args.get('first')\n last = args.get('last')\n\n if enforce_first_or_last:\n assert first or last, (\n 'You must provide a `first` or `last` value to properly paginate the `{}` connection.'\n ).format(info.field_name)\n\n if max_limit:\n if first:\n assert first <= max_limit, (\n 'Requesting {} records on the `{}` connection exceeds the `first` limit of {} records.'\n ).format(first, info.field_name, max_limit)\n args['first'] = min(first, max_limit)\n\n if last:\n assert last <= max_limit, (\n 'Requesting {} records on the `{}` connection exceeds the `last` limit of {} records.'\n ).format(first, info.field_name, max_limit)\n args['last'] = min(last, max_limit)\n\n iterable = resolver(root, args, context, info)\n if iterable is None:\n iterable = default_manager\n iterable = maybe_queryset(iterable)\n if isinstance(iterable, QuerySet):\n if iterable is not default_manager:\n default_queryset = maybe_queryset(default_manager)\n iterable = cls.merge_querysets(default_queryset, iterable)\n _len = iterable.count()\n else:\n _len = len(iterable)\n connection = connection_from_list_slice(\n iterable,\n args,\n slice_start=0,\n list_length=_len,\n list_slice_length=_len,\n connection_type=connection,\n edge_type=connection.Edge,\n pageinfo_type=PageInfo,\n )\n connection.iterable = iterable\n connection.length = _len\n return connection\n\n def get_resolver(self, parent_resolver):\n return partial(\n self.connection_resolver,\n parent_resolver,\n self.type,\n self.get_manager(),\n self.max_limit,\n self.enforce_first_or_last\n )\n\n\ndef get_connection_field(*args, **kwargs):\n if DJANGO_FILTER_INSTALLED:\n from .filter.fields import DjangoFilterConnectionField\n return DjangoFilterConnectionField(*args, **kwargs)\n return 
DjangoConnectionField(*args, **kwargs)\n", "path": "graphene_django/fields.py" } ]
diff --git a/graphene_django/fields.py b/graphene_django/fields.py index c2a2a8fd4..6f2755bbf 100644 --- a/graphene_django/fields.py +++ b/graphene_django/fields.py @@ -57,7 +57,7 @@ def get_manager(self): @classmethod def merge_querysets(cls, default_queryset, queryset): - return default_queryset & queryset + return queryset & default_queryset @classmethod def connection_resolver(cls, resolver, connection, default_manager, max_limit, diff --git a/graphene_django/tests/test_query.py b/graphene_django/tests/test_query.py index c1deebbcd..a5fb1f45f 100644 --- a/graphene_django/tests/test_query.py +++ b/graphene_django/tests/test_query.py @@ -284,6 +284,65 @@ def resolve_all_reporters(self, args, context, info): } +def test_should_keep_annotations(): + from django.db.models import ( + Count, + Avg, + ) + + class ReporterType(DjangoObjectType): + + class Meta: + model = Reporter + interfaces = (Node, ) + only_fields = ('articles', ) + + class ArticleType(DjangoObjectType): + + class Meta: + model = Article + interfaces = (Node, ) + filter_fields = ('lang', ) + + class Query(graphene.ObjectType): + all_reporters = DjangoConnectionField(ReporterType) + all_articles = DjangoConnectionField(ArticleType) + + def resolve_all_reporters(self, args, context, info): + return Reporter.objects.annotate(articles_c=Count('articles')).order_by('articles_c') + + def resolve_all_articles(self, args, context, info): + return Article.objects.annotate(import_avg=Avg('importance')).order_by('import_avg') + + schema = graphene.Schema(query=Query) + query = ''' + query ReporterConnectionQuery { + allReporters { + pageInfo { + hasNextPage + } + edges { + node { + id + } + } + } + allArticles { + pageInfo { + hasNextPage + } + edges { + node { + id + } + } + } + } + ''' + result = schema.execute(query) + assert not result.errors + + @pytest.mark.skipif(not DJANGO_FILTER_INSTALLED, reason="django-filter should be installed") def test_should_query_node_filtering():
pwr-Solaar__Solaar-618
solaar window always visible
**Information**
- Solaar version: 1.0.1 from github
- Distribution: github

The solaar window is always on top of my other windows. This can become quite a pain, and is unnecessary because the window can be brought to the front by clicking on a receiver or device in the tray menu.
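The behavior traces back to the main-window setup in `lib/solaar/ui/window.py` shown below, which calls `set_keep_above(True)` unconditionally. A standalone GTK3 sketch (not Solaar code) of the call in question:

```python
# Minimal GTK3 demo of the always-on-top flag; illustrative only, not Solaar's code.
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

window = Gtk.Window(title="demo")
window.add(Gtk.Label(label="hello"))

# Keeps the window above all others. Dropping this call lets it stack normally;
# it can still be raised on demand with window.present().
window.set_keep_above(True)

window.connect('destroy', Gtk.main_quit)
window.show_all()
Gtk.main()
```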
[ { "content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger # , DEBUG as _DEBUG\n_log = getLogger(__name__)\ndel getLogger\n\nfrom gi.repository import Gtk, Gdk, GLib\nfrom gi.repository.GObject import TYPE_PYOBJECT\n\nfrom solaar import NAME\nfrom solaar.i18n import _, ngettext\n# from solaar import __version__ as VERSION\nfrom solaar.ui import ui_async as _ui_async\nfrom logitech_receiver import hidpp10 as _hidpp10\nfrom logitech_receiver.common import NamedInts as _NamedInts, NamedInt as _NamedInt\nfrom logitech_receiver.status import KEYS as _K\nfrom . import config_panel as _config_panel\nfrom . import action as _action, icons as _icons\nfrom .about import show_window as _show_about_window\n\n#\n# constants\n#\n\n_SMALL_BUTTON_ICON_SIZE = Gtk.IconSize.MENU\n_NORMAL_BUTTON_ICON_SIZE = Gtk.IconSize.BUTTON\n_TREE_ICON_SIZE = Gtk.IconSize.BUTTON\n_INFO_ICON_SIZE = Gtk.IconSize.LARGE_TOOLBAR\n_DEVICE_ICON_SIZE = Gtk.IconSize.DND\ntry:\n import gi\n gi.check_version(\"3.7.4\")\n _CAN_SET_ROW_NONE = None\nexcept (ValueError, AttributeError):\n _CAN_SET_ROW_NONE = ''\n\n# tree model columns\n_COLUMN = _NamedInts(PATH=0, NUMBER=1, ACTIVE=2, NAME=3, ICON=4, STATUS_TEXT=5, STATUS_ICON=6, DEVICE=7)\n_COLUMN_TYPES = (str, int, bool, str, str, str, str, TYPE_PYOBJECT)\n_TREE_SEPATATOR = (None, 0, False, None, None, None, None, None)\nassert len(_TREE_SEPATATOR) == len(_COLUMN_TYPES)\nassert len(_COLUMN_TYPES) == len(_COLUMN)\n\n#\n# create UI layout\n#\n\ndef _new_button(label, icon_name=None, icon_size=_NORMAL_BUTTON_ICON_SIZE, tooltip=None, toggle=False, clicked=None):\n\tif toggle:\n\t\tb = Gtk.ToggleButton()\n\telse:\n\t\tb = Gtk.Button(label) if label else Gtk.Button()\n\n\tif icon_name:\n\t\timage = Gtk.Image.new_from_icon_name(icon_name, icon_size)\n\t\tb.set_image(image)\n\n\tif tooltip:\n\t\tb.set_tooltip_text(tooltip)\n\n\tif not label and icon_size < _NORMAL_BUTTON_ICON_SIZE:\n\t\tb.set_relief(Gtk.ReliefStyle.NONE)\n\t\tb.set_focus_on_click(False)\n\n\tif clicked is not None:\n\t\tb.connect('clicked', clicked)\n\n\treturn b\n\n\ndef _create_receiver_panel():\n\tp = Gtk.Box.new(Gtk.Orientation.VERTICAL, 4)\n\n\tp._count = Gtk.Label()\n\tp._count.set_padding(24, 0)\n\tp._count.set_alignment(0, 0.5)\n\tp.pack_start(p._count, True, True, 0)\n\n\tp._scanning = Gtk.Label(_(\"Scanning\") + '...')\n\tp._spinner = Gtk.Spinner()\n\n\tbp = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 8)\n\tbp.pack_start(Gtk.Label(' '), True, True, 0)\n\tbp.pack_start(p._scanning, False, False, 0)\n\tbp.pack_end(p._spinner, False, False, 0)\n\tp.pack_end(bp, False, False, 0)\n\n\treturn p\n\n\ndef _create_device_panel():\n\tp = 
Gtk.Box.new(Gtk.Orientation.VERTICAL, 4)\n\n\tdef _status_line(label_text):\n\t\tb = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 8)\n\t\tb.set_size_request(10, 28)\n\n\t\tb._label = Gtk.Label(label_text)\n\t\tb._label.set_alignment(0, 0.5)\n\t\tb._label.set_size_request(170, 10)\n\t\tb.pack_start(b._label, False, False, 0)\n\n\t\tb._icon = Gtk.Image()\n\t\tb.pack_start(b._icon, False, False, 0)\n\n\t\tb._text = Gtk.Label()\n\t\tb._text.set_alignment(0, 0.5)\n\t\tb.pack_start(b._text, True, True, 0)\n\n\t\treturn b\n\n\tp._battery = _status_line(_(\"Battery\"))\n\tp.pack_start(p._battery, False, False, 0)\n\n\tp._secure = _status_line(_(\"Wireless Link\"))\n\tp._secure._icon.set_from_icon_name('dialog-warning', _INFO_ICON_SIZE)\n\tp.pack_start(p._secure, False, False, 0)\n\n\tp._lux = _status_line(_(\"Lighting\"))\n\tp.pack_start(p._lux, False, False, 0)\n\n\tp._config = _config_panel.create()\n\tp.pack_end(p._config, False, False, 4)\n\n\treturn p\n\n\ndef _create_details_panel():\n\tp = Gtk.Frame()\n\tp.set_shadow_type(Gtk.ShadowType.NONE)\n\tp.set_size_request(240, 0)\n\tp.set_state_flags(Gtk.StateFlags.ACTIVE, True)\n\n\tp._text = Gtk.Label()\n\tp._text.set_padding(6, 4)\n\tp._text.set_alignment(0, 0)\n\tp._text.set_selectable(True)\n\tp.add(p._text)\n\n\treturn p\n\n\ndef _create_buttons_box():\n\tbb = Gtk.ButtonBox(Gtk.Orientation.HORIZONTAL)\n\tbb.set_layout(Gtk.ButtonBoxStyle.END)\n\n\tbb._details = _new_button(None, 'dialog-information', _SMALL_BUTTON_ICON_SIZE,\n\t\t\t\t\ttooltip=_(\"Show Technical Details\"), toggle=True, clicked=_update_details)\n\tbb.add(bb._details)\n\tbb.set_child_secondary(bb._details, True)\n\tbb.set_child_non_homogeneous(bb._details, True)\n\n\tdef _pair_new_device(trigger):\n\t\tassert _find_selected_device_id() is not None\n\t\treceiver = _find_selected_device()\n\t\tassert receiver is not None\n\t\tassert bool(receiver)\n\t\tassert receiver.kind is None\n\t\t_action.pair(_window, receiver)\n\n\tbb._pair = _new_button(_(\"Pair new device\"), 'list-add', clicked=_pair_new_device)\n\tbb.add(bb._pair)\n\n\tdef _unpair_current_device(trigger):\n\t\tassert _find_selected_device_id() is not None\n\t\tdevice = _find_selected_device()\n\t\tassert device is not None\n\t\tassert bool(device)\n\t\tassert device.kind is not None\n\t\t_action.unpair(_window, device)\n\n\tbb._unpair = _new_button(_(\"Unpair\"), 'edit-delete', clicked=_unpair_current_device)\n\tbb.add(bb._unpair)\n\n\treturn bb\n\n\ndef _create_empty_panel():\n\tp = Gtk.Label()\n\tp.set_markup('<small>' + _(\"Select a device\") + '</small>')\n\tp.set_sensitive(False)\n\n\treturn p\n\n\ndef _create_info_panel():\n\tp = Gtk.Box.new(Gtk.Orientation.VERTICAL, 4)\n\n\tp._title = Gtk.Label(' ')\n\tp._title.set_alignment(0, 0.5)\n\tp._icon = Gtk.Image()\n\n\tb1 = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 4)\n\tb1.pack_start(p._title, True, True, 0)\n\tb1.pack_start(p._icon, False, False, 0)\n\tp.pack_start(b1, False, False, 0)\n\n\tp.pack_start(Gtk.Separator.new(Gtk.Orientation.HORIZONTAL), False, False, 0) # spacer\n\n\tp._receiver = _create_receiver_panel()\n\tp.pack_start(p._receiver, True, True, 0)\n\n\tp._device = _create_device_panel()\n\tp.pack_start(p._device, True, True, 0)\n\n\tp.pack_start(Gtk.Separator.new(Gtk.Orientation.HORIZONTAL), False, False, 0) # spacer\n\n\tp._buttons = _create_buttons_box()\n\tp.pack_end(p._buttons, False, False, 0)\n\n\treturn p\n\n\ndef _create_tree(model):\n\ttree = Gtk.TreeView()\n\ttree.set_size_request(240, 
0)\n\ttree.set_headers_visible(False)\n\ttree.set_show_expanders(False)\n\ttree.set_level_indentation(20)\n\t# tree.set_fixed_height_mode(True)\n\ttree.set_enable_tree_lines(True)\n\ttree.set_reorderable(False)\n\ttree.set_enable_search(False)\n\ttree.set_model(model)\n\n\tdef _is_separator(model, item, _ignore=None):\n\t\treturn model.get_value(item, _COLUMN.PATH) is None\n\ttree.set_row_separator_func(_is_separator, None)\n\n\ticon_cell_renderer = Gtk.CellRendererPixbuf()\n\ticon_cell_renderer.set_property('stock-size', _TREE_ICON_SIZE)\n\ticon_column = Gtk.TreeViewColumn('Icon', icon_cell_renderer)\n\ticon_column.add_attribute(icon_cell_renderer, 'sensitive', _COLUMN.ACTIVE)\n\ticon_column.add_attribute(icon_cell_renderer, 'icon-name', _COLUMN.ICON)\n\ticon_column.set_fixed_width(1)\n\ttree.append_column(icon_column)\n\n\tname_cell_renderer = Gtk.CellRendererText()\n\tname_column = Gtk.TreeViewColumn('device name', name_cell_renderer)\n\tname_column.add_attribute(name_cell_renderer, 'sensitive', _COLUMN.ACTIVE)\n\tname_column.add_attribute(name_cell_renderer, 'text', _COLUMN.NAME)\n\tname_column.set_expand(True)\n\ttree.append_column(name_column)\n\ttree.set_expander_column(name_column)\n\n\tstatus_cell_renderer = Gtk.CellRendererText()\n\tstatus_cell_renderer.set_property('scale', 0.85)\n\tstatus_cell_renderer.set_property('xalign', 1)\n\tstatus_column = Gtk.TreeViewColumn('status text', status_cell_renderer)\n\tstatus_column.add_attribute(status_cell_renderer, 'sensitive', _COLUMN.ACTIVE)\n\tstatus_column.add_attribute(status_cell_renderer, 'text', _COLUMN.STATUS_TEXT)\n\tstatus_column.set_expand(True)\n\ttree.append_column(status_column)\n\n\tbattery_cell_renderer = Gtk.CellRendererPixbuf()\n\tbattery_cell_renderer.set_property('stock-size', _TREE_ICON_SIZE)\n\tbattery_column = Gtk.TreeViewColumn('status icon', battery_cell_renderer)\n\tbattery_column.add_attribute(battery_cell_renderer, 'sensitive', _COLUMN.ACTIVE)\n\tbattery_column.add_attribute(battery_cell_renderer, 'icon-name', _COLUMN.STATUS_ICON)\n\tbattery_column.set_fixed_width(1)\n\ttree.append_column(battery_column)\n\n\treturn tree\n\n\ndef _create_window_layout():\n\tassert _tree is not None\n\tassert _details is not None\n\tassert _info is not None\n\tassert _empty is not None\n\n\tassert _tree.get_selection().get_mode() == Gtk.SelectionMode.SINGLE\n\t_tree.get_selection().connect('changed', _device_selected)\n\n\ttree_scroll = Gtk.ScrolledWindow()\n\ttree_scroll.add(_tree)\n\ttree_scroll.set_min_content_width(_tree.get_size_request()[0])\n\ttree_scroll.set_shadow_type(Gtk.ShadowType.IN)\n\n\ttree_panel = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)\n\ttree_panel.set_homogeneous(False)\n\ttree_panel.pack_start(tree_scroll, True, True, 0)\n\ttree_panel.pack_start(_details, False, False, 0)\n\n\tpanel = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 16)\n\tpanel.pack_start(tree_panel, False, False, 0)\n\tpanel.pack_start(_info, True, True, 0)\n\tpanel.pack_start(_empty, True, True, 0)\n\n\tabout_button = _new_button(_(\"About\") + ' ' + NAME, 'help-about',\n\t\t\t\t\ticon_size=_SMALL_BUTTON_ICON_SIZE, clicked=_show_about_window)\n\n\tbottom_buttons_box = Gtk.ButtonBox(Gtk.Orientation.HORIZONTAL)\n\tbottom_buttons_box.set_layout(Gtk.ButtonBoxStyle.START)\n\tbottom_buttons_box.add(about_button)\n\n\t# solaar_version = Gtk.Label()\n\t# solaar_version.set_markup('<small>' + NAME + ' v' + VERSION + '</small>')\n\t# bottom_buttons_box.add(solaar_version)\n\t# bottom_buttons_box.set_child_secondary(solaar_version, True)\n\n\tvbox = 
Gtk.Box.new(Gtk.Orientation.VERTICAL, 8)\n\tvbox.set_border_width(8)\n\tvbox.pack_start(panel, True, True, 0)\n\tvbox.pack_end(bottom_buttons_box, False, False, 0)\n\tvbox.show_all()\n\n\t_details.set_visible(False)\n\t_info.set_visible(False)\n\treturn vbox\n\n\ndef _create():\n\twindow = Gtk.Window()\n\twindow.set_title(NAME)\n\twindow.set_role('status-window')\n\n\t# window.set_type_hint(Gdk.WindowTypeHint.UTILITY)\n\t# window.set_skip_taskbar_hint(True)\n\t# window.set_skip_pager_hint(True)\n\twindow.set_keep_above(True)\n\twindow.connect('delete-event', _hide)\n\n\tvbox = _create_window_layout()\n\twindow.add(vbox)\n\n\tgeometry = Gdk.Geometry()\n\tgeometry.min_width = 600\n\tgeometry.min_height = 320\n\tgeometry.max_width = 800\n\tgeometry.max_height = 600\n\twindow.set_geometry_hints(vbox, geometry, Gdk.WindowHints.MIN_SIZE | Gdk.WindowHints.MAX_SIZE)\n\twindow.set_position(Gtk.WindowPosition.CENTER)\n\n\treturn window\n\n#\n# window updates\n#\n\ndef _find_selected_device():\n\tselection = _tree.get_selection()\n\tmodel, item = selection.get_selected()\n\treturn model.get_value(item, _COLUMN.DEVICE) if item else None\n\n\ndef _find_selected_device_id():\n\tselection = _tree.get_selection()\n\tmodel, item = selection.get_selected()\n\tif item:\n\t\treturn _model.get_value(item, _COLUMN.PATH), _model.get_value(item, _COLUMN.NUMBER)\n\n\n# triggered by changing selection in the tree\ndef _device_selected(selection):\n\tmodel, item = selection.get_selected()\n\tdevice = model.get_value(item, _COLUMN.DEVICE) if item else None\n\t# if _log.isEnabledFor(_DEBUG):\n\t# \t_log.debug(\"window tree selected device %s\", device)\n\tif device:\n\t\t_update_info_panel(device, full=True)\n\telse:\n\t\t# When removing a receiver, one of its children may get automatically selected\n\t\t# before the tree had time to remove them as well.\n\t\t# Rather than chase around for another device to select, just clear the selection.\n\t\t_tree.get_selection().unselect_all()\n\t\t_update_info_panel(None, full=True)\n\n\ndef _receiver_row(receiver_path, receiver=None):\n\tassert receiver_path\n\n\titem = _model.get_iter_first()\n\twhile item:\n\t\t# first row matching the path must be the receiver one\n\t\tif _model.get_value(item, _COLUMN.PATH) == receiver_path:\n\t\t\treturn item\n\t\titem = _model.iter_next(item)\n\n\tif not item and receiver:\n\t\ticon_name = _icons.device_icon_name(receiver.name)\n\t\tstatus_text = None\n\t\tstatus_icon = None\n\t\trow_data = (receiver_path, 0, True, receiver.name, icon_name, status_text, status_icon, receiver)\n\t\tassert len(row_data) == len(_TREE_SEPATATOR)\n\t\t# if _log.isEnabledFor(_DEBUG):\n\t\t# \t_log.debug(\"new receiver row %s\", row_data)\n\t\titem = _model.append(None, row_data)\n\t\tif _TREE_SEPATATOR:\n\t\t\t_model.append(None, _TREE_SEPATATOR)\n\n\treturn item or None\n\n\ndef _device_row(receiver_path, device_number, device=None):\n\tassert receiver_path\n\tassert device_number is not None\n\n\treceiver_row = _receiver_row(receiver_path, None if device is None else device.receiver)\n\titem = _model.iter_children(receiver_row)\n\tnew_child_index = 0\n\twhile item:\n\t\tassert _model.get_value(item, _COLUMN.PATH) == receiver_path\n\t\titem_number = _model.get_value(item, _COLUMN.NUMBER)\n\t\tif item_number == device_number:\n\t\t\treturn item\n\t\tif item_number > device_number:\n\t\t\titem = None\n\t\t\tbreak\n\t\tnew_child_index += 1\n\t\titem = _model.iter_next(item)\n\n\tif not item and device:\n\t\ticon_name = _icons.device_icon_name(device.name, 
device.kind)\n\t\tstatus_text = None\n\t\tstatus_icon = None\n\t\trow_data = (receiver_path, device_number, bool(device.online), device.codename, icon_name, status_text, status_icon, device)\n\t\tassert len(row_data) == len(_TREE_SEPATATOR)\n\t\t# if _log.isEnabledFor(_DEBUG):\n\t\t# \t_log.debug(\"new device row %s at index %d\", row_data, new_child_index)\n\t\titem = _model.insert(receiver_row, new_child_index, row_data)\n\n\treturn item or None\n\n#\n#\n#\n\ndef select(receiver_path, device_number=None):\n\tassert _window\n\tassert receiver_path is not None\n\tif device_number is None:\n\t\titem = _receiver_row(receiver_path)\n\telse:\n\t\titem = _device_row(receiver_path, device_number)\n\tif item:\n\t\tselection = _tree.get_selection()\n\t\tselection.select_iter(item)\n\telse:\n\t\t_log.warn(\"select(%s, %s) failed to find an item\", receiver_path, device_number)\n\n\ndef _hide(w, _ignore=None):\n\tassert w == _window\n\t# some window managers move the window to 0,0 after hide()\n\t# so try to remember the last position\n\tposition = _window.get_position()\n\t_window.hide()\n\t_window.move(*position)\n\treturn True\n\n\ndef popup(trigger=None, receiver_path=None, device_id=None):\n\tif receiver_path:\n\t\tselect(receiver_path, device_id)\n\t_window.present()\n\treturn True\n\n\ndef toggle(trigger=None):\n\tif _window.get_visible():\n\t\t_hide(_window)\n\telse:\n\t\t_window.present()\n\n#\n#\n#\n\ndef _update_details(button):\n\tassert button\n\tvisible = button.get_active()\n\n\tif visible:\n\t\t# _details._text.set_markup('<small>reading...</small>')\n\n\t\tdef _details_items(device, read_all=False):\n\t\t\t# If read_all is False, only return stuff that is ~100% already\n\t\t\t# cached, and involves no HID++ calls.\n\n\t\t\tif device.kind is None:\n\t\t\t\tyield (_(\"Path\"), device.path)\n\t\t\t\t# 046d is the Logitech vendor id\n\t\t\t\tyield (_(\"USB id\"), '046d:' + device.product_id)\n\n\t\t\t\tif read_all:\n\t\t\t\t\tyield (_(\"Serial\"), device.serial)\n\t\t\t\telse:\n\t\t\t\t\tyield (_(\"Serial\"), '...')\n\n\t\t\telse:\n\t\t\t\t# yield ('Codename', device.codename)\n\t\t\t\tyield (_(\"Index\"), device.number)\n\t\t\t\tyield (_(\"Wireless PID\"), device.wpid)\n\t\t\t\thid_version = device.protocol\n\t\t\t\tyield (_(\"Protocol\"), 'HID++ %1.1f' % hid_version if hid_version else _('Unknown'))\n\t\t\t\tif read_all and device.polling_rate:\n\t\t\t\t\tyield (_(\"Polling rate\"), _('%(rate)d ms (%(rate_hz)dHz)') % { 'rate': device.polling_rate, 'rate_hz': 1000 // device.polling_rate })\n\n\t\t\t\tif read_all or not device.online:\n\t\t\t\t\tyield (_(\"Serial\"), device.serial)\n\t\t\t\telse:\n\t\t\t\t\tyield (_(\"Serial\"), '...')\n\n\t\t\tif read_all:\n\t\t\t\tfor fw in list(device.firmware):\n\t\t\t\t\tyield (' ' + _(str(fw.kind)), (fw.name + ' ' + fw.version).strip())\n\t\t\telif device.kind is None or device.online:\n\t\t\t\tyield (' %s' % _(\"Firmware\"), '...')\n\n\t\t\tflag_bits = device.status.get(_K.NOTIFICATION_FLAGS)\n\t\t\tif flag_bits is not None:\n\t\t\t\tflag_names = ('(%s)' % _(\"none\"),) if flag_bits == 0 else _hidpp10.NOTIFICATION_FLAG.flag_names(flag_bits)\n\t\t\t\tyield (_(\"Notifications\"), ('\\n%15s' % ' ').join(flag_names))\n\n\t\tdef _set_details(text):\n\t\t\t_details._text.set_markup(text)\n\n\t\tdef _make_text(items):\n\t\t\ttext = '\\n'.join('%-13s: %s' % i for i in items)\n\t\t\treturn '<small><tt>' + text + '</tt></small>'\n\n\t\tdef _read_slow(device):\n\t\t\titems = _details_items(selected_device, True)\n\t\t\ttext = _make_text(items)\n\t\t\tif 
device == _details._current_device:\n\t\t\t\tGLib.idle_add(_set_details, text)\n\n\t\tselected_device = _find_selected_device()\n\t\tassert selected_device\n\t\t_details._current_device = selected_device\n\n\t\tread_all = not (selected_device.kind is None or selected_device.online)\n\t\titems = _details_items(selected_device, read_all)\n\t\t_set_details(_make_text(items))\n\n\t\tif read_all:\n\t\t\t_details._current_device = None\n\t\telse:\n\t\t\t_ui_async(_read_slow, selected_device)\n\n\t_details.set_visible(visible)\n\n\ndef _update_receiver_panel(receiver, panel, buttons, full=False):\n\tassert receiver\n\n\tdevices_count = len(receiver)\n\n\tpaired_text = _('No device paired.') if devices_count == 0 else ngettext('%(count)s paired device.', '%(count)s paired devices.', devices_count) % { 'count': devices_count }\n\n\tif(receiver.max_devices > 0):\n\t\tpaired_text += '\\n\\n<small>%s</small>' % ngettext('Up to %(max_count)s device can be paired to this receiver.', 'Up to %(max_count)s devices can be paired to this receiver.', receiver.max_devices) % { 'max_count': receiver.max_devices }\n\telif(devices_count > 0):\n\t\tpaired_text += '\\n\\n<small>%s</small>' % _('Only one device can be paired to this receiver.')\n\n\tpanel._count.set_markup(paired_text)\n\n\tis_pairing = receiver.status.lock_open\n\tif is_pairing:\n\t\tpanel._scanning.set_visible(True)\n\t\tif not panel._spinner.get_visible():\n\t\t\tpanel._spinner.start()\n\t\tpanel._spinner.set_visible(True)\n\telse:\n\t\tpanel._scanning.set_visible(False)\n\t\tif panel._spinner.get_visible():\n\t\t\tpanel._spinner.stop()\n\t\tpanel._spinner.set_visible(False)\n\n\tpanel.set_visible(True)\n\n\t# b._insecure.set_visible(False)\n\tbuttons._unpair.set_visible(False)\n\n\tmay_pair = receiver.may_unpair and not is_pairing\n\tif may_pair and devices_count >= receiver.max_devices:\n\t\tonline_devices = tuple(n for n in range(1, receiver.max_devices) if n in receiver and receiver[n].online)\n\t\tmay_pair &= len(online_devices) < receiver.max_devices\n\tbuttons._pair.set_sensitive(may_pair)\n\tbuttons._pair.set_visible(True)\n\n\ndef _update_device_panel(device, panel, buttons, full=False):\n\tassert device\n\tis_online = bool(device.online)\n\tpanel.set_sensitive(is_online)\n\n\tbattery_level = device.status.get(_K.BATTERY_LEVEL)\n\tif battery_level is None:\n\t\ticon_name = _icons.battery()\n\t\tpanel._battery._icon.set_sensitive(False)\n\t\tpanel._battery._icon.set_from_icon_name(icon_name, _INFO_ICON_SIZE)\n\t\tpanel._battery._text.set_sensitive(True)\n\t\tpanel._battery._text.set_markup('<small>%s</small>' % _(\"unknown\"))\n\telse:\n\t\tcharging = device.status.get(_K.BATTERY_CHARGING)\n\t\ticon_name = _icons.battery(battery_level, charging)\n\t\tpanel._battery._icon.set_from_icon_name(icon_name, _INFO_ICON_SIZE)\n\t\tpanel._battery._icon.set_sensitive(True)\n\n\t\tif isinstance(battery_level, _NamedInt):\n\t\t\ttext = _(str(battery_level))\n\t\telse:\n\t\t\ttext = _(\"%(battery_percent)d%%\") % { 'battery_percent': battery_level }\n\t\tif is_online:\n\t\t\tif charging:\n\t\t\t\ttext += ' <small>(%s)</small>' % _(\"charging\")\n\t\telse:\n\t\t\ttext += ' <small>(%s)</small>' % _(\"last known\")\n\t\tpanel._battery._text.set_sensitive(is_online)\n\t\tpanel._battery._text.set_markup(text)\n\n\tif is_online:\n\t\tnot_secure = device.status.get(_K.LINK_ENCRYPTED) == False\n\t\tif not_secure:\n\t\t\tpanel._secure._text.set_text(_(\"not encrypted\"))\n\t\t\tpanel._secure._icon.set_from_icon_name('security-low', 
_INFO_ICON_SIZE)\n\t\t\tpanel._secure.set_tooltip_text(_(\"The wireless link between this device and its receiver is not encrypted.\\n\"\n\t\t\t\t\t\t\"\\n\"\n\t\t\t\t\t\t\"For pointing devices (mice, trackballs, trackpads), this is a minor security issue.\\n\"\n\t\t\t\t\t\t\"\\n\"\n\t\t\t\t\t\t\"It is, however, a major security issue for text-input devices (keyboards, numpads),\\n\"\n\t\t\t\t\t\t\"because typed text can be sniffed inconspicuously by 3rd parties within range.\"))\n\t\telse:\n\t\t\tpanel._secure._text.set_text(_(\"encrypted\"))\n\t\t\tpanel._secure._icon.set_from_icon_name('security-high', _INFO_ICON_SIZE)\n\t\t\tpanel._secure.set_tooltip_text(_(\"The wireless link between this device and its receiver is encrypted.\"))\n\t\tpanel._secure._icon.set_visible(True)\n\telse:\n\t\tpanel._secure._text.set_markup('<small>%s</small>' % _(\"offline\"))\n\t\tpanel._secure._icon.set_visible(False)\n\t\tpanel._secure.set_tooltip_text('')\n\n\tif is_online:\n\t\tlight_level = device.status.get(_K.LIGHT_LEVEL)\n\t\tif light_level is None:\n\t\t\tpanel._lux.set_visible(False)\n\t\telse:\n\t\t\tpanel._lux._icon.set_from_icon_name(_icons.lux(light_level), _INFO_ICON_SIZE)\n\t\t\tpanel._lux._text.set_text(_(\"%(light_level)d lux\") % { 'light_level': light_level })\n\t\t\tpanel._lux.set_visible(True)\n\telse:\n\t\tpanel._lux.set_visible(False)\n\n\tbuttons._pair.set_visible(False)\n\tbuttons._unpair.set_sensitive(device.receiver.may_unpair)\n\tbuttons._unpair.set_visible(True)\n\n\tpanel.set_visible(True)\n\n\tif full:\n\t\t_config_panel.update(device, is_online)\n\n\ndef _update_info_panel(device, full=False):\n\tif device is None:\n\t\t# no selected device, show the 'empty' panel\n\t\t_details.set_visible(False)\n\t\t_info.set_visible(False)\n\t\t_empty.set_visible(True)\n\t\treturn\n\n\t# a receiver must be valid\n\t# a device must be paired\n\tassert device\n\n\t_info._title.set_markup('<b>%s</b>' % device.name)\n\ticon_name = _icons.device_icon_name(device.name, device.kind)\n\t_info._icon.set_from_icon_name(icon_name, _DEVICE_ICON_SIZE)\n\n\tif device.kind is None:\n\t\t_info._device.set_visible(False)\n\t\t_info._icon.set_sensitive(True)\n\t\t_info._title.set_sensitive(True)\n\t\t_update_receiver_panel(device, _info._receiver, _info._buttons, full)\n\telse:\n\t\t_info._receiver.set_visible(False)\n\t\tis_online = bool(device.online)\n\t\t_info._icon.set_sensitive(is_online)\n\t\t_info._title.set_sensitive(is_online)\n\t\t_update_device_panel(device, _info._device, _info._buttons, full)\n\n\t_empty.set_visible(False)\n\t_info.set_visible(True)\n\n\tif full:\n\t\t_update_details(_info._buttons._details)\n\n#\n# window layout:\n# +--------------------------------+\n# | tree | receiver | empty |\n# | | or device | |\n# |------------| status | |\n# | details | | |\n# |--------------------------------|\n# | (about) |\n# +--------------------------------|\n# either the status or empty panel is visible at any point\n# the details panel can be toggle on/off\n\n_model = None\n_tree = None\n_details = None\n_info = None\n_empty = None\n_window = None\n\n\ndef init():\n\tGtk.Window.set_default_icon_name(NAME.lower())\n\tGtk.Window.set_default_icon_from_file(_icons.icon_file(NAME.lower()))\n\n\tglobal _model, _tree, _details, _info, _empty, _window\n\t_model = Gtk.TreeStore(*_COLUMN_TYPES)\n\t_tree = _create_tree(_model)\n\t_details = _create_details_panel()\n\t_info = _create_info_panel()\n\t_empty = _create_empty_panel()\n\t_window = _create()\n\n\ndef destroy():\n\tglobal _model, _tree, 
_details, _info, _empty, _window\n\tw, _window = _window, None\n\tw.destroy()\n\tw = None\n\t_config_panel.destroy()\n\n\t_empty = None\n\t_info = None\n\t_details = None\n\t_tree = None\n\t_model = None\n\n\ndef update(device, need_popup=False):\n\tif _window is None:\n\t\treturn\n\n\tassert device is not None\n\n\tif need_popup:\n\t\tpopup()\n\n\tselected_device_id = _find_selected_device_id()\n\n\tif device.kind is None:\n\t\t# receiver\n\t\tis_alive = bool(device)\n\t\titem = _receiver_row(device.path, device if is_alive else None)\n\t\tassert item\n\n\t\tif is_alive and item:\n\t\t\twas_pairing = bool(_model.get_value(item, _COLUMN.STATUS_ICON))\n\t\t\tis_pairing = bool(device.status.lock_open)\n\t\t\t_model.set_value(item, _COLUMN.STATUS_ICON, 'network-wireless' if is_pairing else _CAN_SET_ROW_NONE)\n\n\t\t\tif selected_device_id == (device.path, 0):\n\t\t\t\tfull_update = need_popup or was_pairing != is_pairing\n\t\t\t\t_update_info_panel(device, full=full_update)\n\n\t\telif item:\n\t\t\tif _TREE_SEPATATOR:\n\t\t\t\tseparator = _model.iter_next(item)\n\t\t\t\t_model.remove(separator)\n\t\t\t_model.remove(item)\n\n\telse:\n\t\t# peripheral\n\t\tis_paired = bool(device)\n\t\tassert device.receiver\n\t\tassert device.number is not None and device.number > 0, \"invalid device number\" + str(device.number)\n\t\titem = _device_row(device.receiver.path, device.number, device if is_paired else None)\n\n\t\tif is_paired and item:\n\t\t\twas_online = _model.get_value(item, _COLUMN.ACTIVE)\n\t\t\tis_online = bool(device.online)\n\t\t\t_model.set_value(item, _COLUMN.ACTIVE, is_online)\n\n\t\t\tbattery_level = device.status.get(_K.BATTERY_LEVEL)\n\t\t\tif battery_level is None:\n\t\t\t\t_model.set_value(item, _COLUMN.STATUS_TEXT, _CAN_SET_ROW_NONE)\n\t\t\t\t_model.set_value(item, _COLUMN.STATUS_ICON, _CAN_SET_ROW_NONE)\n\t\t\telse:\n\t\t\t\tif isinstance(battery_level, _NamedInt):\n\t\t\t\t\tstatus_text = _(\"%(battery_level)s\") % { 'battery_level': _(str(battery_level)) }\n\t\t\t\telse:\n\t\t\t\t\tstatus_text = _(\"%(battery_percent)d%%\") % { 'battery_percent': battery_level }\n\t\t\t\t_model.set_value(item, _COLUMN.STATUS_TEXT, status_text)\n\n\t\t\t\tcharging = device.status.get(_K.BATTERY_CHARGING)\n\t\t\t\ticon_name = _icons.battery(battery_level, charging)\n\t\t\t\t_model.set_value(item, _COLUMN.STATUS_ICON, icon_name)\n\n\t\t\tif selected_device_id is None or need_popup:\n\t\t\t\tselect(device.receiver.path, device.number)\n\t\t\telif selected_device_id == (device.receiver.path, device.number):\n\t\t\t\tfull_update = need_popup or was_online != is_online\n\t\t\t\t_update_info_panel(device, full=full_update)\n\n\t\telif item:\n\t\t\t_model.remove(item)\n\t\t\t_config_panel.clean(device)\n\n\t# make sure all rows are visible\n\t_tree.expand_all()\n", "path": "lib/solaar/ui/window.py" } ]
[ { "content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger # , DEBUG as _DEBUG\n_log = getLogger(__name__)\ndel getLogger\n\nfrom gi.repository import Gtk, Gdk, GLib\nfrom gi.repository.GObject import TYPE_PYOBJECT\n\nfrom solaar import NAME\nfrom solaar.i18n import _, ngettext\n# from solaar import __version__ as VERSION\nfrom solaar.ui import ui_async as _ui_async\nfrom logitech_receiver import hidpp10 as _hidpp10\nfrom logitech_receiver.common import NamedInts as _NamedInts, NamedInt as _NamedInt\nfrom logitech_receiver.status import KEYS as _K\nfrom . import config_panel as _config_panel\nfrom . import action as _action, icons as _icons\nfrom .about import show_window as _show_about_window\n\n#\n# constants\n#\n\n_SMALL_BUTTON_ICON_SIZE = Gtk.IconSize.MENU\n_NORMAL_BUTTON_ICON_SIZE = Gtk.IconSize.BUTTON\n_TREE_ICON_SIZE = Gtk.IconSize.BUTTON\n_INFO_ICON_SIZE = Gtk.IconSize.LARGE_TOOLBAR\n_DEVICE_ICON_SIZE = Gtk.IconSize.DND\ntry:\n import gi\n gi.check_version(\"3.7.4\")\n _CAN_SET_ROW_NONE = None\nexcept (ValueError, AttributeError):\n _CAN_SET_ROW_NONE = ''\n\n# tree model columns\n_COLUMN = _NamedInts(PATH=0, NUMBER=1, ACTIVE=2, NAME=3, ICON=4, STATUS_TEXT=5, STATUS_ICON=6, DEVICE=7)\n_COLUMN_TYPES = (str, int, bool, str, str, str, str, TYPE_PYOBJECT)\n_TREE_SEPATATOR = (None, 0, False, None, None, None, None, None)\nassert len(_TREE_SEPATATOR) == len(_COLUMN_TYPES)\nassert len(_COLUMN_TYPES) == len(_COLUMN)\n\n#\n# create UI layout\n#\n\ndef _new_button(label, icon_name=None, icon_size=_NORMAL_BUTTON_ICON_SIZE, tooltip=None, toggle=False, clicked=None):\n\tif toggle:\n\t\tb = Gtk.ToggleButton()\n\telse:\n\t\tb = Gtk.Button(label) if label else Gtk.Button()\n\n\tif icon_name:\n\t\timage = Gtk.Image.new_from_icon_name(icon_name, icon_size)\n\t\tb.set_image(image)\n\n\tif tooltip:\n\t\tb.set_tooltip_text(tooltip)\n\n\tif not label and icon_size < _NORMAL_BUTTON_ICON_SIZE:\n\t\tb.set_relief(Gtk.ReliefStyle.NONE)\n\t\tb.set_focus_on_click(False)\n\n\tif clicked is not None:\n\t\tb.connect('clicked', clicked)\n\n\treturn b\n\n\ndef _create_receiver_panel():\n\tp = Gtk.Box.new(Gtk.Orientation.VERTICAL, 4)\n\n\tp._count = Gtk.Label()\n\tp._count.set_padding(24, 0)\n\tp._count.set_alignment(0, 0.5)\n\tp.pack_start(p._count, True, True, 0)\n\n\tp._scanning = Gtk.Label(_(\"Scanning\") + '...')\n\tp._spinner = Gtk.Spinner()\n\n\tbp = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 8)\n\tbp.pack_start(Gtk.Label(' '), True, True, 0)\n\tbp.pack_start(p._scanning, False, False, 0)\n\tbp.pack_end(p._spinner, False, False, 0)\n\tp.pack_end(bp, False, False, 0)\n\n\treturn p\n\n\ndef _create_device_panel():\n\tp = 
Gtk.Box.new(Gtk.Orientation.VERTICAL, 4)\n\n\tdef _status_line(label_text):\n\t\tb = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 8)\n\t\tb.set_size_request(10, 28)\n\n\t\tb._label = Gtk.Label(label_text)\n\t\tb._label.set_alignment(0, 0.5)\n\t\tb._label.set_size_request(170, 10)\n\t\tb.pack_start(b._label, False, False, 0)\n\n\t\tb._icon = Gtk.Image()\n\t\tb.pack_start(b._icon, False, False, 0)\n\n\t\tb._text = Gtk.Label()\n\t\tb._text.set_alignment(0, 0.5)\n\t\tb.pack_start(b._text, True, True, 0)\n\n\t\treturn b\n\n\tp._battery = _status_line(_(\"Battery\"))\n\tp.pack_start(p._battery, False, False, 0)\n\n\tp._secure = _status_line(_(\"Wireless Link\"))\n\tp._secure._icon.set_from_icon_name('dialog-warning', _INFO_ICON_SIZE)\n\tp.pack_start(p._secure, False, False, 0)\n\n\tp._lux = _status_line(_(\"Lighting\"))\n\tp.pack_start(p._lux, False, False, 0)\n\n\tp._config = _config_panel.create()\n\tp.pack_end(p._config, False, False, 4)\n\n\treturn p\n\n\ndef _create_details_panel():\n\tp = Gtk.Frame()\n\tp.set_shadow_type(Gtk.ShadowType.NONE)\n\tp.set_size_request(240, 0)\n\tp.set_state_flags(Gtk.StateFlags.ACTIVE, True)\n\n\tp._text = Gtk.Label()\n\tp._text.set_padding(6, 4)\n\tp._text.set_alignment(0, 0)\n\tp._text.set_selectable(True)\n\tp.add(p._text)\n\n\treturn p\n\n\ndef _create_buttons_box():\n\tbb = Gtk.ButtonBox(Gtk.Orientation.HORIZONTAL)\n\tbb.set_layout(Gtk.ButtonBoxStyle.END)\n\n\tbb._details = _new_button(None, 'dialog-information', _SMALL_BUTTON_ICON_SIZE,\n\t\t\t\t\ttooltip=_(\"Show Technical Details\"), toggle=True, clicked=_update_details)\n\tbb.add(bb._details)\n\tbb.set_child_secondary(bb._details, True)\n\tbb.set_child_non_homogeneous(bb._details, True)\n\n\tdef _pair_new_device(trigger):\n\t\tassert _find_selected_device_id() is not None\n\t\treceiver = _find_selected_device()\n\t\tassert receiver is not None\n\t\tassert bool(receiver)\n\t\tassert receiver.kind is None\n\t\t_action.pair(_window, receiver)\n\n\tbb._pair = _new_button(_(\"Pair new device\"), 'list-add', clicked=_pair_new_device)\n\tbb.add(bb._pair)\n\n\tdef _unpair_current_device(trigger):\n\t\tassert _find_selected_device_id() is not None\n\t\tdevice = _find_selected_device()\n\t\tassert device is not None\n\t\tassert bool(device)\n\t\tassert device.kind is not None\n\t\t_action.unpair(_window, device)\n\n\tbb._unpair = _new_button(_(\"Unpair\"), 'edit-delete', clicked=_unpair_current_device)\n\tbb.add(bb._unpair)\n\n\treturn bb\n\n\ndef _create_empty_panel():\n\tp = Gtk.Label()\n\tp.set_markup('<small>' + _(\"Select a device\") + '</small>')\n\tp.set_sensitive(False)\n\n\treturn p\n\n\ndef _create_info_panel():\n\tp = Gtk.Box.new(Gtk.Orientation.VERTICAL, 4)\n\n\tp._title = Gtk.Label(' ')\n\tp._title.set_alignment(0, 0.5)\n\tp._icon = Gtk.Image()\n\n\tb1 = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 4)\n\tb1.pack_start(p._title, True, True, 0)\n\tb1.pack_start(p._icon, False, False, 0)\n\tp.pack_start(b1, False, False, 0)\n\n\tp.pack_start(Gtk.Separator.new(Gtk.Orientation.HORIZONTAL), False, False, 0) # spacer\n\n\tp._receiver = _create_receiver_panel()\n\tp.pack_start(p._receiver, True, True, 0)\n\n\tp._device = _create_device_panel()\n\tp.pack_start(p._device, True, True, 0)\n\n\tp.pack_start(Gtk.Separator.new(Gtk.Orientation.HORIZONTAL), False, False, 0) # spacer\n\n\tp._buttons = _create_buttons_box()\n\tp.pack_end(p._buttons, False, False, 0)\n\n\treturn p\n\n\ndef _create_tree(model):\n\ttree = Gtk.TreeView()\n\ttree.set_size_request(240, 
0)\n\ttree.set_headers_visible(False)\n\ttree.set_show_expanders(False)\n\ttree.set_level_indentation(20)\n\t# tree.set_fixed_height_mode(True)\n\ttree.set_enable_tree_lines(True)\n\ttree.set_reorderable(False)\n\ttree.set_enable_search(False)\n\ttree.set_model(model)\n\n\tdef _is_separator(model, item, _ignore=None):\n\t\treturn model.get_value(item, _COLUMN.PATH) is None\n\ttree.set_row_separator_func(_is_separator, None)\n\n\ticon_cell_renderer = Gtk.CellRendererPixbuf()\n\ticon_cell_renderer.set_property('stock-size', _TREE_ICON_SIZE)\n\ticon_column = Gtk.TreeViewColumn('Icon', icon_cell_renderer)\n\ticon_column.add_attribute(icon_cell_renderer, 'sensitive', _COLUMN.ACTIVE)\n\ticon_column.add_attribute(icon_cell_renderer, 'icon-name', _COLUMN.ICON)\n\ticon_column.set_fixed_width(1)\n\ttree.append_column(icon_column)\n\n\tname_cell_renderer = Gtk.CellRendererText()\n\tname_column = Gtk.TreeViewColumn('device name', name_cell_renderer)\n\tname_column.add_attribute(name_cell_renderer, 'sensitive', _COLUMN.ACTIVE)\n\tname_column.add_attribute(name_cell_renderer, 'text', _COLUMN.NAME)\n\tname_column.set_expand(True)\n\ttree.append_column(name_column)\n\ttree.set_expander_column(name_column)\n\n\tstatus_cell_renderer = Gtk.CellRendererText()\n\tstatus_cell_renderer.set_property('scale', 0.85)\n\tstatus_cell_renderer.set_property('xalign', 1)\n\tstatus_column = Gtk.TreeViewColumn('status text', status_cell_renderer)\n\tstatus_column.add_attribute(status_cell_renderer, 'sensitive', _COLUMN.ACTIVE)\n\tstatus_column.add_attribute(status_cell_renderer, 'text', _COLUMN.STATUS_TEXT)\n\tstatus_column.set_expand(True)\n\ttree.append_column(status_column)\n\n\tbattery_cell_renderer = Gtk.CellRendererPixbuf()\n\tbattery_cell_renderer.set_property('stock-size', _TREE_ICON_SIZE)\n\tbattery_column = Gtk.TreeViewColumn('status icon', battery_cell_renderer)\n\tbattery_column.add_attribute(battery_cell_renderer, 'sensitive', _COLUMN.ACTIVE)\n\tbattery_column.add_attribute(battery_cell_renderer, 'icon-name', _COLUMN.STATUS_ICON)\n\tbattery_column.set_fixed_width(1)\n\ttree.append_column(battery_column)\n\n\treturn tree\n\n\ndef _create_window_layout():\n\tassert _tree is not None\n\tassert _details is not None\n\tassert _info is not None\n\tassert _empty is not None\n\n\tassert _tree.get_selection().get_mode() == Gtk.SelectionMode.SINGLE\n\t_tree.get_selection().connect('changed', _device_selected)\n\n\ttree_scroll = Gtk.ScrolledWindow()\n\ttree_scroll.add(_tree)\n\ttree_scroll.set_min_content_width(_tree.get_size_request()[0])\n\ttree_scroll.set_shadow_type(Gtk.ShadowType.IN)\n\n\ttree_panel = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)\n\ttree_panel.set_homogeneous(False)\n\ttree_panel.pack_start(tree_scroll, True, True, 0)\n\ttree_panel.pack_start(_details, False, False, 0)\n\n\tpanel = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 16)\n\tpanel.pack_start(tree_panel, False, False, 0)\n\tpanel.pack_start(_info, True, True, 0)\n\tpanel.pack_start(_empty, True, True, 0)\n\n\tabout_button = _new_button(_(\"About\") + ' ' + NAME, 'help-about',\n\t\t\t\t\ticon_size=_SMALL_BUTTON_ICON_SIZE, clicked=_show_about_window)\n\n\tbottom_buttons_box = Gtk.ButtonBox(Gtk.Orientation.HORIZONTAL)\n\tbottom_buttons_box.set_layout(Gtk.ButtonBoxStyle.START)\n\tbottom_buttons_box.add(about_button)\n\n\t# solaar_version = Gtk.Label()\n\t# solaar_version.set_markup('<small>' + NAME + ' v' + VERSION + '</small>')\n\t# bottom_buttons_box.add(solaar_version)\n\t# bottom_buttons_box.set_child_secondary(solaar_version, True)\n\n\tvbox = 
Gtk.Box.new(Gtk.Orientation.VERTICAL, 8)\n\tvbox.set_border_width(8)\n\tvbox.pack_start(panel, True, True, 0)\n\tvbox.pack_end(bottom_buttons_box, False, False, 0)\n\tvbox.show_all()\n\n\t_details.set_visible(False)\n\t_info.set_visible(False)\n\treturn vbox\n\n\ndef _create():\n\twindow = Gtk.Window()\n\twindow.set_title(NAME)\n\twindow.set_role('status-window')\n\n\t# window.set_type_hint(Gdk.WindowTypeHint.UTILITY)\n\t# window.set_skip_taskbar_hint(True)\n\t# window.set_skip_pager_hint(True)\n\twindow.connect('delete-event', _hide)\n\n\tvbox = _create_window_layout()\n\twindow.add(vbox)\n\n\tgeometry = Gdk.Geometry()\n\tgeometry.min_width = 600\n\tgeometry.min_height = 320\n\tgeometry.max_width = 800\n\tgeometry.max_height = 600\n\twindow.set_geometry_hints(vbox, geometry, Gdk.WindowHints.MIN_SIZE | Gdk.WindowHints.MAX_SIZE)\n\twindow.set_position(Gtk.WindowPosition.CENTER)\n\n\treturn window\n\n#\n# window updates\n#\n\ndef _find_selected_device():\n\tselection = _tree.get_selection()\n\tmodel, item = selection.get_selected()\n\treturn model.get_value(item, _COLUMN.DEVICE) if item else None\n\n\ndef _find_selected_device_id():\n\tselection = _tree.get_selection()\n\tmodel, item = selection.get_selected()\n\tif item:\n\t\treturn _model.get_value(item, _COLUMN.PATH), _model.get_value(item, _COLUMN.NUMBER)\n\n\n# triggered by changing selection in the tree\ndef _device_selected(selection):\n\tmodel, item = selection.get_selected()\n\tdevice = model.get_value(item, _COLUMN.DEVICE) if item else None\n\t# if _log.isEnabledFor(_DEBUG):\n\t# \t_log.debug(\"window tree selected device %s\", device)\n\tif device:\n\t\t_update_info_panel(device, full=True)\n\telse:\n\t\t# When removing a receiver, one of its children may get automatically selected\n\t\t# before the tree had time to remove them as well.\n\t\t# Rather than chase around for another device to select, just clear the selection.\n\t\t_tree.get_selection().unselect_all()\n\t\t_update_info_panel(None, full=True)\n\n\ndef _receiver_row(receiver_path, receiver=None):\n\tassert receiver_path\n\n\titem = _model.get_iter_first()\n\twhile item:\n\t\t# first row matching the path must be the receiver one\n\t\tif _model.get_value(item, _COLUMN.PATH) == receiver_path:\n\t\t\treturn item\n\t\titem = _model.iter_next(item)\n\n\tif not item and receiver:\n\t\ticon_name = _icons.device_icon_name(receiver.name)\n\t\tstatus_text = None\n\t\tstatus_icon = None\n\t\trow_data = (receiver_path, 0, True, receiver.name, icon_name, status_text, status_icon, receiver)\n\t\tassert len(row_data) == len(_TREE_SEPATATOR)\n\t\t# if _log.isEnabledFor(_DEBUG):\n\t\t# \t_log.debug(\"new receiver row %s\", row_data)\n\t\titem = _model.append(None, row_data)\n\t\tif _TREE_SEPATATOR:\n\t\t\t_model.append(None, _TREE_SEPATATOR)\n\n\treturn item or None\n\n\ndef _device_row(receiver_path, device_number, device=None):\n\tassert receiver_path\n\tassert device_number is not None\n\n\treceiver_row = _receiver_row(receiver_path, None if device is None else device.receiver)\n\titem = _model.iter_children(receiver_row)\n\tnew_child_index = 0\n\twhile item:\n\t\tassert _model.get_value(item, _COLUMN.PATH) == receiver_path\n\t\titem_number = _model.get_value(item, _COLUMN.NUMBER)\n\t\tif item_number == device_number:\n\t\t\treturn item\n\t\tif item_number > device_number:\n\t\t\titem = None\n\t\t\tbreak\n\t\tnew_child_index += 1\n\t\titem = _model.iter_next(item)\n\n\tif not item and device:\n\t\ticon_name = _icons.device_icon_name(device.name, device.kind)\n\t\tstatus_text = 
None\n\t\tstatus_icon = None\n\t\trow_data = (receiver_path, device_number, bool(device.online), device.codename, icon_name, status_text, status_icon, device)\n\t\tassert len(row_data) == len(_TREE_SEPATATOR)\n\t\t# if _log.isEnabledFor(_DEBUG):\n\t\t# \t_log.debug(\"new device row %s at index %d\", row_data, new_child_index)\n\t\titem = _model.insert(receiver_row, new_child_index, row_data)\n\n\treturn item or None\n\n#\n#\n#\n\ndef select(receiver_path, device_number=None):\n\tassert _window\n\tassert receiver_path is not None\n\tif device_number is None:\n\t\titem = _receiver_row(receiver_path)\n\telse:\n\t\titem = _device_row(receiver_path, device_number)\n\tif item:\n\t\tselection = _tree.get_selection()\n\t\tselection.select_iter(item)\n\telse:\n\t\t_log.warn(\"select(%s, %s) failed to find an item\", receiver_path, device_number)\n\n\ndef _hide(w, _ignore=None):\n\tassert w == _window\n\t# some window managers move the window to 0,0 after hide()\n\t# so try to remember the last position\n\tposition = _window.get_position()\n\t_window.hide()\n\t_window.move(*position)\n\treturn True\n\n\ndef popup(trigger=None, receiver_path=None, device_id=None):\n\tif receiver_path:\n\t\tselect(receiver_path, device_id)\n\t_window.present()\n\treturn True\n\n\ndef toggle(trigger=None):\n\tif _window.get_visible():\n\t\t_hide(_window)\n\telse:\n\t\t_window.present()\n\n#\n#\n#\n\ndef _update_details(button):\n\tassert button\n\tvisible = button.get_active()\n\n\tif visible:\n\t\t# _details._text.set_markup('<small>reading...</small>')\n\n\t\tdef _details_items(device, read_all=False):\n\t\t\t# If read_all is False, only return stuff that is ~100% already\n\t\t\t# cached, and involves no HID++ calls.\n\n\t\t\tif device.kind is None:\n\t\t\t\tyield (_(\"Path\"), device.path)\n\t\t\t\t# 046d is the Logitech vendor id\n\t\t\t\tyield (_(\"USB id\"), '046d:' + device.product_id)\n\n\t\t\t\tif read_all:\n\t\t\t\t\tyield (_(\"Serial\"), device.serial)\n\t\t\t\telse:\n\t\t\t\t\tyield (_(\"Serial\"), '...')\n\n\t\t\telse:\n\t\t\t\t# yield ('Codename', device.codename)\n\t\t\t\tyield (_(\"Index\"), device.number)\n\t\t\t\tyield (_(\"Wireless PID\"), device.wpid)\n\t\t\t\thid_version = device.protocol\n\t\t\t\tyield (_(\"Protocol\"), 'HID++ %1.1f' % hid_version if hid_version else _('Unknown'))\n\t\t\t\tif read_all and device.polling_rate:\n\t\t\t\t\tyield (_(\"Polling rate\"), _('%(rate)d ms (%(rate_hz)dHz)') % { 'rate': device.polling_rate, 'rate_hz': 1000 // device.polling_rate })\n\n\t\t\t\tif read_all or not device.online:\n\t\t\t\t\tyield (_(\"Serial\"), device.serial)\n\t\t\t\telse:\n\t\t\t\t\tyield (_(\"Serial\"), '...')\n\n\t\t\tif read_all:\n\t\t\t\tfor fw in list(device.firmware):\n\t\t\t\t\tyield (' ' + _(str(fw.kind)), (fw.name + ' ' + fw.version).strip())\n\t\t\telif device.kind is None or device.online:\n\t\t\t\tyield (' %s' % _(\"Firmware\"), '...')\n\n\t\t\tflag_bits = device.status.get(_K.NOTIFICATION_FLAGS)\n\t\t\tif flag_bits is not None:\n\t\t\t\tflag_names = ('(%s)' % _(\"none\"),) if flag_bits == 0 else _hidpp10.NOTIFICATION_FLAG.flag_names(flag_bits)\n\t\t\t\tyield (_(\"Notifications\"), ('\\n%15s' % ' ').join(flag_names))\n\n\t\tdef _set_details(text):\n\t\t\t_details._text.set_markup(text)\n\n\t\tdef _make_text(items):\n\t\t\ttext = '\\n'.join('%-13s: %s' % i for i in items)\n\t\t\treturn '<small><tt>' + text + '</tt></small>'\n\n\t\tdef _read_slow(device):\n\t\t\titems = _details_items(selected_device, True)\n\t\t\ttext = _make_text(items)\n\t\t\tif device == 
_details._current_device:\n\t\t\t\tGLib.idle_add(_set_details, text)\n\n\t\tselected_device = _find_selected_device()\n\t\tassert selected_device\n\t\t_details._current_device = selected_device\n\n\t\tread_all = not (selected_device.kind is None or selected_device.online)\n\t\titems = _details_items(selected_device, read_all)\n\t\t_set_details(_make_text(items))\n\n\t\tif read_all:\n\t\t\t_details._current_device = None\n\t\telse:\n\t\t\t_ui_async(_read_slow, selected_device)\n\n\t_details.set_visible(visible)\n\n\ndef _update_receiver_panel(receiver, panel, buttons, full=False):\n\tassert receiver\n\n\tdevices_count = len(receiver)\n\n\tpaired_text = _('No device paired.') if devices_count == 0 else ngettext('%(count)s paired device.', '%(count)s paired devices.', devices_count) % { 'count': devices_count }\n\n\tif(receiver.max_devices > 0):\n\t\tpaired_text += '\\n\\n<small>%s</small>' % ngettext('Up to %(max_count)s device can be paired to this receiver.', 'Up to %(max_count)s devices can be paired to this receiver.', receiver.max_devices) % { 'max_count': receiver.max_devices }\n\telif(devices_count > 0):\n\t\tpaired_text += '\\n\\n<small>%s</small>' % _('Only one device can be paired to this receiver.')\n\n\tpanel._count.set_markup(paired_text)\n\n\tis_pairing = receiver.status.lock_open\n\tif is_pairing:\n\t\tpanel._scanning.set_visible(True)\n\t\tif not panel._spinner.get_visible():\n\t\t\tpanel._spinner.start()\n\t\tpanel._spinner.set_visible(True)\n\telse:\n\t\tpanel._scanning.set_visible(False)\n\t\tif panel._spinner.get_visible():\n\t\t\tpanel._spinner.stop()\n\t\tpanel._spinner.set_visible(False)\n\n\tpanel.set_visible(True)\n\n\t# b._insecure.set_visible(False)\n\tbuttons._unpair.set_visible(False)\n\n\tmay_pair = receiver.may_unpair and not is_pairing\n\tif may_pair and devices_count >= receiver.max_devices:\n\t\tonline_devices = tuple(n for n in range(1, receiver.max_devices) if n in receiver and receiver[n].online)\n\t\tmay_pair &= len(online_devices) < receiver.max_devices\n\tbuttons._pair.set_sensitive(may_pair)\n\tbuttons._pair.set_visible(True)\n\n\ndef _update_device_panel(device, panel, buttons, full=False):\n\tassert device\n\tis_online = bool(device.online)\n\tpanel.set_sensitive(is_online)\n\n\tbattery_level = device.status.get(_K.BATTERY_LEVEL)\n\tif battery_level is None:\n\t\ticon_name = _icons.battery()\n\t\tpanel._battery._icon.set_sensitive(False)\n\t\tpanel._battery._icon.set_from_icon_name(icon_name, _INFO_ICON_SIZE)\n\t\tpanel._battery._text.set_sensitive(True)\n\t\tpanel._battery._text.set_markup('<small>%s</small>' % _(\"unknown\"))\n\telse:\n\t\tcharging = device.status.get(_K.BATTERY_CHARGING)\n\t\ticon_name = _icons.battery(battery_level, charging)\n\t\tpanel._battery._icon.set_from_icon_name(icon_name, _INFO_ICON_SIZE)\n\t\tpanel._battery._icon.set_sensitive(True)\n\n\t\tif isinstance(battery_level, _NamedInt):\n\t\t\ttext = _(str(battery_level))\n\t\telse:\n\t\t\ttext = _(\"%(battery_percent)d%%\") % { 'battery_percent': battery_level }\n\t\tif is_online:\n\t\t\tif charging:\n\t\t\t\ttext += ' <small>(%s)</small>' % _(\"charging\")\n\t\telse:\n\t\t\ttext += ' <small>(%s)</small>' % _(\"last known\")\n\t\tpanel._battery._text.set_sensitive(is_online)\n\t\tpanel._battery._text.set_markup(text)\n\n\tif is_online:\n\t\tnot_secure = device.status.get(_K.LINK_ENCRYPTED) == False\n\t\tif not_secure:\n\t\t\tpanel._secure._text.set_text(_(\"not encrypted\"))\n\t\t\tpanel._secure._icon.set_from_icon_name('security-low', 
_INFO_ICON_SIZE)\n\t\t\tpanel._secure.set_tooltip_text(_(\"The wireless link between this device and its receiver is not encrypted.\\n\"\n\t\t\t\t\t\t\"\\n\"\n\t\t\t\t\t\t\"For pointing devices (mice, trackballs, trackpads), this is a minor security issue.\\n\"\n\t\t\t\t\t\t\"\\n\"\n\t\t\t\t\t\t\"It is, however, a major security issue for text-input devices (keyboards, numpads),\\n\"\n\t\t\t\t\t\t\"because typed text can be sniffed inconspicuously by 3rd parties within range.\"))\n\t\telse:\n\t\t\tpanel._secure._text.set_text(_(\"encrypted\"))\n\t\t\tpanel._secure._icon.set_from_icon_name('security-high', _INFO_ICON_SIZE)\n\t\t\tpanel._secure.set_tooltip_text(_(\"The wireless link between this device and its receiver is encrypted.\"))\n\t\tpanel._secure._icon.set_visible(True)\n\telse:\n\t\tpanel._secure._text.set_markup('<small>%s</small>' % _(\"offline\"))\n\t\tpanel._secure._icon.set_visible(False)\n\t\tpanel._secure.set_tooltip_text('')\n\n\tif is_online:\n\t\tlight_level = device.status.get(_K.LIGHT_LEVEL)\n\t\tif light_level is None:\n\t\t\tpanel._lux.set_visible(False)\n\t\telse:\n\t\t\tpanel._lux._icon.set_from_icon_name(_icons.lux(light_level), _INFO_ICON_SIZE)\n\t\t\tpanel._lux._text.set_text(_(\"%(light_level)d lux\") % { 'light_level': light_level })\n\t\t\tpanel._lux.set_visible(True)\n\telse:\n\t\tpanel._lux.set_visible(False)\n\n\tbuttons._pair.set_visible(False)\n\tbuttons._unpair.set_sensitive(device.receiver.may_unpair)\n\tbuttons._unpair.set_visible(True)\n\n\tpanel.set_visible(True)\n\n\tif full:\n\t\t_config_panel.update(device, is_online)\n\n\ndef _update_info_panel(device, full=False):\n\tif device is None:\n\t\t# no selected device, show the 'empty' panel\n\t\t_details.set_visible(False)\n\t\t_info.set_visible(False)\n\t\t_empty.set_visible(True)\n\t\treturn\n\n\t# a receiver must be valid\n\t# a device must be paired\n\tassert device\n\n\t_info._title.set_markup('<b>%s</b>' % device.name)\n\ticon_name = _icons.device_icon_name(device.name, device.kind)\n\t_info._icon.set_from_icon_name(icon_name, _DEVICE_ICON_SIZE)\n\n\tif device.kind is None:\n\t\t_info._device.set_visible(False)\n\t\t_info._icon.set_sensitive(True)\n\t\t_info._title.set_sensitive(True)\n\t\t_update_receiver_panel(device, _info._receiver, _info._buttons, full)\n\telse:\n\t\t_info._receiver.set_visible(False)\n\t\tis_online = bool(device.online)\n\t\t_info._icon.set_sensitive(is_online)\n\t\t_info._title.set_sensitive(is_online)\n\t\t_update_device_panel(device, _info._device, _info._buttons, full)\n\n\t_empty.set_visible(False)\n\t_info.set_visible(True)\n\n\tif full:\n\t\t_update_details(_info._buttons._details)\n\n#\n# window layout:\n# +--------------------------------+\n# | tree | receiver | empty |\n# | | or device | |\n# |------------| status | |\n# | details | | |\n# |--------------------------------|\n# | (about) |\n# +--------------------------------|\n# either the status or empty panel is visible at any point\n# the details panel can be toggle on/off\n\n_model = None\n_tree = None\n_details = None\n_info = None\n_empty = None\n_window = None\n\n\ndef init():\n\tGtk.Window.set_default_icon_name(NAME.lower())\n\tGtk.Window.set_default_icon_from_file(_icons.icon_file(NAME.lower()))\n\n\tglobal _model, _tree, _details, _info, _empty, _window\n\t_model = Gtk.TreeStore(*_COLUMN_TYPES)\n\t_tree = _create_tree(_model)\n\t_details = _create_details_panel()\n\t_info = _create_info_panel()\n\t_empty = _create_empty_panel()\n\t_window = _create()\n\n\ndef destroy():\n\tglobal _model, _tree, 
_details, _info, _empty, _window\n\tw, _window = _window, None\n\tw.destroy()\n\tw = None\n\t_config_panel.destroy()\n\n\t_empty = None\n\t_info = None\n\t_details = None\n\t_tree = None\n\t_model = None\n\n\ndef update(device, need_popup=False):\n\tif _window is None:\n\t\treturn\n\n\tassert device is not None\n\n\tif need_popup:\n\t\tpopup()\n\n\tselected_device_id = _find_selected_device_id()\n\n\tif device.kind is None:\n\t\t# receiver\n\t\tis_alive = bool(device)\n\t\titem = _receiver_row(device.path, device if is_alive else None)\n\t\tassert item\n\n\t\tif is_alive and item:\n\t\t\twas_pairing = bool(_model.get_value(item, _COLUMN.STATUS_ICON))\n\t\t\tis_pairing = bool(device.status.lock_open)\n\t\t\t_model.set_value(item, _COLUMN.STATUS_ICON, 'network-wireless' if is_pairing else _CAN_SET_ROW_NONE)\n\n\t\t\tif selected_device_id == (device.path, 0):\n\t\t\t\tfull_update = need_popup or was_pairing != is_pairing\n\t\t\t\t_update_info_panel(device, full=full_update)\n\n\t\telif item:\n\t\t\tif _TREE_SEPATATOR:\n\t\t\t\tseparator = _model.iter_next(item)\n\t\t\t\t_model.remove(separator)\n\t\t\t_model.remove(item)\n\n\telse:\n\t\t# peripheral\n\t\tis_paired = bool(device)\n\t\tassert device.receiver\n\t\tassert device.number is not None and device.number > 0, \"invalid device number\" + str(device.number)\n\t\titem = _device_row(device.receiver.path, device.number, device if is_paired else None)\n\n\t\tif is_paired and item:\n\t\t\twas_online = _model.get_value(item, _COLUMN.ACTIVE)\n\t\t\tis_online = bool(device.online)\n\t\t\t_model.set_value(item, _COLUMN.ACTIVE, is_online)\n\n\t\t\tbattery_level = device.status.get(_K.BATTERY_LEVEL)\n\t\t\tif battery_level is None:\n\t\t\t\t_model.set_value(item, _COLUMN.STATUS_TEXT, _CAN_SET_ROW_NONE)\n\t\t\t\t_model.set_value(item, _COLUMN.STATUS_ICON, _CAN_SET_ROW_NONE)\n\t\t\telse:\n\t\t\t\tif isinstance(battery_level, _NamedInt):\n\t\t\t\t\tstatus_text = _(\"%(battery_level)s\") % { 'battery_level': _(str(battery_level)) }\n\t\t\t\telse:\n\t\t\t\t\tstatus_text = _(\"%(battery_percent)d%%\") % { 'battery_percent': battery_level }\n\t\t\t\t_model.set_value(item, _COLUMN.STATUS_TEXT, status_text)\n\n\t\t\t\tcharging = device.status.get(_K.BATTERY_CHARGING)\n\t\t\t\ticon_name = _icons.battery(battery_level, charging)\n\t\t\t\t_model.set_value(item, _COLUMN.STATUS_ICON, icon_name)\n\n\t\t\tif selected_device_id is None or need_popup:\n\t\t\t\tselect(device.receiver.path, device.number)\n\t\t\telif selected_device_id == (device.receiver.path, device.number):\n\t\t\t\tfull_update = need_popup or was_online != is_online\n\t\t\t\t_update_info_panel(device, full=full_update)\n\n\t\telif item:\n\t\t\t_model.remove(item)\n\t\t\t_config_panel.clean(device)\n\n\t# make sure all rows are visible\n\t_tree.expand_all()\n", "path": "lib/solaar/ui/window.py" } ]
diff --git a/lib/solaar/ui/window.py b/lib/solaar/ui/window.py index 446213d220..4c87da30d7 100644 --- a/lib/solaar/ui/window.py +++ b/lib/solaar/ui/window.py @@ -337,7 +337,6 @@ def _create(): # window.set_type_hint(Gdk.WindowTypeHint.UTILITY) # window.set_skip_taskbar_hint(True) # window.set_skip_pager_hint(True) - window.set_keep_above(True) window.connect('delete-event', _hide) vbox = _create_window_layout()
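The diff above drops the unconditional `window.set_keep_above(True)` call from `_create()`, so the Solaar window no longer forces itself above all other windows. If that behaviour were still wanted, one option would be to make it opt-in rather than hard-coded; the sketch below is purely hypothetical and is not part of the actual patch (no `keep_above` parameter exists in Solaar), and it assumes the module's existing imports while omitting the geometry hints and positioning that the real `_create()` sets.

```
# Hypothetical sketch only -- not part of the Solaar change above.
# Makes the keep-above behaviour an explicit opt-in instead of removing it.
def _create(keep_above=False):
    window = Gtk.Window()
    window.set_title(NAME)
    window.set_role('status-window')
    if keep_above:
        # Gtk.Window.set_keep_above() asks the window manager to keep this
        # window above others; only do so when explicitly requested.
        window.set_keep_above(True)
    window.connect('delete-event', _hide)

    vbox = _create_window_layout()
    window.add(vbox)
    return window
```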
spack__spack-26095
CentOS 6 image doesn't build with clingo on Dockerhub ### Steps to reproduce Has to do with failure on centos:6 ``` Step 17/19 : RUN spack spec hdf5+mpi ---> Running in 8335d48ff53f ==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification. ==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification. ==> Warning: the original concretizer is currently being used. Upgrade to "clingo" at your earliest convenience. The original concretizer will be removed from Spack starting at v0.18.0 ==> Error: cannot bootstrap the "clingo" Python module from spec "clingo-bootstrap@spack+python %gcc target=x86_64" Input spec -------------------------------- hdf5+mpi Concretized -------------------------------- ==> Bootstrapping clingo from pre-built binaries The command 'docker-shell spack spec hdf5+mpi' returned a non-zero code: 3 ``` --- So it bootstraps *during* concretization?
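Two points of context, grounded in the log above: the failing `RUN spack spec hdf5+mpi` is the last step of the CentOS 6 recipe in `share/spack/docker/centos-6.dockerfile` (removed in the diff further down), and yes, clingo is bootstrapped lazily the first time concretization runs, which is why a plain `spack spec` triggers the download attempt; the SSL warnings suggest the CentOS 6 system Python cannot verify certificates, which likely contributes to the failure to fetch the pre-built clingo binaries. A rough way to reproduce locally, assuming Docker is available and the checkout still contains the CentOS 6 recipe (the image tag below is just an example):

```
# Run from the repository root of a checkout that still has the CentOS 6 recipe.
$ docker build -f share/spack/docker/centos-6.dockerfile -t spack/centos6:local .
# The build should stop at the same `RUN spack spec hdf5+mpi` step shown in the log above.
```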
[ { "content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\"\"\"Schema for the 'container' subsection of Spack environments.\"\"\"\n\n_stages_from_dockerhub = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'os': {\n 'type': 'string',\n 'enum': ['ubuntu:18.04',\n 'ubuntu:16.04',\n 'centos:7',\n 'centos:6']\n },\n 'spack': {\n 'type': 'string',\n },\n },\n 'required': ['os', 'spack']\n}\n\n_custom_stages = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'build': {'type': 'string'},\n 'final': {'type': 'string'}\n },\n 'required': ['build', 'final']\n}\n\n#: List of packages for the schema below\n_list_of_packages = {\n 'type': 'array',\n 'items': {\n 'type': 'string'\n }\n}\n\n#: Schema for the container attribute included in Spack environments\ncontainer_schema = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n # The recipe formats that are currently supported by the command\n 'format': {\n 'type': 'string',\n 'enum': ['docker', 'singularity']\n },\n # Describes the base image to start from and the version\n # of Spack to be used\n 'images': {'anyOf': [_stages_from_dockerhub, _custom_stages]},\n # Whether or not to strip installed binaries\n 'strip': {\n 'type': 'boolean',\n 'default': True\n },\n # Additional system packages that are needed at runtime\n 'os_packages': {\n 'type': 'object',\n 'properties': {\n 'command': {'type': 'string', 'enum': ['apt', 'yum']},\n 'update': {'type': 'boolean'},\n 'build': _list_of_packages,\n 'final': _list_of_packages\n },\n 'additionalProperties': False\n },\n # Add labels to the image\n 'labels': {\n 'type': 'object',\n },\n # Add a custom extra section at the bottom of a stage\n 'extra_instructions': {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'build': {'type': 'string'},\n 'final': {'type': 'string'}\n }\n },\n # Reserved for properties that are specific to each format\n 'singularity': {\n 'type': 'object',\n 'additionalProperties': False,\n 'default': {},\n 'properties': {\n 'runscript': {'type': 'string'},\n 'startscript': {'type': 'string'},\n 'test': {'type': 'string'},\n 'help': {'type': 'string'}\n }\n },\n 'docker': {\n 'type': 'object',\n 'additionalProperties': False,\n 'default': {},\n }\n }\n}\n\nproperties = {'container': container_schema}\n", "path": "lib/spack/spack/schema/container.py" } ]
[ { "content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\"\"\"Schema for the 'container' subsection of Spack environments.\"\"\"\n\n_stages_from_dockerhub = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'os': {\n 'type': 'string',\n 'enum': ['ubuntu:18.04',\n 'ubuntu:16.04',\n 'centos:7']\n },\n 'spack': {\n 'type': 'string',\n },\n },\n 'required': ['os', 'spack']\n}\n\n_custom_stages = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'build': {'type': 'string'},\n 'final': {'type': 'string'}\n },\n 'required': ['build', 'final']\n}\n\n#: List of packages for the schema below\n_list_of_packages = {\n 'type': 'array',\n 'items': {\n 'type': 'string'\n }\n}\n\n#: Schema for the container attribute included in Spack environments\ncontainer_schema = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n # The recipe formats that are currently supported by the command\n 'format': {\n 'type': 'string',\n 'enum': ['docker', 'singularity']\n },\n # Describes the base image to start from and the version\n # of Spack to be used\n 'images': {'anyOf': [_stages_from_dockerhub, _custom_stages]},\n # Whether or not to strip installed binaries\n 'strip': {\n 'type': 'boolean',\n 'default': True\n },\n # Additional system packages that are needed at runtime\n 'os_packages': {\n 'type': 'object',\n 'properties': {\n 'command': {'type': 'string', 'enum': ['apt', 'yum']},\n 'update': {'type': 'boolean'},\n 'build': _list_of_packages,\n 'final': _list_of_packages\n },\n 'additionalProperties': False\n },\n # Add labels to the image\n 'labels': {\n 'type': 'object',\n },\n # Add a custom extra section at the bottom of a stage\n 'extra_instructions': {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'build': {'type': 'string'},\n 'final': {'type': 'string'}\n }\n },\n # Reserved for properties that are specific to each format\n 'singularity': {\n 'type': 'object',\n 'additionalProperties': False,\n 'default': {},\n 'properties': {\n 'runscript': {'type': 'string'},\n 'startscript': {'type': 'string'},\n 'test': {'type': 'string'},\n 'help': {'type': 'string'}\n }\n },\n 'docker': {\n 'type': 'object',\n 'additionalProperties': False,\n 'default': {},\n }\n }\n}\n\nproperties = {'container': container_schema}\n", "path": "lib/spack/spack/schema/container.py" } ]
diff --git a/lib/spack/docs/containers.rst b/lib/spack/docs/containers.rst index e88b55a2263644..4364b5d4dbb2ad 100644 --- a/lib/spack/docs/containers.rst +++ b/lib/spack/docs/containers.rst @@ -126,9 +126,6 @@ are currently supported are summarized in the table below: * - Ubuntu 18.04 - ``ubuntu:18.04`` - ``spack/ubuntu-bionic`` - * - CentOS 6 - - ``centos:6`` - - ``spack/centos6`` * - CentOS 7 - ``centos:7`` - ``spack/centos7`` diff --git a/lib/spack/spack/container/images.json b/lib/spack/spack/container/images.json index cb495908c93a24..9461d576d13cbf 100644 --- a/lib/spack/spack/container/images.json +++ b/lib/spack/spack/container/images.json @@ -21,13 +21,6 @@ "build_tags": { "develop": "latest" } - }, - "centos:6": { - "os_package_manager": "yum", - "build": "spack/centos6", - "build_tags": { - "develop": "latest" - } } }, "os_package_managers": { diff --git a/lib/spack/spack/schema/container.py b/lib/spack/spack/schema/container.py index 5223efa041c93b..2011b428538cca 100644 --- a/lib/spack/spack/schema/container.py +++ b/lib/spack/spack/schema/container.py @@ -12,8 +12,7 @@ 'type': 'string', 'enum': ['ubuntu:18.04', 'ubuntu:16.04', - 'centos:7', - 'centos:6'] + 'centos:7'] }, 'spack': { 'type': 'string', diff --git a/share/spack/docker/centos-6.dockerfile b/share/spack/docker/centos-6.dockerfile deleted file mode 100644 index b0186f2ff838f9..00000000000000 --- a/share/spack/docker/centos-6.dockerfile +++ /dev/null @@ -1,74 +0,0 @@ -FROM centos:6 -MAINTAINER Spack Maintainers <[email protected]> - -ENV DOCKERFILE_BASE=centos \ - DOCKERFILE_DISTRO=centos \ - DOCKERFILE_DISTRO_VERSION=6 \ - SPACK_ROOT=/opt/spack \ - DEBIAN_FRONTEND=noninteractive \ - CURRENTLY_BUILDING_DOCKER_IMAGE=1 \ - container=docker - -# Make yum usable again with CentOS 6 -RUN curl https://www.getpagespeed.com/files/centos6-eol.repo --output /etc/yum.repos.d/CentOS-Base.repo - -RUN yum update -y \ - && yum install -y epel-release \ - && yum update -y \ - && yum --enablerepo epel groupinstall -y "Development Tools" \ - && yum --enablerepo epel install -y \ - curl \ - findutils \ - gcc-c++ \ - gcc \ - gcc-gfortran \ - git \ - gnupg2 \ - hostname \ - iproute \ - Lmod \ - make \ - patch \ - python \ - python-pip \ - python-setuptools \ - tcl \ - unzip \ - which \ - && rm -rf /var/cache/yum \ - && yum clean all - -COPY bin $SPACK_ROOT/bin -COPY etc $SPACK_ROOT/etc -COPY lib $SPACK_ROOT/lib -COPY share $SPACK_ROOT/share -COPY var $SPACK_ROOT/var -RUN mkdir -p $SPACK_ROOT/opt/spack - -RUN ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \ - /usr/local/bin/docker-shell \ - && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \ - /usr/local/bin/interactive-shell \ - && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \ - /usr/local/bin/spack-env - -RUN mkdir -p /root/.spack \ - && cp $SPACK_ROOT/share/spack/docker/modules.yaml \ - /root/.spack/modules.yaml \ - && rm -rf /root/*.* /run/nologin $SPACK_ROOT/.git - -# [WORKAROUND] -# https://superuser.com/questions/1241548/ -# xubuntu-16-04-ttyname-failed-inappropriate-ioctl-for-device#1253889 -RUN [ -f ~/.profile ] \ - && sed -i 's/mesg n/( tty -s \\&\\& mesg n || true )/g' ~/.profile \ - || true - -WORKDIR /root -SHELL ["docker-shell"] - -# TODO: add a command to Spack that (re)creates the package cache -RUN spack spec hdf5+mpi - -ENTRYPOINT ["/bin/bash", "/opt/spack/share/spack/docker/entrypoint.bash"] -CMD ["interactive-shell"]
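The schema change above only narrows the `os` enum (the rest of the diff removes the CentOS 6 Dockerfile and its documentation entry), so existing environments simply need to target one of the remaining base images. As a reference point, here is a minimal `spack.yaml` whose `container` section validates against the updated schema; the spec, package, and label values are illustrative only.

```
# Illustrative spack.yaml: a container section that validates against the
# schema above, using a base image still present in the `os` enum.
spack:
  specs:
  - hdf5+mpi
  container:
    format: docker
    images:
      os: "centos:7"
      spack: develop
    strip: true
    os_packages:
      command: yum
      update: true
      final:
      - libgomp
    labels:
      app: "hdf5"
```

Running `spack containerize` in such an environment directory would then emit a recipe based on the `spack/centos7` build image rather than the removed `spack/centos6` one.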