in_source_id: string, lengths 13-58
issue: string, lengths 3-241k
before_files: list, lengths 0-3
after_files: list, lengths 0-3
pr_diff: string, lengths 109-107M
neptune-ai__neptune-client-899
BUG: Inconsistency in `fetch_runs_table` tag selector Hi, according to `fetch_runs_table`'s documentation: > Only experiments that have all specified tags will match this criterion. Thus, the query aggregator for tags should be `AND`. https://github.com/neptune-ai/neptune-client/blob/4b164bc470278edd160b7b9b5059a32c4822b376/neptune/new/metadata_containers/project.py#L235
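For reference, a minimal sketch of the tag criterion with the `AND` semantics the docstring promises, built from the NQL helpers that `project.py` (shown below) already imports; the tag values are illustrative:

```python
from neptune.new.internal.backends.nql import (
    NQLAggregator,
    NQLAttributeOperator,
    NQLAttributeType,
    NQLQueryAggregate,
    NQLQueryAttribute,
)

tags = ["Exploration", "Optuna"]  # example tags

# One CONTAINS check per tag, combined with AND so a run must carry every
# requested tag; the buggy version combined them with NQLAggregator.OR.
tag_query = NQLQueryAggregate(
    items=[
        NQLQueryAttribute(
            name="sys/tags",
            type=NQLAttributeType.STRING_SET,
            operator=NQLAttributeOperator.CONTAINS,
            value=tag,
        )
        for tag in tags
    ],
    aggregator=NQLAggregator.AND,
)
```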
[ { "content": "#\n# Copyright (c) 2020, Neptune Labs Sp. z o.o.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport threading\nfrom typing import Any, Dict, Iterable, Optional, Union\n\nfrom neptune.new.internal.backends.neptune_backend import NeptuneBackend\nfrom neptune.new.internal.backends.nql import (\n NQLAggregator,\n NQLAttributeOperator,\n NQLAttributeType,\n NQLQueryAggregate,\n NQLQueryAttribute,\n)\nfrom neptune.new.internal.background_job import BackgroundJob\nfrom neptune.new.internal.container_type import ContainerType\nfrom neptune.new.internal.id_formats import SysId, UniqueId\nfrom neptune.new.internal.operation_processors.operation_processor import (\n OperationProcessor,\n)\nfrom neptune.new.internal.utils import as_list\nfrom neptune.new.metadata_containers import MetadataContainer\nfrom neptune.new.metadata_containers.metadata_containers_table import Table\nfrom neptune.new.types.mode import Mode\n\n\nclass Project(MetadataContainer):\n \"\"\"A class for managing a Neptune project and retrieving information from it.\n\n You may also want to check `Project docs page`_.\n\n .. _Project docs page:\n https://docs.neptune.ai/api-reference/project\n \"\"\"\n\n container_type = ContainerType.PROJECT\n\n def __init__(\n self,\n *,\n id_: UniqueId,\n mode: Mode,\n backend: NeptuneBackend,\n op_processor: OperationProcessor,\n background_job: BackgroundJob,\n lock: threading.RLock,\n workspace: str,\n project_name: str,\n sys_id: SysId,\n ):\n super().__init__(\n id_=id_,\n mode=mode,\n backend=backend,\n op_processor=op_processor,\n background_job=background_job,\n lock=lock,\n project_id=id_,\n project_name=project_name,\n workspace=workspace,\n sys_id=sys_id,\n )\n\n @property\n def _docs_url_stop(self) -> str:\n return \"https://docs.neptune.ai/api-reference/project#.stop\"\n\n @property\n def _label(self) -> str:\n return f\"{self._workspace}/{self._project_name}\"\n\n @property\n def _url(self) -> str:\n return self._backend.get_project_url(\n project_id=self._id,\n workspace=self._workspace,\n project_name=self._project_name,\n )\n\n @property\n def _metadata_url(self) -> str:\n return self._url.rstrip(\"/\") + \"/metadata\"\n\n # pylint:disable=redefined-builtin\n def fetch_runs_table(\n self,\n id: Optional[Union[str, Iterable[str]]] = None,\n state: Optional[Union[str, Iterable[str]]] = None,\n owner: Optional[Union[str, Iterable[str]]] = None,\n tag: Optional[Union[str, Iterable[str]]] = None,\n ) -> Table:\n \"\"\"Retrieve runs matching the specified criteria.\n\n All parameters are optional, each of them specifies a single criterion.\n Only runs matching all of the criteria will be returned.\n\n Args:\n id (str or list of str, optional): A run's id or list of ids.\n E.g. `'SAN-1'` or `['SAN-1', 'SAN-2']`.\n Matching any element of the list is sufficient to pass the criterion.\n Defaults to `None`.\n state (str or list of str, optional): A run's state like or list of states.\n E.g. 
`'running'` or `['idle', 'running']`.\n Possible values: 'idle', 'running'.\n Defaults to `None`.\n Matching any element of the list is sufficient to pass the criterion.\n owner (str or list of str, optional): Username of the run's owner or a list of owners.\n E.g. 'josh' or ['frederic', 'josh'].\n The user who created the tracked run is an owner.\n Defaults to `None`.\n Matching any element of the list is sufficient to pass the criterion.\n tag (str or list of str, optional): An experiment tag or list of tags.\n E.g. `'lightGBM'` or ['pytorch', 'cycleLR'].\n Defaults to `None`.\n Only experiments that have all specified tags will match this criterion.\n\n Returns:\n ``Table``: object containing experiments matching the specified criteria.\n\n Use `.to_pandas()` to convert it to Pandas `DataFrame`.\n\n Examples:\n >>> import neptune.new as neptune\n\n >>> # Fetch project 'jackie/sandbox'\n ... project = neptune.get_project(name='jackie/sandbox')\n\n >>> # Fetch all Runs metadata as Pandas DataFrame\n ... runs_table_df = project.fetch_runs_table().to_pandas()\n\n >>> # Sort runs by creation time\n ... runs_table_df = runs_table_df.sort_values(by='sys/creation_time', ascending=False)\n\n >>> # Extract the last runs id\n ... last_run_id = runs_table_df['sys/id'].values[0]\n\n You can also filter the runs table by state, owner or tag or a combination:\n\n >>> # Fetch only inactive runs\n ... runs_table_df = project.fetch_runs_table(state='idle').to_pandas()\n\n >>> # Fetch only runs created by CI service\n ... runs_table_df = project.fetch_runs_table(owner='my_company_ci_service').to_pandas()\n\n >>> # Fetch only runs that have both 'Exploration' and 'Optuna' tag\n ... runs_table_df = project.fetch_runs_table(tag=['Exploration', 'Optuna']).to_pandas()\n\n >>> # You can combine conditions. Runs satisfying all conditions will be fetched\n ... runs_table_df = project.fetch_runs_table(state='idle', tag='Exploration').to_pandas()\n\n You may also want to check `fetch_runs_table docs page`_.\n\n .. 
_fetch_runs_table docs page:\n https://docs.neptune.ai/api-reference/project#fetch_runs_table\n \"\"\"\n ids = as_list(\"id\", id)\n states = as_list(\"state\", state)\n owners = as_list(\"owner\", owner)\n tags = as_list(\"tag\", tag)\n\n query_items = []\n\n if ids:\n query_items.append(\n NQLQueryAggregate(\n items=[\n NQLQueryAttribute(\n name=\"sys/id\",\n type=NQLAttributeType.STRING,\n operator=NQLAttributeOperator.EQUALS,\n value=api_id,\n )\n for api_id in ids\n ],\n aggregator=NQLAggregator.OR,\n )\n )\n\n if states:\n query_items.append(\n NQLQueryAggregate(\n items=[\n NQLQueryAttribute(\n name=\"sys/state\",\n type=NQLAttributeType.EXPERIMENT_STATE,\n operator=NQLAttributeOperator.EQUALS,\n value=state,\n )\n for state in states\n ],\n aggregator=NQLAggregator.OR,\n )\n )\n\n if owners:\n query_items.append(\n NQLQueryAggregate(\n items=[\n NQLQueryAttribute(\n name=\"sys/owner\",\n type=NQLAttributeType.STRING,\n operator=NQLAttributeOperator.EQUALS,\n value=owner,\n )\n for owner in owners\n ],\n aggregator=NQLAggregator.OR,\n )\n )\n\n if tags:\n query_items.append(\n NQLQueryAggregate(\n items=[\n NQLQueryAttribute(\n name=\"sys/tags\",\n type=NQLAttributeType.STRING_SET,\n operator=NQLAttributeOperator.CONTAINS,\n value=tag,\n )\n for tag in tags\n ],\n aggregator=NQLAggregator.OR,\n )\n )\n\n query = NQLQueryAggregate(items=query_items, aggregator=NQLAggregator.AND)\n\n return MetadataContainer._fetch_entries(\n self, child_type=ContainerType.RUN, query=query\n )\n\n def assign(self, value, wait: bool = False) -> None:\n \"\"\"Assign values to multiple fields from a dictionary.\n You can use this method to log multiple pieces of information with one command.\n Args:\n value (dict): A dictionary with values to assign, where keys become the paths of the fields.\n The dictionary can be nested - in such case the path will be a combination of all keys.\n wait (bool, optional): If `True` the client will first wait to send all tracked metadata to the server.\n This makes the call synchronous. Defaults to `False`.\n Examples:\n >>> import neptune.new as neptune\n >>> project = neptune.init_project(name=\"MY_WORKSPACE/MY_PROJECT\")\n >>> # Assign multiple fields from a dictionary\n ... general_info = {\"brief\": URL_TO_PROJECT_BRIEF, \"deadline\": \"2049-06-30\"}\n >>> project[\"general\"] = general_info\n >>> # You can always log explicitly parameters one by one\n ... project[\"general/brief\"] = URL_TO_PROJECT_BRIEF\n >>> project[\"general/deadline\"] = \"2049-06-30\"\n >>> # Dictionaries can be nested\n ... general_info = {\"brief\": {\"url\": URL_TO_PROJECT_BRIEF}}\n >>> project[\"general\"] = general_info\n >>> # This will log the url under path \"general/brief/url\"\n You may also want to check `assign docs page`_.\n .. _assign docs page:\n https://docs.neptune.ai/api-reference/project#.assign\n \"\"\"\n return MetadataContainer.assign(self, value=value, wait=wait)\n\n def fetch(self) -> dict:\n \"\"\"Fetch values of all non-File Atom fields as a dictionary.\n The result will preserve the hierarchical structure of the projects's metadata\n but will contain only non-File Atom fields.\n Returns:\n `dict` containing all non-File Atom fields values.\n Examples:\n >>> import neptune.new as neptune\n >>> project = neptune.init_project(name=\"MY_WORKSPACE/MY_PROJECT\")\n >>> # Fetch all the project metrics\n >>> project_metrics = project[\"metrics\"].fetch()\n You may also want to check `fetch docs page`_.\n .. 
_fetch docs page:\n https://docs.neptune.ai/api-reference/project#.fetch\n \"\"\"\n return MetadataContainer.fetch(self)\n\n def stop(self, seconds: Optional[Union[float, int]] = None) -> None:\n \"\"\"Stops the connection to the project and kills the synchronization thread.\n `.stop()` will be automatically called when a script that initialized the connection finishes\n or on the destruction of Neptune context.\n When using Neptune with Jupyter notebooks it's a good practice to stop the connection manually as it\n will be stopped automatically only when the Jupyter kernel stops.\n Args:\n seconds (int or float, optional): Seconds to wait for all tracking calls to finish\n before stopping the tracked run.\n If `None` will wait for all tracking calls to finish. Defaults to `True`.\n Examples:\n If you are initializing the connection from a script you don't need to call `.stop()`:\n >>> import neptune.new as neptune\n >>> project = neptune.init_project(name=\"MY_WORKSPACE/MY_PROJECT\")\n >>> # Your code\n ... pass\n ... # If you are executing Python script .stop()\n ... # is automatically called at the end for every Neptune object\n If you are initializing multiple connection from one script it is a good practice\n to .stop() the unneeded connections. You can also use Context Managers - Neptune\n will automatically call .stop() on the destruction of Project context:\n >>> import neptune.new as neptune\n >>> # If you are initializing multiple connections from the same script\n ... # stop the connection manually once not needed\n ... for project_name in projects:\n ... project = neptune.init_project(name=project_name)\n ... # Your code\n ... pass\n ... project.stop()\n >>> # You can also use with statement and context manager\n ... for project_name in projects:\n ... with neptune.init_project(name=project_name) as project:\n ... # Your code\n ... pass\n ... # .stop() is automatically called\n ... # when code execution exits the with statement\n .. warning::\n If you are using Jupyter notebooks for connecting to a project you need to manually invoke `.stop()`\n once the connection is not needed.\n You may also want to check `stop docs page`_.\n .. _stop docs page:\n https://docs.neptune.ai/api-reference/project#.stop\n \"\"\"\n return MetadataContainer.stop(self, seconds=seconds)\n\n def get_structure(self) -> Dict[str, Any]:\n \"\"\"Returns a project's metadata structure in form of a dictionary.\n This method can be used to traverse the project's metadata structure programmatically\n when using Neptune in automated workflows.\n .. danger::\n The returned object is a shallow copy of an internal structure.\n Any modifications to it may result in tracking malfunction.\n Returns:\n ``dict``: with the project's metadata structure.\n \"\"\"\n return MetadataContainer.get_structure(self)\n\n def print_structure(self) -> None:\n \"\"\"Pretty prints the structure of the project's metadata.\n Paths are ordered lexicographically and the whole structure is neatly colored.\n \"\"\"\n return MetadataContainer.print_structure(self)\n\n def pop(self, path: str, wait: bool = False) -> None:\n \"\"\"Removes the field or whole namespace stored under the path completely and all data associated with them.\n Args:\n path (str): Path of the field or namespace to be removed.\n wait (bool, optional): If `True` the client will first wait to send all tracked metadata to the server.\n This makes the call synchronous. 
Defaults to `False`.\n Examples:\n >>> import neptune.new as neptune\n >>> project = neptune.init_project(name=\"MY_WORKSPACE/MY_PROJECT\")\n >>> # Delete a field along with it's data\n ... project.pop(\"datasets/v0.4\")\n >>> # .pop() can be invoked directly on fields and namespaces\n >>> project['parameters/learning_rate'] = 0.3\n >>> # Following line\n ... project.pop(\"datasets/v0.4\")\n >>> # is equiavlent to this line\n ... project[\"datasets/v0.4\"].pop()\n >>> # or this line\n ... project[\"datasets\"].pop(\"v0.4\")\n >>> # You can also delete in batch whole namespace\n ... project[\"datasets\"].pop()\n You may also want to check `pop docs page`_.\n .. _pop docs page:\n https://docs.neptune.ai/api-reference/project#.pop\n \"\"\"\n return MetadataContainer.pop(self, path=path, wait=wait)\n\n def wait(self, disk_only=False) -> None:\n \"\"\"Wait for all the tracking calls to finish.\n Args:\n disk_only (bool, optional, default is False): If `True` the process will only wait for data to be saved\n locally from memory, but will not wait for them to reach Neptune servers.\n Defaults to `False`.\n You may also want to check `wait docs page`_.\n .. _wait docs page:\n https://docs.neptune.ai/api-reference/project#.wait\n \"\"\"\n return MetadataContainer.wait(self, disk_only=disk_only)\n\n def sync(self, wait: bool = True) -> None:\n \"\"\"Synchronizes local representation of the project with Neptune servers.\n Args:\n wait (bool, optional, default is True): If `True` the process will only wait for data to be saved\n locally from memory, but will not wait for them to reach Neptune servers.\n Defaults to `True`.\n You may also want to check `sync docs page`_.\n .. _sync docs page:\n https://docs.neptune.ai/api-reference/project#.sync\n \"\"\"\n return MetadataContainer.sync(self, wait=wait)\n", "path": "neptune/new/metadata_containers/project.py" } ]
[ { "content": "#\n# Copyright (c) 2020, Neptune Labs Sp. z o.o.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport threading\nfrom typing import Any, Dict, Iterable, Optional, Union\n\nfrom neptune.new.internal.backends.neptune_backend import NeptuneBackend\nfrom neptune.new.internal.backends.nql import (\n NQLAggregator,\n NQLAttributeOperator,\n NQLAttributeType,\n NQLQueryAggregate,\n NQLQueryAttribute,\n)\nfrom neptune.new.internal.background_job import BackgroundJob\nfrom neptune.new.internal.container_type import ContainerType\nfrom neptune.new.internal.id_formats import SysId, UniqueId\nfrom neptune.new.internal.operation_processors.operation_processor import (\n OperationProcessor,\n)\nfrom neptune.new.internal.utils import as_list\nfrom neptune.new.metadata_containers import MetadataContainer\nfrom neptune.new.metadata_containers.metadata_containers_table import Table\nfrom neptune.new.types.mode import Mode\n\n\nclass Project(MetadataContainer):\n \"\"\"A class for managing a Neptune project and retrieving information from it.\n\n You may also want to check `Project docs page`_.\n\n .. _Project docs page:\n https://docs.neptune.ai/api-reference/project\n \"\"\"\n\n container_type = ContainerType.PROJECT\n\n def __init__(\n self,\n *,\n id_: UniqueId,\n mode: Mode,\n backend: NeptuneBackend,\n op_processor: OperationProcessor,\n background_job: BackgroundJob,\n lock: threading.RLock,\n workspace: str,\n project_name: str,\n sys_id: SysId,\n ):\n super().__init__(\n id_=id_,\n mode=mode,\n backend=backend,\n op_processor=op_processor,\n background_job=background_job,\n lock=lock,\n project_id=id_,\n project_name=project_name,\n workspace=workspace,\n sys_id=sys_id,\n )\n\n @property\n def _docs_url_stop(self) -> str:\n return \"https://docs.neptune.ai/api-reference/project#.stop\"\n\n @property\n def _label(self) -> str:\n return f\"{self._workspace}/{self._project_name}\"\n\n @property\n def _url(self) -> str:\n return self._backend.get_project_url(\n project_id=self._id,\n workspace=self._workspace,\n project_name=self._project_name,\n )\n\n @property\n def _metadata_url(self) -> str:\n return self._url.rstrip(\"/\") + \"/metadata\"\n\n # pylint:disable=redefined-builtin\n def fetch_runs_table(\n self,\n id: Optional[Union[str, Iterable[str]]] = None,\n state: Optional[Union[str, Iterable[str]]] = None,\n owner: Optional[Union[str, Iterable[str]]] = None,\n tag: Optional[Union[str, Iterable[str]]] = None,\n ) -> Table:\n \"\"\"Retrieve runs matching the specified criteria.\n\n All parameters are optional, each of them specifies a single criterion.\n Only runs matching all of the criteria will be returned.\n\n Args:\n id (str or list of str, optional): A run's id or list of ids.\n E.g. `'SAN-1'` or `['SAN-1', 'SAN-2']`.\n Matching any element of the list is sufficient to pass the criterion.\n Defaults to `None`.\n state (str or list of str, optional): A run's state like or list of states.\n E.g. 
`'running'` or `['idle', 'running']`.\n Possible values: 'idle', 'running'.\n Defaults to `None`.\n Matching any element of the list is sufficient to pass the criterion.\n owner (str or list of str, optional): Username of the run's owner or a list of owners.\n E.g. 'josh' or ['frederic', 'josh'].\n The user who created the tracked run is an owner.\n Defaults to `None`.\n Matching any element of the list is sufficient to pass the criterion.\n tag (str or list of str, optional): An experiment tag or list of tags.\n E.g. `'lightGBM'` or ['pytorch', 'cycleLR'].\n Defaults to `None`.\n Only experiments that have all specified tags will match this criterion.\n\n Returns:\n ``Table``: object containing experiments matching the specified criteria.\n\n Use `.to_pandas()` to convert it to Pandas `DataFrame`.\n\n Examples:\n >>> import neptune.new as neptune\n\n >>> # Fetch project 'jackie/sandbox'\n ... project = neptune.get_project(name='jackie/sandbox')\n\n >>> # Fetch all Runs metadata as Pandas DataFrame\n ... runs_table_df = project.fetch_runs_table().to_pandas()\n\n >>> # Sort runs by creation time\n ... runs_table_df = runs_table_df.sort_values(by='sys/creation_time', ascending=False)\n\n >>> # Extract the last runs id\n ... last_run_id = runs_table_df['sys/id'].values[0]\n\n You can also filter the runs table by state, owner or tag or a combination:\n\n >>> # Fetch only inactive runs\n ... runs_table_df = project.fetch_runs_table(state='idle').to_pandas()\n\n >>> # Fetch only runs created by CI service\n ... runs_table_df = project.fetch_runs_table(owner='my_company_ci_service').to_pandas()\n\n >>> # Fetch only runs that have both 'Exploration' and 'Optuna' tag\n ... runs_table_df = project.fetch_runs_table(tag=['Exploration', 'Optuna']).to_pandas()\n\n >>> # You can combine conditions. Runs satisfying all conditions will be fetched\n ... runs_table_df = project.fetch_runs_table(state='idle', tag='Exploration').to_pandas()\n\n You may also want to check `fetch_runs_table docs page`_.\n\n .. 
_fetch_runs_table docs page:\n https://docs.neptune.ai/api-reference/project#fetch_runs_table\n \"\"\"\n ids = as_list(\"id\", id)\n states = as_list(\"state\", state)\n owners = as_list(\"owner\", owner)\n tags = as_list(\"tag\", tag)\n\n query_items = []\n\n if ids:\n query_items.append(\n NQLQueryAggregate(\n items=[\n NQLQueryAttribute(\n name=\"sys/id\",\n type=NQLAttributeType.STRING,\n operator=NQLAttributeOperator.EQUALS,\n value=api_id,\n )\n for api_id in ids\n ],\n aggregator=NQLAggregator.OR,\n )\n )\n\n if states:\n query_items.append(\n NQLQueryAggregate(\n items=[\n NQLQueryAttribute(\n name=\"sys/state\",\n type=NQLAttributeType.EXPERIMENT_STATE,\n operator=NQLAttributeOperator.EQUALS,\n value=state,\n )\n for state in states\n ],\n aggregator=NQLAggregator.OR,\n )\n )\n\n if owners:\n query_items.append(\n NQLQueryAggregate(\n items=[\n NQLQueryAttribute(\n name=\"sys/owner\",\n type=NQLAttributeType.STRING,\n operator=NQLAttributeOperator.EQUALS,\n value=owner,\n )\n for owner in owners\n ],\n aggregator=NQLAggregator.OR,\n )\n )\n\n if tags:\n query_items.append(\n NQLQueryAggregate(\n items=[\n NQLQueryAttribute(\n name=\"sys/tags\",\n type=NQLAttributeType.STRING_SET,\n operator=NQLAttributeOperator.CONTAINS,\n value=tag,\n )\n for tag in tags\n ],\n aggregator=NQLAggregator.AND,\n )\n )\n\n query = NQLQueryAggregate(items=query_items, aggregator=NQLAggregator.AND)\n\n return MetadataContainer._fetch_entries(\n self, child_type=ContainerType.RUN, query=query\n )\n\n def assign(self, value, wait: bool = False) -> None:\n \"\"\"Assign values to multiple fields from a dictionary.\n You can use this method to log multiple pieces of information with one command.\n Args:\n value (dict): A dictionary with values to assign, where keys become the paths of the fields.\n The dictionary can be nested - in such case the path will be a combination of all keys.\n wait (bool, optional): If `True` the client will first wait to send all tracked metadata to the server.\n This makes the call synchronous. Defaults to `False`.\n Examples:\n >>> import neptune.new as neptune\n >>> project = neptune.init_project(name=\"MY_WORKSPACE/MY_PROJECT\")\n >>> # Assign multiple fields from a dictionary\n ... general_info = {\"brief\": URL_TO_PROJECT_BRIEF, \"deadline\": \"2049-06-30\"}\n >>> project[\"general\"] = general_info\n >>> # You can always log explicitly parameters one by one\n ... project[\"general/brief\"] = URL_TO_PROJECT_BRIEF\n >>> project[\"general/deadline\"] = \"2049-06-30\"\n >>> # Dictionaries can be nested\n ... general_info = {\"brief\": {\"url\": URL_TO_PROJECT_BRIEF}}\n >>> project[\"general\"] = general_info\n >>> # This will log the url under path \"general/brief/url\"\n You may also want to check `assign docs page`_.\n .. _assign docs page:\n https://docs.neptune.ai/api-reference/project#.assign\n \"\"\"\n return MetadataContainer.assign(self, value=value, wait=wait)\n\n def fetch(self) -> dict:\n \"\"\"Fetch values of all non-File Atom fields as a dictionary.\n The result will preserve the hierarchical structure of the projects's metadata\n but will contain only non-File Atom fields.\n Returns:\n `dict` containing all non-File Atom fields values.\n Examples:\n >>> import neptune.new as neptune\n >>> project = neptune.init_project(name=\"MY_WORKSPACE/MY_PROJECT\")\n >>> # Fetch all the project metrics\n >>> project_metrics = project[\"metrics\"].fetch()\n You may also want to check `fetch docs page`_.\n .. 
_fetch docs page:\n https://docs.neptune.ai/api-reference/project#.fetch\n \"\"\"\n return MetadataContainer.fetch(self)\n\n def stop(self, seconds: Optional[Union[float, int]] = None) -> None:\n \"\"\"Stops the connection to the project and kills the synchronization thread.\n `.stop()` will be automatically called when a script that initialized the connection finishes\n or on the destruction of Neptune context.\n When using Neptune with Jupyter notebooks it's a good practice to stop the connection manually as it\n will be stopped automatically only when the Jupyter kernel stops.\n Args:\n seconds (int or float, optional): Seconds to wait for all tracking calls to finish\n before stopping the tracked run.\n If `None` will wait for all tracking calls to finish. Defaults to `True`.\n Examples:\n If you are initializing the connection from a script you don't need to call `.stop()`:\n >>> import neptune.new as neptune\n >>> project = neptune.init_project(name=\"MY_WORKSPACE/MY_PROJECT\")\n >>> # Your code\n ... pass\n ... # If you are executing Python script .stop()\n ... # is automatically called at the end for every Neptune object\n If you are initializing multiple connection from one script it is a good practice\n to .stop() the unneeded connections. You can also use Context Managers - Neptune\n will automatically call .stop() on the destruction of Project context:\n >>> import neptune.new as neptune\n >>> # If you are initializing multiple connections from the same script\n ... # stop the connection manually once not needed\n ... for project_name in projects:\n ... project = neptune.init_project(name=project_name)\n ... # Your code\n ... pass\n ... project.stop()\n >>> # You can also use with statement and context manager\n ... for project_name in projects:\n ... with neptune.init_project(name=project_name) as project:\n ... # Your code\n ... pass\n ... # .stop() is automatically called\n ... # when code execution exits the with statement\n .. warning::\n If you are using Jupyter notebooks for connecting to a project you need to manually invoke `.stop()`\n once the connection is not needed.\n You may also want to check `stop docs page`_.\n .. _stop docs page:\n https://docs.neptune.ai/api-reference/project#.stop\n \"\"\"\n return MetadataContainer.stop(self, seconds=seconds)\n\n def get_structure(self) -> Dict[str, Any]:\n \"\"\"Returns a project's metadata structure in form of a dictionary.\n This method can be used to traverse the project's metadata structure programmatically\n when using Neptune in automated workflows.\n .. danger::\n The returned object is a shallow copy of an internal structure.\n Any modifications to it may result in tracking malfunction.\n Returns:\n ``dict``: with the project's metadata structure.\n \"\"\"\n return MetadataContainer.get_structure(self)\n\n def print_structure(self) -> None:\n \"\"\"Pretty prints the structure of the project's metadata.\n Paths are ordered lexicographically and the whole structure is neatly colored.\n \"\"\"\n return MetadataContainer.print_structure(self)\n\n def pop(self, path: str, wait: bool = False) -> None:\n \"\"\"Removes the field or whole namespace stored under the path completely and all data associated with them.\n Args:\n path (str): Path of the field or namespace to be removed.\n wait (bool, optional): If `True` the client will first wait to send all tracked metadata to the server.\n This makes the call synchronous. 
Defaults to `False`.\n Examples:\n >>> import neptune.new as neptune\n >>> project = neptune.init_project(name=\"MY_WORKSPACE/MY_PROJECT\")\n >>> # Delete a field along with it's data\n ... project.pop(\"datasets/v0.4\")\n >>> # .pop() can be invoked directly on fields and namespaces\n >>> project['parameters/learning_rate'] = 0.3\n >>> # Following line\n ... project.pop(\"datasets/v0.4\")\n >>> # is equiavlent to this line\n ... project[\"datasets/v0.4\"].pop()\n >>> # or this line\n ... project[\"datasets\"].pop(\"v0.4\")\n >>> # You can also delete in batch whole namespace\n ... project[\"datasets\"].pop()\n You may also want to check `pop docs page`_.\n .. _pop docs page:\n https://docs.neptune.ai/api-reference/project#.pop\n \"\"\"\n return MetadataContainer.pop(self, path=path, wait=wait)\n\n def wait(self, disk_only=False) -> None:\n \"\"\"Wait for all the tracking calls to finish.\n Args:\n disk_only (bool, optional, default is False): If `True` the process will only wait for data to be saved\n locally from memory, but will not wait for them to reach Neptune servers.\n Defaults to `False`.\n You may also want to check `wait docs page`_.\n .. _wait docs page:\n https://docs.neptune.ai/api-reference/project#.wait\n \"\"\"\n return MetadataContainer.wait(self, disk_only=disk_only)\n\n def sync(self, wait: bool = True) -> None:\n \"\"\"Synchronizes local representation of the project with Neptune servers.\n Args:\n wait (bool, optional, default is True): If `True` the process will only wait for data to be saved\n locally from memory, but will not wait for them to reach Neptune servers.\n Defaults to `True`.\n You may also want to check `sync docs page`_.\n .. _sync docs page:\n https://docs.neptune.ai/api-reference/project#.sync\n \"\"\"\n return MetadataContainer.sync(self, wait=wait)\n", "path": "neptune/new/metadata_containers/project.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c596404d..3dacc0b04 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## Fixes - Fix computing of a multipart upload chunk size ([#897](https://github.com/neptune-ai/neptune-client/pull/897)) +- Matching all listed tags instead of any when calling `fetch_runs_table` ([#899](https://github.com/neptune-ai/neptune-client/pull/899)) ## neptune-client 0.16.2 diff --git a/e2e_tests/standard/test_base.py b/e2e_tests/standard/test_base.py index 7aecf0095..5e1cda66b 100644 --- a/e2e_tests/standard/test_base.py +++ b/e2e_tests/standard/test_base.py @@ -177,30 +177,30 @@ def test_add_and_remove_tags(self, container: MetadataContainer): class TestFetchTable(BaseE2ETest): def test_fetch_runs_table(self, environment): - tag = str(uuid.uuid4()) + tag1, tag2 = str(uuid.uuid4()), str(uuid.uuid4()) with neptune.init_run(project=environment.project) as run: - run["sys/tags"].add(tag) + run["sys/tags"].add(tag1) + run["sys/tags"].add(tag2) run["value"] = 12 run.sync() with neptune.init_run(project=environment.project) as run: - run["sys/tags"].add(tag) + run["sys/tags"].add(tag2) run["another/value"] = "testing" run.sync() - # wait for the elasticsearch cache to fill + # wait for the cache to fill time.sleep(5) project = neptune.get_project(name=environment.project) runs_table = sorted( - project.fetch_runs_table(tag=tag).to_rows(), + project.fetch_runs_table(tag=[tag1, tag2]).to_rows(), key=lambda r: r.get_attribute_value("sys/id"), ) - assert len(runs_table) == 2 + assert len(runs_table) == 1 assert runs_table[0].get_attribute_value("value") == 12 - assert runs_table[1].get_attribute_value("another/value") == "testing" @pytest.mark.parametrize("container", ["model"], indirect=True) def test_fetch_model_versions_table(self, container: Model, environment): diff --git a/neptune/new/metadata_containers/project.py b/neptune/new/metadata_containers/project.py index 3607b7d36..088bb3d21 100644 --- a/neptune/new/metadata_containers/project.py +++ b/neptune/new/metadata_containers/project.py @@ -232,7 +232,7 @@ def fetch_runs_table( ) for tag in tags ], - aggregator=NQLAggregator.OR, + aggregator=NQLAggregator.AND, ) )
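At the call-site level, the patched behaviour matches the docstring's own example: with a list of tags, only runs carrying every tag are returned. A short illustrative snippet (the project name is a placeholder):

```python
import neptune.new as neptune

# Placeholder workspace/project name for illustration.
project = neptune.get_project(name="my-workspace/sandbox")

# After #899, only runs tagged with BOTH 'Exploration' and 'Optuna' match;
# before the fix, having either tag was enough.
runs_df = project.fetch_runs_table(tag=["Exploration", "Optuna"]).to_pandas()
```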
svthalia__concrexit-2705
V2 events API reports wrong participant numbers ### Describe the bug On some events the number of participants is not reported correctly by the V2 version of the API. ### How to reproduce Steps to reproduce the behaviour: 1. Go to https://thalia.nu/api/v2/events/1005/ 2. Go to https://thalia.nu/events/1005/ 3. Observe a small difference. The difference is not present in V1. Counting the pictures of the participants matches V1 and not V2. ### Expected behaviour The V2 API should properly report the number of registered members.
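Judging from the before/after files that follow, the mismatch most likely comes from the `participant_count` aggregate: the old filter `~Q(eventregistration__date_cancelled__lt=timezone.now())` evaluates `timezone.now()` once, when the model class is defined at import time, so registrations cancelled after the process started can still be counted. The patch counts only registrations whose `date_cancelled` is `NULL`. A minimal sketch of the corrected property, mirroring the patched model:

```python
from django.db.models import Count, Q
from queryable_properties.properties import AggregateProperty

# Count registrations that were never cancelled; no wall-clock comparison,
# so the value cannot go stale after module import.
participant_count = AggregateProperty(
    Count(
        "eventregistration",
        filter=Q(eventregistration__date_cancelled=None),
    )
)
```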
[ { "content": "import uuid\n\nfrom django.conf import settings\nfrom django.core import validators\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.db import models, router\nfrom django.db.models import Count, Q\nfrom django.db.models.deletion import Collector\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import gettext_lazy as _\n\nfrom queryable_properties.managers import QueryablePropertiesManager\nfrom queryable_properties.properties import AggregateProperty\nfrom tinymce.models import HTMLField\n\nfrom announcements.models import Slide\nfrom events.models import status\nfrom events.models.categories import EVENT_CATEGORIES\nfrom members.models import Member\nfrom payments.models import PaymentAmountField\nfrom pushnotifications.models import Category, ScheduledMessage\n\n\nclass Event(models.Model):\n \"\"\"Describes an event.\"\"\"\n\n objects = QueryablePropertiesManager()\n\n DEFAULT_NO_REGISTRATION_MESSAGE = _(\"No registration required\")\n\n title = models.CharField(_(\"title\"), max_length=100)\n\n description = HTMLField(\n _(\"description\"),\n )\n\n caption = models.TextField(\n _(\"caption\"),\n max_length=500,\n null=False,\n blank=False,\n help_text=_(\n \"A short text of max 500 characters for promotion and the newsletter.\"\n ),\n )\n\n start = models.DateTimeField(_(\"start time\"))\n\n end = models.DateTimeField(_(\"end time\"))\n\n organisers = models.ManyToManyField(\n \"activemembers.MemberGroup\",\n verbose_name=_(\"organisers\"),\n related_name=_(\"event_organiser\"),\n )\n\n category = models.CharField(\n max_length=40,\n choices=EVENT_CATEGORIES,\n verbose_name=_(\"category\"),\n help_text=_(\n \"Alumni: Events organised for alumni, \"\n \"Education: Education focused events, \"\n \"Career: Career focused events, \"\n \"Leisure: borrels, parties, game activities etc., \"\n \"Association Affairs: general meetings or \"\n \"any other board related events, \"\n \"Other: anything else.\"\n ),\n )\n\n registration_start = models.DateTimeField(\n _(\"registration start\"),\n null=True,\n blank=True,\n help_text=_(\n \"If you set a registration period registration will be \"\n \"required. If you don't set one, registration won't be \"\n \"required. Prefer times when people don't have lectures, \"\n \"e.g. 12:30 instead of 13:37.\"\n ),\n )\n\n registration_end = models.DateTimeField(\n _(\"registration end\"),\n null=True,\n blank=True,\n help_text=_(\n \"If you set a registration period registration will be \"\n \"required. If you don't set one, registration won't be \"\n \"required.\"\n ),\n )\n\n cancel_deadline = models.DateTimeField(_(\"cancel deadline\"), null=True, blank=True)\n\n send_cancel_email = models.BooleanField(\n _(\"send cancellation notifications\"),\n default=True,\n help_text=_(\n \"Send an email to the organising party when a member \"\n \"cancels their registration after the deadline.\"\n ),\n )\n\n optional_registrations = models.BooleanField(\n _(\"allow optional registrations\"),\n default=True,\n help_text=_(\n \"Participants can indicate their optional presence, even though \"\n \"registration is not actually required. 
This ignores registration \"\n \"start and end time or cancellation deadlines, optional \"\n \"registration will be enabled directly after publishing until the \"\n \"end of the event.\"\n ),\n )\n\n location = models.CharField(\n _(\"location\"),\n max_length=255,\n )\n\n map_location = models.CharField(\n _(\"location for minimap\"),\n max_length=255,\n help_text=_(\n \"Location of Huygens: Heyendaalseweg 135, Nijmegen. \"\n \"Location of Mercator 1: Toernooiveld 212, Nijmegen. \"\n \"Use the input 'discord' or 'online' for special placeholders. \"\n \"Not shown as text!!\"\n ),\n )\n\n price = PaymentAmountField(\n verbose_name=_(\"price\"),\n allow_zero=True,\n default=0,\n validators=[validators.MinValueValidator(0)],\n )\n\n fine = PaymentAmountField(\n verbose_name=_(\"fine\"),\n allow_zero=True,\n default=0,\n # Minimum fine is checked in this model's clean(), as it is only for\n # events that require registration.\n help_text=_(\"Fine if participant does not show up (at least €5).\"),\n validators=[validators.MinValueValidator(0)],\n )\n\n max_participants = models.PositiveSmallIntegerField(\n _(\"maximum number of participants\"),\n blank=True,\n null=True,\n )\n\n no_registration_message = models.CharField(\n _(\"message when there is no registration\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=(\n format_lazy(\n \"{} {}. {}\",\n _(\"Default:\"),\n DEFAULT_NO_REGISTRATION_MESSAGE,\n _(\n 'This field accepts HTML tags as well, e.g. links with &lta href=\"https://example.com\" target=\"_blank\"&gthttps://example.com&lt/a&gt'\n ),\n )\n ),\n )\n\n published = models.BooleanField(_(\"published\"), default=False)\n\n registration_reminder = models.ForeignKey(\n ScheduledMessage,\n on_delete=models.deletion.SET_NULL,\n related_name=\"registration_event\",\n blank=True,\n null=True,\n )\n start_reminder = models.ForeignKey(\n ScheduledMessage,\n on_delete=models.deletion.SET_NULL,\n related_name=\"start_event\",\n blank=True,\n null=True,\n )\n\n documents = models.ManyToManyField(\n \"documents.Document\",\n verbose_name=_(\"documents\"),\n blank=True,\n )\n\n slide = models.ForeignKey(\n Slide,\n verbose_name=\"slide\",\n help_text=_(\n \"Change the header-image on the event's info-page to one \"\n \"specific to this event.\"\n ),\n blank=True,\n on_delete=models.deletion.SET_NULL,\n null=True,\n )\n\n tpay_allowed = models.BooleanField(_(\"Allow Thalia Pay\"), default=True)\n\n shift = models.OneToOneField(\"sales.Shift\", models.SET_NULL, null=True, blank=True)\n\n mark_present_url_token = models.UUIDField(\n unique=True, default=uuid.uuid4, editable=False\n )\n\n @property\n def mark_present_url(self):\n \"\"\"Return a url that a user can use to mark themselves present.\"\"\"\n return settings.BASE_URL + reverse(\n \"events:mark-present\",\n kwargs={\n \"pk\": self.pk,\n \"token\": self.mark_present_url_token,\n },\n )\n\n @property\n def cancel_too_late_message(self):\n return _(\n \"Cancellation isn't possible anymore without having to pay \"\n \"the full costs of €\" + str(self.fine) + \". 
Also note that \"\n \"you will be unable to re-register.\"\n )\n\n @property\n def after_cancel_deadline(self):\n return self.cancel_deadline and self.cancel_deadline <= timezone.now()\n\n @property\n def registration_started(self):\n return self.registration_start <= timezone.now()\n\n @property\n def registration_required(self):\n return bool(self.registration_start) or bool(self.registration_end)\n\n @property\n def payment_required(self):\n return self.price != 0\n\n @property\n def has_fields(self):\n return self.registrationinformationfield_set.count() > 0\n\n participant_count = AggregateProperty(\n Count(\n \"eventregistration\",\n filter=~Q(eventregistration__date_cancelled__lt=timezone.now()),\n )\n )\n\n def reached_participants_limit(self):\n \"\"\"Is this event up to capacity?.\"\"\"\n return (\n self.max_participants is not None\n and self.max_participants <= self.participant_count\n )\n\n @property\n def registrations(self):\n \"\"\"Queryset with all non-cancelled registrations.\"\"\"\n return self.eventregistration_set.filter(date_cancelled=None)\n\n @property\n def participants(self):\n \"\"\"Return the active participants.\"\"\"\n if self.max_participants is not None:\n return self.registrations.order_by(\"date\")[: self.max_participants]\n return self.registrations.order_by(\"date\")\n\n @property\n def queue(self):\n \"\"\"Return the waiting queue.\"\"\"\n if self.max_participants is not None:\n return self.registrations.order_by(\"date\")[self.max_participants :]\n return []\n\n @property\n def cancellations(self):\n \"\"\"Return a queryset with the cancelled events.\"\"\"\n return self.eventregistration_set.exclude(date_cancelled=None).order_by(\n \"date_cancelled\"\n )\n\n @property\n def registration_allowed(self):\n now = timezone.now()\n return (\n bool(self.registration_start or self.registration_end)\n and self.registration_end > now >= self.registration_start\n )\n\n @property\n def cancellation_allowed(self):\n now = timezone.now()\n return (\n bool(self.registration_start or self.registration_end)\n and self.registration_start <= now < self.start\n )\n\n @property\n def optional_registration_allowed(self):\n return (\n self.optional_registrations\n and not self.registration_required\n and self.end >= timezone.now()\n )\n\n @property\n def has_food_event(self):\n # pylint: disable=pointless-statement\n try:\n self.food_event\n return True\n except ObjectDoesNotExist:\n return False\n\n def clean_changes(self, changed_data):\n \"\"\"Check if changes from `changed_data` are allowed.\n\n This method should be run from a form clean() method, where changed_data\n can be retrieved from self.changed_data\n \"\"\"\n errors = {}\n if self.published:\n for field in (\"price\", \"registration_start\"):\n if (\n field in changed_data\n and self.registration_start\n and self.registration_start <= timezone.now()\n ):\n errors.update(\n {\n field: _(\n \"You cannot change this field after \"\n \"the registration has started.\"\n )\n }\n )\n\n if errors:\n raise ValidationError(errors)\n\n def clean(self):\n # pylint: disable=too-many-branches\n super().clean()\n errors = {}\n if self.start is None:\n errors.update({\"start\": _(\"Start cannot have an empty date or time field\")})\n if self.end is None:\n errors.update({\"end\": _(\"End cannot have an empty date or time field\")})\n if self.start is not None and self.end is not None:\n if self.end < self.start:\n errors.update({\"end\": _(\"Can't have an event travel back in time\")})\n if self.registration_required:\n if 
self.optional_registrations:\n errors.update(\n {\n \"optional_registrations\": _(\n \"This is not possible when actual registrations are required.\"\n )\n }\n )\n if self.fine < 5:\n errors.update(\n {\n \"fine\": _(\n \"The fine for this event is too low \"\n \"(must be at least €5).\"\n )\n }\n )\n if self.no_registration_message:\n errors.update(\n {\n \"no_registration_message\": _(\n \"Doesn't make sense to have this \"\n \"if you require registrations.\"\n )\n }\n )\n if not self.registration_start:\n errors.update(\n {\n \"registration_start\": _(\n \"If registration is required, you need a start of \"\n \"registration\"\n )\n }\n )\n if not self.registration_end:\n errors.update(\n {\n \"registration_end\": _(\n \"If registration is required, you need an end of \"\n \"registration\"\n )\n }\n )\n if not self.cancel_deadline:\n errors.update(\n {\n \"cancel_deadline\": _(\n \"If registration is required, \"\n \"you need a deadline for the cancellation\"\n )\n }\n )\n elif self.cancel_deadline > self.start:\n errors.update(\n {\n \"cancel_deadline\": _(\n \"The cancel deadline should be\"\n \" before the start of the event.\"\n )\n }\n )\n if (\n self.registration_start\n and self.registration_end\n and (self.registration_start >= self.registration_end)\n ):\n message = _(\"Registration start should be before registration end\")\n errors.update(\n {\"registration_start\": message, \"registration_end\": message}\n )\n\n if errors:\n raise ValidationError(errors)\n\n def get_absolute_url(self):\n return reverse(\"events:event\", args=[str(self.pk)])\n\n def save(self, **kwargs):\n delete_collector = Collector(\n using=router.db_for_write(self.__class__, instance=self)\n )\n\n if not self.pk:\n super().save(**kwargs)\n\n if self.published:\n if self.registration_required:\n registration_reminder_time = (\n self.registration_start - timezone.timedelta(hours=1)\n )\n registration_reminder = ScheduledMessage()\n if (\n self.registration_reminder is not None\n and not self.registration_reminder.sent\n ):\n registration_reminder = self.registration_reminder\n\n if registration_reminder_time > timezone.now():\n registration_reminder.title = \"Event registration\"\n registration_reminder.body = (\n f\"Registration for '{self.title}' starts in 1 hour\"\n )\n registration_reminder.category = Category.objects.get(\n key=Category.EVENT\n )\n registration_reminder.time = registration_reminder_time\n registration_reminder.url = (\n f\"{settings.BASE_URL}{reverse('events:event', args=[self.id])}\"\n )\n\n registration_reminder.save()\n self.registration_reminder = registration_reminder\n self.registration_reminder.users.set(Member.current_members.all())\n elif registration_reminder.pk is not None:\n delete_collector.collect([self.registration_reminder])\n self.registration_reminder = None\n\n start_reminder_time = self.start - timezone.timedelta(hours=1)\n start_reminder = ScheduledMessage()\n if self.start_reminder is not None and not self.start_reminder.sent:\n start_reminder = self.start_reminder\n\n if start_reminder_time > timezone.now():\n start_reminder.title = \"Event\"\n start_reminder.body = f\"'{self.title}' starts in 1 hour\"\n start_reminder.category = Category.objects.get(key=Category.EVENT)\n start_reminder.time = start_reminder_time\n start_reminder.save()\n self.start_reminder = start_reminder\n if self.registration_required:\n self.start_reminder.users.set(\n [r.member for r in self.participants if r.member]\n )\n else:\n 
self.start_reminder.users.set(Member.current_members.all())\n elif start_reminder.pk is not None:\n delete_collector.collect([self.start_reminder])\n self.start_reminder = None\n else:\n if (\n self.registration_reminder is not None\n and not self.registration_reminder.sent\n ):\n delete_collector.collect([self.registration_reminder])\n self.registration_reminder = None\n\n if self.start_reminder is not None and not self.start_reminder.sent:\n delete_collector.collect([self.start_reminder])\n self.start_reminder = None\n\n super().save()\n delete_collector.delete()\n\n def delete(self, using=None, keep_parents=False):\n using = using or router.db_for_write(self.__class__, instance=self)\n collector = Collector(using=using)\n collector.collect([self], keep_parents=keep_parents)\n\n if (\n self.registration_reminder is not None\n and not self.registration_reminder.sent\n ):\n collector.collect([self.registration_reminder], keep_parents=keep_parents)\n if self.start_reminder is not None and not self.start_reminder.sent:\n collector.collect([self.start_reminder], keep_parents=keep_parents)\n if self.has_food_event:\n collector.add([self.food_event])\n\n return collector.delete()\n\n def __str__(self):\n return f\"{self.title}: {timezone.localtime(self.start):%Y-%m-%d %H:%M}\"\n\n DEFAULT_STATUS_MESSAGE = {\n status.STATUS_WILL_OPEN: _(\"Registration will open {regstart}.\"),\n status.STATUS_EXPIRED: _(\"Registration is not possible anymore.\"),\n status.STATUS_OPEN: _(\"You can register now.\"),\n status.STATUS_FULL: _(\n \"Registrations are full, but you can join the waiting list.\"\n ),\n status.STATUS_WAITINGLIST: _(\"You are in queue position {pos}.\"),\n status.STATUS_REGISTERED: _(\"You are registered for this event.\"),\n status.STATUS_CANCELLED: _(\n \"Your registration for this event is cancelled. You may still re-register.\"\n ),\n status.STATUS_CANCELLED_FINAL: _(\n \"Your registration for this event is cancelled. 
Note that you cannot re-register.\"\n ),\n status.STATUS_CANCELLED_LATE: _(\n \"Your registration is cancelled after the deadline and you will pay a fine of €{fine}.\"\n ),\n status.STATUS_OPTIONAL: _(\"You can optionally register for this event.\"),\n status.STATUS_OPTIONAL_REGISTERED: _(\n \"You are optionally registered for this event.\"\n ),\n status.STATUS_NONE: DEFAULT_NO_REGISTRATION_MESSAGE,\n status.STATUS_LOGIN: _(\n \"You have to log in before you can register for this event.\"\n ),\n }\n\n STATUS_MESSAGE_FIELDS = {\n status.STATUS_WILL_OPEN: \"registration_msg_will_open\",\n status.STATUS_EXPIRED: \"registration_msg_expired\",\n status.STATUS_OPEN: \"registration_msg_open\",\n status.STATUS_FULL: \"registration_msg_full\",\n status.STATUS_WAITINGLIST: \"registration_msg_waitinglist\",\n status.STATUS_REGISTERED: \"registration_msg_registered\",\n status.STATUS_CANCELLED_FINAL: \"registration_msg_cancelled_final\",\n status.STATUS_CANCELLED: \"registration_msg_cancelled\",\n status.STATUS_CANCELLED_LATE: \"registration_msg_cancelled_late\",\n status.STATUS_OPTIONAL: \"registration_msg_optional\",\n status.STATUS_OPTIONAL_REGISTERED: \"registration_msg_optional_registered\",\n status.STATUS_NONE: \"no_registration_message\",\n }\n\n registration_msg_will_open = models.CharField(\n _(\n \"message when registrations are still closed (and the user is not registered)\"\n ),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_WILL_OPEN],\n ),\n )\n registration_msg_expired = models.CharField(\n _(\n \"message when the registration deadline expired and the user is not registered\"\n ),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_EXPIRED],\n ),\n )\n registration_msg_open = models.CharField(\n _(\"message when registrations are open and the user is not registered\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_OPEN],\n ),\n )\n registration_msg_full = models.CharField(\n _(\n \"message when registrations are open, but full and the user is not registered\"\n ),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_FULL],\n ),\n )\n registration_msg_waitinglist = models.CharField(\n _(\"message when user is on the waiting list\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_WAITINGLIST],\n ),\n )\n registration_msg_registered = models.CharField(\n _(\"message when user is registered\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_REGISTERED],\n ),\n )\n registration_msg_cancelled = models.CharField(\n _(\"message when user cancelled their registration in time\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_CANCELLED],\n ),\n )\n registration_msg_cancelled_final = models.CharField(\n _(\n \"message when user cancelled their registration in time and cannot re-register\"\n ),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_CANCELLED_FINAL],\n ),\n )\n 
registration_msg_cancelled_late = models.CharField(\n _(\"message when user cancelled their registration late and will pay a fine\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_CANCELLED_LATE],\n ),\n )\n registration_msg_optional = models.CharField(\n _(\"message when registrations are optional and the user is not registered\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_OPTIONAL],\n ),\n )\n registration_msg_optional_registered = models.CharField(\n _(\"message when registrations are optional and the user is registered\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_OPTIONAL_REGISTERED],\n ),\n )\n\n class Meta:\n ordering = (\"-start\",)\n permissions = ((\"override_organiser\", \"Can access events as if organizing\"),)\n", "path": "website/events/models/event.py" } ]
[ { "content": "import uuid\n\nfrom django.conf import settings\nfrom django.core import validators\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.db import models, router\nfrom django.db.models import Count, Q\nfrom django.db.models.deletion import Collector\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import gettext_lazy as _\n\nfrom queryable_properties.managers import QueryablePropertiesManager\nfrom queryable_properties.properties import AggregateProperty\nfrom tinymce.models import HTMLField\n\nfrom announcements.models import Slide\nfrom events.models import status\nfrom events.models.categories import EVENT_CATEGORIES\nfrom members.models import Member\nfrom payments.models import PaymentAmountField\nfrom pushnotifications.models import Category, ScheduledMessage\n\n\nclass Event(models.Model):\n \"\"\"Describes an event.\"\"\"\n\n objects = QueryablePropertiesManager()\n\n DEFAULT_NO_REGISTRATION_MESSAGE = _(\"No registration required\")\n\n title = models.CharField(_(\"title\"), max_length=100)\n\n description = HTMLField(\n _(\"description\"),\n )\n\n caption = models.TextField(\n _(\"caption\"),\n max_length=500,\n null=False,\n blank=False,\n help_text=_(\n \"A short text of max 500 characters for promotion and the newsletter.\"\n ),\n )\n\n start = models.DateTimeField(_(\"start time\"))\n\n end = models.DateTimeField(_(\"end time\"))\n\n organisers = models.ManyToManyField(\n \"activemembers.MemberGroup\",\n verbose_name=_(\"organisers\"),\n related_name=_(\"event_organiser\"),\n )\n\n category = models.CharField(\n max_length=40,\n choices=EVENT_CATEGORIES,\n verbose_name=_(\"category\"),\n help_text=_(\n \"Alumni: Events organised for alumni, \"\n \"Education: Education focused events, \"\n \"Career: Career focused events, \"\n \"Leisure: borrels, parties, game activities etc., \"\n \"Association Affairs: general meetings or \"\n \"any other board related events, \"\n \"Other: anything else.\"\n ),\n )\n\n registration_start = models.DateTimeField(\n _(\"registration start\"),\n null=True,\n blank=True,\n help_text=_(\n \"If you set a registration period registration will be \"\n \"required. If you don't set one, registration won't be \"\n \"required. Prefer times when people don't have lectures, \"\n \"e.g. 12:30 instead of 13:37.\"\n ),\n )\n\n registration_end = models.DateTimeField(\n _(\"registration end\"),\n null=True,\n blank=True,\n help_text=_(\n \"If you set a registration period registration will be \"\n \"required. If you don't set one, registration won't be \"\n \"required.\"\n ),\n )\n\n cancel_deadline = models.DateTimeField(_(\"cancel deadline\"), null=True, blank=True)\n\n send_cancel_email = models.BooleanField(\n _(\"send cancellation notifications\"),\n default=True,\n help_text=_(\n \"Send an email to the organising party when a member \"\n \"cancels their registration after the deadline.\"\n ),\n )\n\n optional_registrations = models.BooleanField(\n _(\"allow optional registrations\"),\n default=True,\n help_text=_(\n \"Participants can indicate their optional presence, even though \"\n \"registration is not actually required. 
This ignores registration \"\n \"start and end time or cancellation deadlines, optional \"\n \"registration will be enabled directly after publishing until the \"\n \"end of the event.\"\n ),\n )\n\n location = models.CharField(\n _(\"location\"),\n max_length=255,\n )\n\n map_location = models.CharField(\n _(\"location for minimap\"),\n max_length=255,\n help_text=_(\n \"Location of Huygens: Heyendaalseweg 135, Nijmegen. \"\n \"Location of Mercator 1: Toernooiveld 212, Nijmegen. \"\n \"Use the input 'discord' or 'online' for special placeholders. \"\n \"Not shown as text!!\"\n ),\n )\n\n price = PaymentAmountField(\n verbose_name=_(\"price\"),\n allow_zero=True,\n default=0,\n validators=[validators.MinValueValidator(0)],\n )\n\n fine = PaymentAmountField(\n verbose_name=_(\"fine\"),\n allow_zero=True,\n default=0,\n # Minimum fine is checked in this model's clean(), as it is only for\n # events that require registration.\n help_text=_(\"Fine if participant does not show up (at least €5).\"),\n validators=[validators.MinValueValidator(0)],\n )\n\n max_participants = models.PositiveSmallIntegerField(\n _(\"maximum number of participants\"),\n blank=True,\n null=True,\n )\n\n no_registration_message = models.CharField(\n _(\"message when there is no registration\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=(\n format_lazy(\n \"{} {}. {}\",\n _(\"Default:\"),\n DEFAULT_NO_REGISTRATION_MESSAGE,\n _(\n 'This field accepts HTML tags as well, e.g. links with &lta href=\"https://example.com\" target=\"_blank\"&gthttps://example.com&lt/a&gt'\n ),\n )\n ),\n )\n\n published = models.BooleanField(_(\"published\"), default=False)\n\n registration_reminder = models.ForeignKey(\n ScheduledMessage,\n on_delete=models.deletion.SET_NULL,\n related_name=\"registration_event\",\n blank=True,\n null=True,\n )\n start_reminder = models.ForeignKey(\n ScheduledMessage,\n on_delete=models.deletion.SET_NULL,\n related_name=\"start_event\",\n blank=True,\n null=True,\n )\n\n documents = models.ManyToManyField(\n \"documents.Document\",\n verbose_name=_(\"documents\"),\n blank=True,\n )\n\n slide = models.ForeignKey(\n Slide,\n verbose_name=\"slide\",\n help_text=_(\n \"Change the header-image on the event's info-page to one \"\n \"specific to this event.\"\n ),\n blank=True,\n on_delete=models.deletion.SET_NULL,\n null=True,\n )\n\n tpay_allowed = models.BooleanField(_(\"Allow Thalia Pay\"), default=True)\n\n shift = models.OneToOneField(\"sales.Shift\", models.SET_NULL, null=True, blank=True)\n\n mark_present_url_token = models.UUIDField(\n unique=True, default=uuid.uuid4, editable=False\n )\n\n @property\n def mark_present_url(self):\n \"\"\"Return a url that a user can use to mark themselves present.\"\"\"\n return settings.BASE_URL + reverse(\n \"events:mark-present\",\n kwargs={\n \"pk\": self.pk,\n \"token\": self.mark_present_url_token,\n },\n )\n\n @property\n def cancel_too_late_message(self):\n return _(\n \"Cancellation isn't possible anymore without having to pay \"\n \"the full costs of €\" + str(self.fine) + \". 
Also note that \"\n \"you will be unable to re-register.\"\n )\n\n @property\n def after_cancel_deadline(self):\n return self.cancel_deadline and self.cancel_deadline <= timezone.now()\n\n @property\n def registration_started(self):\n return self.registration_start <= timezone.now()\n\n @property\n def registration_required(self):\n return bool(self.registration_start) or bool(self.registration_end)\n\n @property\n def payment_required(self):\n return self.price != 0\n\n @property\n def has_fields(self):\n return self.registrationinformationfield_set.count() > 0\n\n participant_count = AggregateProperty(\n Count(\n \"eventregistration\",\n filter=Q(eventregistration__date_cancelled=None),\n )\n )\n\n def reached_participants_limit(self):\n \"\"\"Is this event up to capacity?.\"\"\"\n return (\n self.max_participants is not None\n and self.max_participants <= self.participant_count\n )\n\n @property\n def registrations(self):\n \"\"\"Queryset with all non-cancelled registrations.\"\"\"\n return self.eventregistration_set.filter(date_cancelled=None)\n\n @property\n def participants(self):\n \"\"\"Return the active participants.\"\"\"\n if self.max_participants is not None:\n return self.registrations.order_by(\"date\")[: self.max_participants]\n return self.registrations.order_by(\"date\")\n\n @property\n def queue(self):\n \"\"\"Return the waiting queue.\"\"\"\n if self.max_participants is not None:\n return self.registrations.order_by(\"date\")[self.max_participants :]\n return []\n\n @property\n def cancellations(self):\n \"\"\"Return a queryset with the cancelled events.\"\"\"\n return self.eventregistration_set.exclude(date_cancelled=None).order_by(\n \"date_cancelled\"\n )\n\n @property\n def registration_allowed(self):\n now = timezone.now()\n return (\n bool(self.registration_start or self.registration_end)\n and self.registration_end > now >= self.registration_start\n )\n\n @property\n def cancellation_allowed(self):\n now = timezone.now()\n return (\n bool(self.registration_start or self.registration_end)\n and self.registration_start <= now < self.start\n )\n\n @property\n def optional_registration_allowed(self):\n return (\n self.optional_registrations\n and not self.registration_required\n and self.end >= timezone.now()\n )\n\n @property\n def has_food_event(self):\n # pylint: disable=pointless-statement\n try:\n self.food_event\n return True\n except ObjectDoesNotExist:\n return False\n\n def clean_changes(self, changed_data):\n \"\"\"Check if changes from `changed_data` are allowed.\n\n This method should be run from a form clean() method, where changed_data\n can be retrieved from self.changed_data\n \"\"\"\n errors = {}\n if self.published:\n for field in (\"price\", \"registration_start\"):\n if (\n field in changed_data\n and self.registration_start\n and self.registration_start <= timezone.now()\n ):\n errors.update(\n {\n field: _(\n \"You cannot change this field after \"\n \"the registration has started.\"\n )\n }\n )\n\n if errors:\n raise ValidationError(errors)\n\n def clean(self):\n # pylint: disable=too-many-branches\n super().clean()\n errors = {}\n if self.start is None:\n errors.update({\"start\": _(\"Start cannot have an empty date or time field\")})\n if self.end is None:\n errors.update({\"end\": _(\"End cannot have an empty date or time field\")})\n if self.start is not None and self.end is not None:\n if self.end < self.start:\n errors.update({\"end\": _(\"Can't have an event travel back in time\")})\n if self.registration_required:\n if 
self.optional_registrations:\n errors.update(\n {\n \"optional_registrations\": _(\n \"This is not possible when actual registrations are required.\"\n )\n }\n )\n if self.fine < 5:\n errors.update(\n {\n \"fine\": _(\n \"The fine for this event is too low \"\n \"(must be at least €5).\"\n )\n }\n )\n if self.no_registration_message:\n errors.update(\n {\n \"no_registration_message\": _(\n \"Doesn't make sense to have this \"\n \"if you require registrations.\"\n )\n }\n )\n if not self.registration_start:\n errors.update(\n {\n \"registration_start\": _(\n \"If registration is required, you need a start of \"\n \"registration\"\n )\n }\n )\n if not self.registration_end:\n errors.update(\n {\n \"registration_end\": _(\n \"If registration is required, you need an end of \"\n \"registration\"\n )\n }\n )\n if not self.cancel_deadline:\n errors.update(\n {\n \"cancel_deadline\": _(\n \"If registration is required, \"\n \"you need a deadline for the cancellation\"\n )\n }\n )\n elif self.cancel_deadline > self.start:\n errors.update(\n {\n \"cancel_deadline\": _(\n \"The cancel deadline should be\"\n \" before the start of the event.\"\n )\n }\n )\n if (\n self.registration_start\n and self.registration_end\n and (self.registration_start >= self.registration_end)\n ):\n message = _(\"Registration start should be before registration end\")\n errors.update(\n {\"registration_start\": message, \"registration_end\": message}\n )\n\n if errors:\n raise ValidationError(errors)\n\n def get_absolute_url(self):\n return reverse(\"events:event\", args=[str(self.pk)])\n\n def save(self, **kwargs):\n delete_collector = Collector(\n using=router.db_for_write(self.__class__, instance=self)\n )\n\n if not self.pk:\n super().save(**kwargs)\n\n if self.published:\n if self.registration_required:\n registration_reminder_time = (\n self.registration_start - timezone.timedelta(hours=1)\n )\n registration_reminder = ScheduledMessage()\n if (\n self.registration_reminder is not None\n and not self.registration_reminder.sent\n ):\n registration_reminder = self.registration_reminder\n\n if registration_reminder_time > timezone.now():\n registration_reminder.title = \"Event registration\"\n registration_reminder.body = (\n f\"Registration for '{self.title}' starts in 1 hour\"\n )\n registration_reminder.category = Category.objects.get(\n key=Category.EVENT\n )\n registration_reminder.time = registration_reminder_time\n registration_reminder.url = (\n f\"{settings.BASE_URL}{reverse('events:event', args=[self.id])}\"\n )\n\n registration_reminder.save()\n self.registration_reminder = registration_reminder\n self.registration_reminder.users.set(Member.current_members.all())\n elif registration_reminder.pk is not None:\n delete_collector.collect([self.registration_reminder])\n self.registration_reminder = None\n\n start_reminder_time = self.start - timezone.timedelta(hours=1)\n start_reminder = ScheduledMessage()\n if self.start_reminder is not None and not self.start_reminder.sent:\n start_reminder = self.start_reminder\n\n if start_reminder_time > timezone.now():\n start_reminder.title = \"Event\"\n start_reminder.body = f\"'{self.title}' starts in 1 hour\"\n start_reminder.category = Category.objects.get(key=Category.EVENT)\n start_reminder.time = start_reminder_time\n start_reminder.save()\n self.start_reminder = start_reminder\n if self.registration_required:\n self.start_reminder.users.set(\n [r.member for r in self.participants if r.member]\n )\n else:\n 
self.start_reminder.users.set(Member.current_members.all())\n elif start_reminder.pk is not None:\n delete_collector.collect([self.start_reminder])\n self.start_reminder = None\n else:\n if (\n self.registration_reminder is not None\n and not self.registration_reminder.sent\n ):\n delete_collector.collect([self.registration_reminder])\n self.registration_reminder = None\n\n if self.start_reminder is not None and not self.start_reminder.sent:\n delete_collector.collect([self.start_reminder])\n self.start_reminder = None\n\n super().save()\n delete_collector.delete()\n\n def delete(self, using=None, keep_parents=False):\n using = using or router.db_for_write(self.__class__, instance=self)\n collector = Collector(using=using)\n collector.collect([self], keep_parents=keep_parents)\n\n if (\n self.registration_reminder is not None\n and not self.registration_reminder.sent\n ):\n collector.collect([self.registration_reminder], keep_parents=keep_parents)\n if self.start_reminder is not None and not self.start_reminder.sent:\n collector.collect([self.start_reminder], keep_parents=keep_parents)\n if self.has_food_event:\n collector.add([self.food_event])\n\n return collector.delete()\n\n def __str__(self):\n return f\"{self.title}: {timezone.localtime(self.start):%Y-%m-%d %H:%M}\"\n\n DEFAULT_STATUS_MESSAGE = {\n status.STATUS_WILL_OPEN: _(\"Registration will open {regstart}.\"),\n status.STATUS_EXPIRED: _(\"Registration is not possible anymore.\"),\n status.STATUS_OPEN: _(\"You can register now.\"),\n status.STATUS_FULL: _(\n \"Registrations are full, but you can join the waiting list.\"\n ),\n status.STATUS_WAITINGLIST: _(\"You are in queue position {pos}.\"),\n status.STATUS_REGISTERED: _(\"You are registered for this event.\"),\n status.STATUS_CANCELLED: _(\n \"Your registration for this event is cancelled. You may still re-register.\"\n ),\n status.STATUS_CANCELLED_FINAL: _(\n \"Your registration for this event is cancelled. 
Note that you cannot re-register.\"\n ),\n status.STATUS_CANCELLED_LATE: _(\n \"Your registration is cancelled after the deadline and you will pay a fine of €{fine}.\"\n ),\n status.STATUS_OPTIONAL: _(\"You can optionally register for this event.\"),\n status.STATUS_OPTIONAL_REGISTERED: _(\n \"You are optionally registered for this event.\"\n ),\n status.STATUS_NONE: DEFAULT_NO_REGISTRATION_MESSAGE,\n status.STATUS_LOGIN: _(\n \"You have to log in before you can register for this event.\"\n ),\n }\n\n STATUS_MESSAGE_FIELDS = {\n status.STATUS_WILL_OPEN: \"registration_msg_will_open\",\n status.STATUS_EXPIRED: \"registration_msg_expired\",\n status.STATUS_OPEN: \"registration_msg_open\",\n status.STATUS_FULL: \"registration_msg_full\",\n status.STATUS_WAITINGLIST: \"registration_msg_waitinglist\",\n status.STATUS_REGISTERED: \"registration_msg_registered\",\n status.STATUS_CANCELLED_FINAL: \"registration_msg_cancelled_final\",\n status.STATUS_CANCELLED: \"registration_msg_cancelled\",\n status.STATUS_CANCELLED_LATE: \"registration_msg_cancelled_late\",\n status.STATUS_OPTIONAL: \"registration_msg_optional\",\n status.STATUS_OPTIONAL_REGISTERED: \"registration_msg_optional_registered\",\n status.STATUS_NONE: \"no_registration_message\",\n }\n\n registration_msg_will_open = models.CharField(\n _(\n \"message when registrations are still closed (and the user is not registered)\"\n ),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_WILL_OPEN],\n ),\n )\n registration_msg_expired = models.CharField(\n _(\n \"message when the registration deadline expired and the user is not registered\"\n ),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_EXPIRED],\n ),\n )\n registration_msg_open = models.CharField(\n _(\"message when registrations are open and the user is not registered\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_OPEN],\n ),\n )\n registration_msg_full = models.CharField(\n _(\n \"message when registrations are open, but full and the user is not registered\"\n ),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_FULL],\n ),\n )\n registration_msg_waitinglist = models.CharField(\n _(\"message when user is on the waiting list\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_WAITINGLIST],\n ),\n )\n registration_msg_registered = models.CharField(\n _(\"message when user is registered\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_REGISTERED],\n ),\n )\n registration_msg_cancelled = models.CharField(\n _(\"message when user cancelled their registration in time\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_CANCELLED],\n ),\n )\n registration_msg_cancelled_final = models.CharField(\n _(\n \"message when user cancelled their registration in time and cannot re-register\"\n ),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_CANCELLED_FINAL],\n ),\n )\n 
registration_msg_cancelled_late = models.CharField(\n _(\"message when user cancelled their registration late and will pay a fine\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_CANCELLED_LATE],\n ),\n )\n registration_msg_optional = models.CharField(\n _(\"message when registrations are optional and the user is not registered\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_OPTIONAL],\n ),\n )\n registration_msg_optional_registered = models.CharField(\n _(\"message when registrations are optional and the user is registered\"),\n max_length=200,\n blank=True,\n null=True,\n help_text=format_lazy(\n \"{} {}\",\n _(\"Default:\"),\n DEFAULT_STATUS_MESSAGE[status.STATUS_OPTIONAL_REGISTERED],\n ),\n )\n\n class Meta:\n ordering = (\"-start\",)\n permissions = ((\"override_organiser\", \"Can access events as if organizing\"),)\n", "path": "website/events/models/event.py" } ]
diff --git a/website/events/models/event.py b/website/events/models/event.py index 06908f889..39e36cdbf 100644 --- a/website/events/models/event.py +++ b/website/events/models/event.py @@ -258,7 +258,7 @@ def has_fields(self): participant_count = AggregateProperty( Count( "eventregistration", - filter=~Q(eventregistration__date_cancelled__lt=timezone.now()), + filter=Q(eventregistration__date_cancelled=None), ) ) diff --git a/website/events/tests/test_views.py b/website/events/tests/test_views.py index aba8b7b60..860502e22 100644 --- a/website/events/tests/test_views.py +++ b/website/events/tests/test_views.py @@ -182,7 +182,7 @@ def setUp(self): def test_registration_register_not_required(self): response = self.client.post("/events/1/registration/register/", follow=True) self.assertEqual(response.status_code, 200) - self.assertEqual(self.event.participants.count(), 1) + self.assertEqual(self.event.participant_count, 1) def test_registration_register(self): self.event.registration_start = timezone.now() - datetime.timedelta(hours=1) @@ -191,7 +191,7 @@ def test_registration_register(self): self.event.save() response = self.client.post("/events/1/registration/register/", follow=True) self.assertEqual(response.status_code, 200) - self.assertEqual(self.event.participants.count(), 1) + self.assertEqual(self.event.participant_count, 1) self.assertEqual(self.event.eventregistration_set.first().member, self.member) def test_registration_register_twice(self): @@ -203,7 +203,7 @@ def test_registration_register_twice(self): self.assertEqual(response.status_code, 200) response = self.client.post("/events/1/registration/register/", follow=True) self.assertEqual(response.status_code, 200) - self.assertEqual(self.event.participants.count(), 1) + self.assertEqual(self.event.participant_count, 1) def test_registration_register_closed(self): self.event.registration_start = timezone.now() - datetime.timedelta(hours=2) @@ -212,7 +212,7 @@ def test_registration_register_closed(self): self.event.save() response = self.client.post("/events/1/registration/register/", follow=True) self.assertEqual(response.status_code, 200) - self.assertEqual(self.event.participants.count(), 0) + self.assertEqual(self.event.participant_count, 0) def test_registration_cancel(self): self.event.registration_start = timezone.now() - datetime.timedelta(hours=1) @@ -222,7 +222,7 @@ def test_registration_cancel(self): EventRegistration.objects.create(event=self.event, member=self.member) response = self.client.post("/events/1/registration/cancel/", follow=True) self.assertEqual(response.status_code, 200) - self.assertEqual(self.event.participants.count(), 0) + self.assertEqual(self.event.participant_count, 0) def test_registration_register_no_fields(self): self.event.registration_start = timezone.now() - datetime.timedelta(hours=1) @@ -261,7 +261,7 @@ def test_registration_register_no_fields(self): ) self.assertEqual(response.status_code, 200) - self.assertEqual(self.event.participants.count(), 1) + self.assertEqual(self.event.participant_count, 1) registration = self.event.eventregistration_set.first() self.assertEqual(field1.get_value_for(registration), None) self.assertEqual(field2.get_value_for(registration), None) @@ -301,7 +301,7 @@ def test_registration_missing_fields(self): self.assertEqual(response.status_code, 200) template_names = [template.name for template in response.templates] self.assertIn("events/registration.html", template_names) - self.assertEqual(self.event.participants.count(), 1) + 
self.assertEqual(self.event.participant_count, 1) def test_registration_register_fields_required(self): self.event.registration_start = timezone.now() - datetime.timedelta(hours=1) @@ -320,7 +320,7 @@ def test_registration_register_fields_required(self): self.assertEqual(response.status_code, 200) template_names = [template.name for template in response.templates] self.assertIn("events/registration.html", template_names) - self.assertEqual(self.event.participants.count(), 1) + self.assertEqual(self.event.participant_count, 1) def test_registration_update_form_load_not_changes_fields(self): self.event.registration_start = timezone.now() - datetime.timedelta(hours=1) @@ -428,7 +428,7 @@ def test_registration_update_form_post_changes_fields(self): ) self.assertEqual(response.status_code, 200) - self.assertEqual(self.event.participants.count(), 1) + self.assertEqual(self.event.participant_count, 1) registration = self.event.eventregistration_set.first() self.assertEqual(field1.get_value_for(registration), False) self.assertEqual(field2.get_value_for(registration), 1337) @@ -443,7 +443,7 @@ def test_registration_cancel_after_deadline_notification(self): EventRegistration.objects.create(event=self.event, member=self.member) response = self.client.post("/events/1/registration/cancel/", follow=True) self.assertEqual(response.status_code, 200) - self.assertEqual(self.event.participants.count(), 0) + self.assertEqual(self.event.participant_count, 0) self.assertEqual(len(mail.outbox), 1) self.assertEqual( mail.outbox[0].to,
google__mobly-311
Exceptions in `setup_test` should leave the test in `ERROR` status

Regardless of the type of the exception, `setup_test` error should cause `ERROR` status. This is different from a test method. In a test method, an exception based on signals.TestFailure should cause the test to exit with `FAILED` status.

This is to be consistent with pyunit's behavior.
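A minimal sketch of the conversion this implies, assuming Mobly's `signals` module (including the `TestError` signal that the patch further down adds) is importable; the `run_setup` wrapper name is hypothetical and not part of Mobly's API:

```python
import sys

from mobly import signals  # assumes a Mobly build that defines signals.TestError


def run_setup(setup_fn, test_name):
    """Call a setup_test-style hook, mapping TestFailure to TestError.

    A failure signal raised while setting a test up is not a verdict on the
    test itself, so it is re-raised as TestError and the resulting record
    carries ERROR status instead of FAILED.
    """
    try:
        setup_fn(test_name)
    except signals.TestFailure as e:
        new_e = signals.TestError(e.details, e.extras)
        # Keep the original traceback so the failing setup frame stays visible.
        _, _, new_e.__traceback__ = sys.exc_info()
        raise new_e
```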
[ { "content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module is where all the test signal classes and related utilities live.\n\"\"\"\n\nimport functools\nimport json\n\n\nclass TestSignalError(Exception):\n \"\"\"Raised when an error occurs inside a test signal.\"\"\"\n\n\nclass TestSignal(Exception):\n \"\"\"Base class for all test result control signals. This is used to signal\n the result of a test.\n\n Attribute:\n details: A string that describes the reason for raising this signal.\n extras: A json-serializable data type to convey extra information about\n a test result.\n \"\"\"\n\n def __init__(self, details, extras=None):\n super(TestSignal, self).__init__(details)\n self.details = str(details)\n try:\n json.dumps(extras)\n self.extras = extras\n except TypeError:\n raise TestSignalError('Extras must be json serializable. %s '\n 'is not.' % extras)\n\n def __str__(self):\n return 'Details=%s, Extras=%s' % (self.details, self.extras)\n\n\nclass TestFailure(TestSignal):\n \"\"\"Raised when a test has failed.\"\"\"\n\n\nclass TestPass(TestSignal):\n \"\"\"Raised when a test has passed.\"\"\"\n\n\nclass TestSkip(TestSignal):\n \"\"\"Raised when a test has been skipped.\"\"\"\n\n\nclass TestAbortClass(TestSignal):\n \"\"\"Raised when all subsequent tests within the same test class should\n be aborted.\n \"\"\"\n\n\nclass TestAbortAll(TestSignal):\n \"\"\"Raised when all subsequent tests should be aborted.\"\"\"\n\n\nclass ControllerError(Exception):\n \"\"\"Raised when an error occured in controller classes.\"\"\"\n", "path": "mobly/signals.py" } ]
[ { "content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module is where all the test signal classes and related utilities live.\n\"\"\"\n\nimport functools\nimport json\n\n\nclass TestSignalError(Exception):\n \"\"\"Raised when an error occurs inside a test signal.\"\"\"\n\n\nclass TestSignal(Exception):\n \"\"\"Base class for all test result control signals. This is used to signal\n the result of a test.\n\n Attribute:\n details: A string that describes the reason for raising this signal.\n extras: A json-serializable data type to convey extra information about\n a test result.\n \"\"\"\n\n def __init__(self, details, extras=None):\n super(TestSignal, self).__init__(details)\n self.details = str(details)\n try:\n json.dumps(extras)\n self.extras = extras\n except TypeError:\n raise TestSignalError('Extras must be json serializable. %s '\n 'is not.' % extras)\n\n def __str__(self):\n return 'Details=%s, Extras=%s' % (self.details, self.extras)\n\n\nclass TestError(TestSignal):\n \"\"\"Raised when a test has an unexpected error.\"\"\"\n\n\nclass TestFailure(TestSignal):\n \"\"\"Raised when a test has failed.\"\"\"\n\n\nclass TestPass(TestSignal):\n \"\"\"Raised when a test has passed.\"\"\"\n\n\nclass TestSkip(TestSignal):\n \"\"\"Raised when a test has been skipped.\"\"\"\n\n\nclass TestAbortClass(TestSignal):\n \"\"\"Raised when all subsequent tests within the same test class should\n be aborted.\n \"\"\"\n\n\nclass TestAbortAll(TestSignal):\n \"\"\"Raised when all subsequent tests should be aborted.\"\"\"\n\n\nclass ControllerError(Exception):\n \"\"\"Raised when an error occured in controller classes.\"\"\"\n", "path": "mobly/signals.py" } ]
diff --git a/mobly/base_test.py b/mobly/base_test.py index 5233aa5c..fde8faba 100644 --- a/mobly/base_test.py +++ b/mobly/base_test.py @@ -17,6 +17,7 @@ import functools import inspect import logging +import sys from mobly import logger from mobly import records @@ -316,7 +317,7 @@ def exec_one_test(self, test_name, test_method, args=(), **kwargs): Executes setup_test, the test method, and teardown_test; then creates a records.TestResultRecord object with the execution information and adds - the record to the test class's test results. + the record to the test class's test result s. Args: test_name: Name of the test. @@ -330,7 +331,12 @@ def exec_one_test(self, test_name, test_method, args=(), **kwargs): teardown_test_failed = False try: try: - self._setup_test(test_name) + try: + self._setup_test(test_name) + except signals.TestFailure as e: + new_e = signals.TestError(e.details, e.extras) + _, _, new_e.__traceback__ = sys.exc_info() + raise new_e if args or kwargs: test_method(*args, **kwargs) else: diff --git a/mobly/signals.py b/mobly/signals.py index 8899065a..85bdc303 100644 --- a/mobly/signals.py +++ b/mobly/signals.py @@ -46,6 +46,10 @@ def __str__(self): return 'Details=%s, Extras=%s' % (self.details, self.extras) +class TestError(TestSignal): + """Raised when a test has an unexpected error.""" + + class TestFailure(TestSignal): """Raised when a test has failed.""" diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py index bd7dce9b..d8604c94 100755 --- a/tests/mobly/base_test_test.py +++ b/tests/mobly/base_test_test.py @@ -239,11 +239,13 @@ def test_something(self): bt_cls = MockBaseTest(self.mock_test_cls_configs) bt_cls.run(test_names=["test_something"]) - actual_record = bt_cls.results.failed[0] + actual_record = bt_cls.results.error[0] self.assertEqual(actual_record.test_name, self.mock_test_name) self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION) + # Make sure the full stacktrace of `setup_test` is preserved. + self.assertTrue('self.setup_test()' in actual_record.stacktrace) self.assertIsNone(actual_record.extras) - expected_summary = ("Error 0, Executed 1, Failed 1, Passed 0, " + expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, " "Requested 1, Skipped 0") self.assertEqual(bt_cls.results.summary_str(), expected_summary) @@ -407,6 +409,7 @@ def test_something(self): def test_procedure_function_gets_correct_record(self): on_fail_mock = mock.MagicMock() + class MockBaseTest(base_test.BaseTestClass): def on_fail(self, record): on_fail_mock.record = record @@ -418,12 +421,16 @@ def test_something(self): bt_cls.run() actual_record = bt_cls.results.failed[0] self.assertEqual(actual_record.test_name, 'test_something') - self.assertEqual(on_fail_mock.record.test_name, actual_record.test_name) - self.assertEqual(on_fail_mock.record.begin_time, actual_record.begin_time) + self.assertEqual(on_fail_mock.record.test_name, + actual_record.test_name) + self.assertEqual(on_fail_mock.record.begin_time, + actual_record.begin_time) self.assertEqual(on_fail_mock.record.end_time, actual_record.end_time) - self.assertEqual(on_fail_mock.record.stacktrace, actual_record.stacktrace) + self.assertEqual(on_fail_mock.record.stacktrace, + actual_record.stacktrace) self.assertEqual(on_fail_mock.record.extras, actual_record.extras) - self.assertEqual(on_fail_mock.record.extra_errors, actual_record.extra_errors) + self.assertEqual(on_fail_mock.record.extra_errors, + actual_record.extra_errors) # But they are not the same object. 
self.assertIsNot(on_fail_mock.record, actual_record) @@ -989,6 +996,23 @@ def test_func(self): self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION) self.assertEqual(actual_record.extras, MOCK_EXTRA) + def test_skip_in_setup_test(self): + class MockBaseTest(base_test.BaseTestClass): + def setup_test(self): + asserts.skip(MSG_EXPECTED_EXCEPTION, extras=MOCK_EXTRA) + + def test_func(self): + never_call() + + bt_cls = MockBaseTest(self.mock_test_cls_configs) + bt_cls.run(test_names=["test_func"]) + actual_record = bt_cls.results.skipped[0] + self.assertIsNotNone(actual_record.begin_time) + self.assertIsNotNone(actual_record.end_time) + self.assertEqual(actual_record.test_name, "test_func") + self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION) + self.assertEqual(actual_record.extras, MOCK_EXTRA) + def test_unpack_userparams_required(self): """Missing a required param should raise an error.""" required = ["some_param"]
Pyomo__pyomo-3017
Pyomo AD fails with constant Expressions

## Summary

Pyomo AD raises an error when encountering `Expressions` with a constant value.

### Steps to reproduce the issue

```console
import pyomo.environ as pyo
from pyomo.core.expr.calculus.derivatives import Modes, differentiate

m = pyo.ConcreteModel()
m.x = pyo.Var(units=pyo.units.mol, initialize=0)

@m.Expression()
def y(blk):
    return 2

@m.Expression()
def product(blk):
    return blk.x * blk.y

diff_expr = differentiate(
    expr=m.product, wrt=m.x, mode=Modes.reverse_symbolic
)
diff_expr.pprint()
```

### Error Message

```console
Exception has occurred: KeyError
"Component with id '1907001786672': 2.0"
  File "C:\Users\[REDACTED]\Repos\pyomo\pyomo\common\collections\component_map.py", line 71, in __getitem__
    return self._dict[id(obj)][1]
KeyError: 1907001786672

During handling of the above exception, another exception occurred:

  File "C:\Users\[REDACTED]\Repos\pyomo\pyomo\common\collections\component_map.py", line 73, in __getitem__
    raise KeyError("Component with id '%s': %s" % (id(obj), str(obj)))
  File "C:\Users\[REDACTED]\Repos\pyomo\pyomo\core\expr\calculus\diff_with_pyomo.py", line 331, in _diff_GeneralExpression
    der_dict[node.expr] += der_dict[node]
  File "C:\Users\[REDACTED]\Repos\pyomo\pyomo\core\expr\calculus\diff_with_pyomo.py", line 442, in _reverse_diff_helper
    _diff_GeneralExpression(e, val_dict, der_dict)
  File "C:\Users\[REDACTED]\Repos\pyomo\pyomo\core\expr\calculus\diff_with_pyomo.py", line 484, in reverse_sd
    return _reverse_diff_helper(expr, False)
  File "C:\Users\[REDACTED]\Repos\pyomo\pyomo\core\expr\calculus\derivatives.py", line 96, in differentiate
    res = reverse_sd(expr=expr)
  File "C:\Users\[REDACTED]\Desktop\diff_expr.py", line 16, in <module>
    diff_expr = differentiate(
KeyError: "Component with id '1907001786672': 2.0"
```

### Information on your system

Pyomo version: 6.6.2
Python version: 3.10.10 | packaged by Anaconda, Inc. | (main, Mar 21 2023, 18:39:17) [MSC v.1916 64 bit (AMD64)]
Operating system: Windows 10
How Pyomo was installed (PyPI, conda, source): From sources
Solver (if applicable): n/a
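For reference, a minimal check in the spirit of the regression test added by the fix further down (which switches `_diff_GeneralExpression` from `node.expr` to `node.arg(0)`); this sketch assumes a Pyomo installation that already contains that change:

```python
# Differentiate through a constant named Expression; before the fix this
# raised KeyError, after the fix the derivative of x * e w.r.t. x is 2.
import pyomo.environ as pyo
from pyomo.core.expr.calculus.diff_with_pyomo import reverse_ad, reverse_sd

m = pyo.ConcreteModel()
m.x = pyo.Var(initialize=3)
m.e = pyo.Expression(expr=2)  # constant named Expression

e = m.x * m.e
derivs = reverse_ad(e)    # numeric reverse-mode AD
symbolic = reverse_sd(e)  # symbolic reverse-mode AD

assert abs(derivs[m.x] - 2.0) < 1e-8
assert abs(pyo.value(symbolic[m.x]) - 2.0) < 1e-8
```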
[ { "content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nfrom pyomo.common.collections import ComponentMap, ComponentSet\nimport pyomo.core.expr as _expr\nfrom pyomo.core.expr.visitor import ExpressionValueVisitor, nonpyomo_leaf_types\nfrom pyomo.core.expr.numvalue import value, is_constant\nfrom pyomo.core.expr import exp, log, sin, cos\nimport math\n\n\n\"\"\"\nThe purpose of this file is to perform symbolic differentiation and \nfirst order automatic differentiation directly with pyomo \nexpressions. This is certainly not as efficient as doing AD in C or \nC++, but it avoids the translation from pyomo expressions to a form \nwhere AD can be performed efficiently. The only functions that are \nmeant to be used by users are reverse_ad and reverse_sd. First, \nvalues are propagated from the leaves to each node in the tree with \nthe LeafToRoot visitors. Then derivative values are propagated from \nthe root to the leaves with the RootToLeaf visitors.\n\"\"\"\n\n\nclass DifferentiationException(Exception):\n pass\n\n\ndef _diff_ProductExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.ProductExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 2\n arg1, arg2 = node.args\n der = der_dict[node]\n der_dict[arg1] += der * val_dict[arg2]\n der_dict[arg2] += der * val_dict[arg1]\n\n\ndef _diff_SumExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.SumExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n der = der_dict[node]\n for arg in node.args:\n der_dict[arg] += der\n\n\ndef _diff_PowExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.PowExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 2\n arg1, arg2 = node.args\n der = der_dict[node]\n val1 = val_dict[arg1]\n val2 = val_dict[arg2]\n der_dict[arg1] += der * val2 * val1 ** (val2 - 1)\n if arg2.__class__ not in nonpyomo_leaf_types:\n der_dict[arg2] += der * val1**val2 * log(val1)\n\n\ndef _diff_DivisionExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.DivisionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 2\n num = node.args[0]\n den = node.args[1]\n der = der_dict[node]\n der_dict[num] += der * (1 / val_dict[den])\n der_dict[den] -= der * val_dict[num] / val_dict[den] ** 2\n\n\ndef _diff_NegationExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] -= der\n\n\ndef _diff_exp(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n 
der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der * exp(val_dict[arg])\n\n\ndef _diff_log(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der / val_dict[arg]\n\n\ndef _diff_log10(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der * math.log10(math.exp(1)) / val_dict[arg]\n\n\ndef _diff_sin(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der * cos(val_dict[arg])\n\n\ndef _diff_cos(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] -= der * sin(val_dict[arg])\n\n\ndef _diff_tan(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der / (cos(val_dict[arg]) ** 2)\n\n\ndef _diff_asin(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der / (1 - val_dict[arg] ** 2) ** 0.5\n\n\ndef _diff_acos(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] -= der / (1 - val_dict[arg] ** 2) ** 0.5\n\n\ndef _diff_atan(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der / (1 + val_dict[arg] ** 2)\n\n\ndef _diff_sqrt(node, val_dict, der_dict):\n \"\"\"\n Reverse automatic differentiation on the square root function.\n Implementation copied from power function, with fixed exponent.\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der * 0.5 * val_dict[arg] ** (-0.5)\n\n\ndef _diff_abs(node, val_dict, der_dict):\n \"\"\"\n Reverse automatic differentiation on the abs function.\n This will raise an exception at 0.\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) 
== 1\n arg = node.args[0]\n der = der_dict[node]\n val = val_dict[arg]\n if is_constant(val) and val == 0:\n raise DifferentiationException('Cannot differentiate abs(x) at x=0')\n der_dict[arg] += der * val / abs(val)\n\n\n_unary_map = dict()\n_unary_map['exp'] = _diff_exp\n_unary_map['log'] = _diff_log\n_unary_map['log10'] = _diff_log10\n_unary_map['sin'] = _diff_sin\n_unary_map['cos'] = _diff_cos\n_unary_map['tan'] = _diff_tan\n_unary_map['asin'] = _diff_asin\n_unary_map['acos'] = _diff_acos\n_unary_map['atan'] = _diff_atan\n_unary_map['sqrt'] = _diff_sqrt\n_unary_map['abs'] = _diff_abs\n\n\ndef _diff_UnaryFunctionExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n if node.getname() in _unary_map:\n _unary_map[node.getname()](node, val_dict, der_dict)\n else:\n raise DifferentiationException(\n 'Unsupported expression type for differentiation: {0}'.format(type(node))\n )\n\n\ndef _diff_GeneralExpression(node, val_dict, der_dict):\n \"\"\"\n Reverse automatic differentiation for named expressions.\n\n Parameters\n ----------\n node: The named expression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n der_dict[node.expr] += der_dict[node]\n\n\ndef _diff_ExternalFunctionExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.ExternalFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n der = der_dict[node]\n vals = tuple(val_dict[i] for i in node.args)\n derivs = node._fcn.evaluate_fgh(vals, fgh=1)[1]\n for ndx, arg in enumerate(node.args):\n der_dict[arg] += der * derivs[ndx]\n\n\n_diff_map = dict()\n_diff_map[_expr.ProductExpression] = _diff_ProductExpression\n_diff_map[_expr.DivisionExpression] = _diff_DivisionExpression\n_diff_map[_expr.PowExpression] = _diff_PowExpression\n_diff_map[_expr.SumExpression] = _diff_SumExpression\n_diff_map[_expr.MonomialTermExpression] = _diff_ProductExpression\n_diff_map[_expr.NegationExpression] = _diff_NegationExpression\n_diff_map[_expr.UnaryFunctionExpression] = _diff_UnaryFunctionExpression\n_diff_map[_expr.ExternalFunctionExpression] = _diff_ExternalFunctionExpression\n_diff_map[_expr.LinearExpression] = _diff_SumExpression\n_diff_map[_expr.AbsExpression] = _diff_abs\n\n_diff_map[_expr.NPV_ProductExpression] = _diff_ProductExpression\n_diff_map[_expr.NPV_DivisionExpression] = _diff_DivisionExpression\n_diff_map[_expr.NPV_PowExpression] = _diff_PowExpression\n_diff_map[_expr.NPV_SumExpression] = _diff_SumExpression\n_diff_map[_expr.NPV_NegationExpression] = _diff_NegationExpression\n_diff_map[_expr.NPV_UnaryFunctionExpression] = _diff_UnaryFunctionExpression\n_diff_map[_expr.NPV_ExternalFunctionExpression] = _diff_ExternalFunctionExpression\n_diff_map[_expr.NPV_AbsExpression] = _diff_abs\n\n\ndef _symbolic_value(x):\n return x\n\n\ndef _numeric_apply_operation(node, values):\n return node._apply_operation(values)\n\n\ndef _symbolic_apply_operation(node, values):\n return node\n\n\nclass _LeafToRootVisitor(ExpressionValueVisitor):\n def __init__(self, val_dict, der_dict, expr_list, numeric=True):\n \"\"\"\n Parameters\n ----------\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n self.val_dict = val_dict\n self.der_dict = der_dict\n self.expr_list = expr_list\n assert len(self.expr_list) == 0\n assert len(self.val_dict) == 0\n assert len(self.der_dict) == 0\n if numeric:\n self.value_func = value\n 
self.operation_func = _numeric_apply_operation\n else:\n self.value_func = _symbolic_value\n self.operation_func = _symbolic_apply_operation\n\n def visit(self, node, values):\n self.val_dict[node] = self.operation_func(node, values)\n self.der_dict[node] = 0\n self.expr_list.append(node)\n return self.val_dict[node]\n\n def visiting_potential_leaf(self, node):\n if node in self.val_dict:\n return True, self.val_dict[node]\n\n if node.__class__ in nonpyomo_leaf_types:\n self.val_dict[node] = node\n self.der_dict[node] = 0\n return True, node\n\n if not node.is_expression_type():\n val = self.value_func(node)\n self.val_dict[node] = val\n self.der_dict[node] = 0\n return True, val\n\n return False, None\n\n\ndef _reverse_diff_helper(expr, numeric=True):\n val_dict = ComponentMap()\n der_dict = ComponentMap()\n expr_list = list()\n\n visitorA = _LeafToRootVisitor(val_dict, der_dict, expr_list, numeric=numeric)\n visitorA.dfs_postorder_stack(expr)\n\n der_dict[expr] = 1\n for e in reversed(expr_list):\n if e.__class__ in _diff_map:\n _diff_map[e.__class__](e, val_dict, der_dict)\n elif e.is_named_expression_type():\n _diff_GeneralExpression(e, val_dict, der_dict)\n else:\n raise DifferentiationException(\n 'Unsupported expression type for differentiation: {0}'.format(type(e))\n )\n\n return der_dict\n\n\ndef reverse_ad(expr):\n \"\"\"\n First order reverse automatic differentiation\n\n Parameters\n ----------\n expr: pyomo.core.expr.numeric_expr.NumericExpression\n expression to differentiate\n\n Returns\n -------\n ComponentMap\n component_map mapping variables to derivatives with respect\n to the corresponding variable\n \"\"\"\n return _reverse_diff_helper(expr, True)\n\n\ndef reverse_sd(expr):\n \"\"\"\n First order reverse symbolic differentiation\n\n Parameters\n ----------\n expr: pyomo.core.expr.numeric_expr.NumericExpression\n expression to differentiate\n\n Returns\n -------\n ComponentMap\n component_map mapping variables to derivatives with respect\n to the corresponding variable\n \"\"\"\n return _reverse_diff_helper(expr, False)\n", "path": "pyomo/core/expr/calculus/diff_with_pyomo.py" } ]
[ { "content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nfrom pyomo.common.collections import ComponentMap, ComponentSet\nimport pyomo.core.expr as _expr\nfrom pyomo.core.expr.visitor import ExpressionValueVisitor, nonpyomo_leaf_types\nfrom pyomo.core.expr.numvalue import value, is_constant\nfrom pyomo.core.expr import exp, log, sin, cos\nimport math\n\n\n\"\"\"\nThe purpose of this file is to perform symbolic differentiation and \nfirst order automatic differentiation directly with pyomo \nexpressions. This is certainly not as efficient as doing AD in C or \nC++, but it avoids the translation from pyomo expressions to a form \nwhere AD can be performed efficiently. The only functions that are \nmeant to be used by users are reverse_ad and reverse_sd. First, \nvalues are propagated from the leaves to each node in the tree with \nthe LeafToRoot visitors. Then derivative values are propagated from \nthe root to the leaves with the RootToLeaf visitors.\n\"\"\"\n\n\nclass DifferentiationException(Exception):\n pass\n\n\ndef _diff_ProductExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.ProductExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 2\n arg1, arg2 = node.args\n der = der_dict[node]\n der_dict[arg1] += der * val_dict[arg2]\n der_dict[arg2] += der * val_dict[arg1]\n\n\ndef _diff_SumExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.SumExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n der = der_dict[node]\n for arg in node.args:\n der_dict[arg] += der\n\n\ndef _diff_PowExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.PowExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 2\n arg1, arg2 = node.args\n der = der_dict[node]\n val1 = val_dict[arg1]\n val2 = val_dict[arg2]\n der_dict[arg1] += der * val2 * val1 ** (val2 - 1)\n if arg2.__class__ not in nonpyomo_leaf_types:\n der_dict[arg2] += der * val1**val2 * log(val1)\n\n\ndef _diff_DivisionExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.DivisionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 2\n num = node.args[0]\n den = node.args[1]\n der = der_dict[node]\n der_dict[num] += der * (1 / val_dict[den])\n der_dict[den] -= der * val_dict[num] / val_dict[den] ** 2\n\n\ndef _diff_NegationExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] -= der\n\n\ndef _diff_exp(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n 
der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der * exp(val_dict[arg])\n\n\ndef _diff_log(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der / val_dict[arg]\n\n\ndef _diff_log10(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der * math.log10(math.exp(1)) / val_dict[arg]\n\n\ndef _diff_sin(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der * cos(val_dict[arg])\n\n\ndef _diff_cos(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] -= der * sin(val_dict[arg])\n\n\ndef _diff_tan(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der / (cos(val_dict[arg]) ** 2)\n\n\ndef _diff_asin(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der / (1 - val_dict[arg] ** 2) ** 0.5\n\n\ndef _diff_acos(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] -= der / (1 - val_dict[arg] ** 2) ** 0.5\n\n\ndef _diff_atan(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der / (1 + val_dict[arg] ** 2)\n\n\ndef _diff_sqrt(node, val_dict, der_dict):\n \"\"\"\n Reverse automatic differentiation on the square root function.\n Implementation copied from power function, with fixed exponent.\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) == 1\n arg = node.args[0]\n der = der_dict[node]\n der_dict[arg] += der * 0.5 * val_dict[arg] ** (-0.5)\n\n\ndef _diff_abs(node, val_dict, der_dict):\n \"\"\"\n Reverse automatic differentiation on the abs function.\n This will raise an exception at 0.\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n assert len(node.args) 
== 1\n arg = node.args[0]\n der = der_dict[node]\n val = val_dict[arg]\n if is_constant(val) and val == 0:\n raise DifferentiationException('Cannot differentiate abs(x) at x=0')\n der_dict[arg] += der * val / abs(val)\n\n\n_unary_map = dict()\n_unary_map['exp'] = _diff_exp\n_unary_map['log'] = _diff_log\n_unary_map['log10'] = _diff_log10\n_unary_map['sin'] = _diff_sin\n_unary_map['cos'] = _diff_cos\n_unary_map['tan'] = _diff_tan\n_unary_map['asin'] = _diff_asin\n_unary_map['acos'] = _diff_acos\n_unary_map['atan'] = _diff_atan\n_unary_map['sqrt'] = _diff_sqrt\n_unary_map['abs'] = _diff_abs\n\n\ndef _diff_UnaryFunctionExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.UnaryFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n if node.getname() in _unary_map:\n _unary_map[node.getname()](node, val_dict, der_dict)\n else:\n raise DifferentiationException(\n 'Unsupported expression type for differentiation: {0}'.format(type(node))\n )\n\n\ndef _diff_GeneralExpression(node, val_dict, der_dict):\n \"\"\"\n Reverse automatic differentiation for named expressions.\n\n Parameters\n ----------\n node: The named expression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n der_dict[node.arg(0)] += der_dict[node]\n\n\ndef _diff_ExternalFunctionExpression(node, val_dict, der_dict):\n \"\"\"\n\n Parameters\n ----------\n node: pyomo.core.expr.numeric_expr.ExternalFunctionExpression\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n der = der_dict[node]\n vals = tuple(val_dict[i] for i in node.args)\n derivs = node._fcn.evaluate_fgh(vals, fgh=1)[1]\n for ndx, arg in enumerate(node.args):\n der_dict[arg] += der * derivs[ndx]\n\n\n_diff_map = dict()\n_diff_map[_expr.ProductExpression] = _diff_ProductExpression\n_diff_map[_expr.DivisionExpression] = _diff_DivisionExpression\n_diff_map[_expr.PowExpression] = _diff_PowExpression\n_diff_map[_expr.SumExpression] = _diff_SumExpression\n_diff_map[_expr.MonomialTermExpression] = _diff_ProductExpression\n_diff_map[_expr.NegationExpression] = _diff_NegationExpression\n_diff_map[_expr.UnaryFunctionExpression] = _diff_UnaryFunctionExpression\n_diff_map[_expr.ExternalFunctionExpression] = _diff_ExternalFunctionExpression\n_diff_map[_expr.LinearExpression] = _diff_SumExpression\n_diff_map[_expr.AbsExpression] = _diff_abs\n\n_diff_map[_expr.NPV_ProductExpression] = _diff_ProductExpression\n_diff_map[_expr.NPV_DivisionExpression] = _diff_DivisionExpression\n_diff_map[_expr.NPV_PowExpression] = _diff_PowExpression\n_diff_map[_expr.NPV_SumExpression] = _diff_SumExpression\n_diff_map[_expr.NPV_NegationExpression] = _diff_NegationExpression\n_diff_map[_expr.NPV_UnaryFunctionExpression] = _diff_UnaryFunctionExpression\n_diff_map[_expr.NPV_ExternalFunctionExpression] = _diff_ExternalFunctionExpression\n_diff_map[_expr.NPV_AbsExpression] = _diff_abs\n\n\ndef _symbolic_value(x):\n return x\n\n\ndef _numeric_apply_operation(node, values):\n return node._apply_operation(values)\n\n\ndef _symbolic_apply_operation(node, values):\n return node\n\n\nclass _LeafToRootVisitor(ExpressionValueVisitor):\n def __init__(self, val_dict, der_dict, expr_list, numeric=True):\n \"\"\"\n Parameters\n ----------\n val_dict: ComponentMap\n der_dict: ComponentMap\n \"\"\"\n self.val_dict = val_dict\n self.der_dict = der_dict\n self.expr_list = expr_list\n assert len(self.expr_list) == 0\n assert len(self.val_dict) == 0\n assert len(self.der_dict) == 0\n if numeric:\n self.value_func = value\n 
self.operation_func = _numeric_apply_operation\n else:\n self.value_func = _symbolic_value\n self.operation_func = _symbolic_apply_operation\n\n def visit(self, node, values):\n self.val_dict[node] = self.operation_func(node, values)\n self.der_dict[node] = 0\n self.expr_list.append(node)\n return self.val_dict[node]\n\n def visiting_potential_leaf(self, node):\n if node in self.val_dict:\n return True, self.val_dict[node]\n\n if node.__class__ in nonpyomo_leaf_types:\n self.val_dict[node] = node\n self.der_dict[node] = 0\n return True, node\n\n if not node.is_expression_type():\n val = self.value_func(node)\n self.val_dict[node] = val\n self.der_dict[node] = 0\n return True, val\n\n return False, None\n\n\ndef _reverse_diff_helper(expr, numeric=True):\n val_dict = ComponentMap()\n der_dict = ComponentMap()\n expr_list = list()\n\n visitorA = _LeafToRootVisitor(val_dict, der_dict, expr_list, numeric=numeric)\n visitorA.dfs_postorder_stack(expr)\n\n der_dict[expr] = 1\n for e in reversed(expr_list):\n if e.__class__ in _diff_map:\n _diff_map[e.__class__](e, val_dict, der_dict)\n elif e.is_named_expression_type():\n _diff_GeneralExpression(e, val_dict, der_dict)\n else:\n raise DifferentiationException(\n 'Unsupported expression type for differentiation: {0}'.format(type(e))\n )\n\n return der_dict\n\n\ndef reverse_ad(expr):\n \"\"\"\n First order reverse automatic differentiation\n\n Parameters\n ----------\n expr: pyomo.core.expr.numeric_expr.NumericExpression\n expression to differentiate\n\n Returns\n -------\n ComponentMap\n component_map mapping variables to derivatives with respect\n to the corresponding variable\n \"\"\"\n return _reverse_diff_helper(expr, True)\n\n\ndef reverse_sd(expr):\n \"\"\"\n First order reverse symbolic differentiation\n\n Parameters\n ----------\n expr: pyomo.core.expr.numeric_expr.NumericExpression\n expression to differentiate\n\n Returns\n -------\n ComponentMap\n component_map mapping variables to derivatives with respect\n to the corresponding variable\n \"\"\"\n return _reverse_diff_helper(expr, False)\n", "path": "pyomo/core/expr/calculus/diff_with_pyomo.py" } ]
diff --git a/pyomo/core/expr/calculus/diff_with_pyomo.py b/pyomo/core/expr/calculus/diff_with_pyomo.py index 952e8ec6dd3..0e3ba3cc2b2 100644 --- a/pyomo/core/expr/calculus/diff_with_pyomo.py +++ b/pyomo/core/expr/calculus/diff_with_pyomo.py @@ -328,7 +328,7 @@ def _diff_GeneralExpression(node, val_dict, der_dict): val_dict: ComponentMap der_dict: ComponentMap """ - der_dict[node.expr] += der_dict[node] + der_dict[node.arg(0)] += der_dict[node] def _diff_ExternalFunctionExpression(node, val_dict, der_dict): diff --git a/pyomo/core/tests/unit/test_derivs.py b/pyomo/core/tests/unit/test_derivs.py index 9e89f2beac9..23a5a8bc7d1 100644 --- a/pyomo/core/tests/unit/test_derivs.py +++ b/pyomo/core/tests/unit/test_derivs.py @@ -230,6 +230,17 @@ def e2(m, i): symbolic = reverse_sd(m.o.expr) self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol) + def test_constant_named_expressions(self): + m = pyo.ConcreteModel() + m.x = pyo.Var(initialize=3) + m.e = pyo.Expression(expr=2) + + e = m.x * m.e + derivs = reverse_ad(e) + symbolic = reverse_sd(e) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) + self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) + def test_multiple_named_expressions(self): m = pyo.ConcreteModel() m.x = pyo.Var()
sopel-irc__sopel-1280
[url] meaning of `exclude` changed?
I used to have the following in my `sopel.cfg`:

```ini
[url]
exclude =
exclusion_char = !
```

... and it worked fine up to (and including) 6.1.1. After an upgrade to 6.3.0, the bot was suddenly silent on `.title http://example.com/` - the above `exclude` pattern would match everything!

After removing the above block, the proper `.title` behavior was restored.

The problem is that the above is the default config generated when you run `sopel -w`, so by default the url plugin config is now basically broken and there is no way to configure the bot to _not_ be silent (unless you input an impossible regex).
[url] 'function' object has no attribute 'priority'
Sopel v. 6.5.0 is not loading the `url` module properly. I am unable to get titles using `.title http://url.here`, and reloading the module gives this error:

````
<dgw> Sopel: reload url
<Sopel> AttributeError: 'function' object has no attribute 'priority' (file "/usr/local/lib/python2.7/dist-packages/sopel/bot.py", line 213, in unregister)
````

Perhaps that's a red herring, because on startup Sopel prints to stdout: `Error in url setup procedure: nothing to repeat (../../../../../lib/python2.7/re.py:242)`

My config includes no `exclude` rules, and `exclusion_char = ^`. This occurs both with an empty `exclude` option in the config file and with no `exclude` line at all.
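For context on the first report, here is a minimal plain-Python sketch (not sopel's actual module code; `parse_list` below is a simplified stand-in for `ListAttribute.parse`) of why an empty `exclude =` line ends up excluding every URL, and what filtering out empty items changes:

```python
import re

def parse_list(value, strip=True):
    # Simplified stand-in for sopel's ListAttribute.parse before the fix
    items = value.split(',')
    return [v.strip() for v in items] if strip else items

exclude = parse_list('')   # what an empty "exclude =" config line produces
print(exclude)             # ['']  -- a single empty-string pattern

# An empty regex matches any URL, so every ".title" target looks excluded:
print(bool(re.search(exclude[0], 'http://example.com/')))   # True

# Dropping empty items (as the fix below does) leaves no bogus pattern:
print(list(filter(None, ''.split(','))))                    # []
```

Filtering empty items at parse time, rather than guarding inside the url module, fixes every consumer of `ListAttribute` at once.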
[ { "content": "# coding=utf-8\n\"\"\"Types for creating section definitions.\n\nA section definition consists of a subclass of ``StaticSection``, on which any\nnumber of subclasses of ``BaseValidated`` (a few common ones of which are\navailable in this module) are assigned as attributes. These descriptors define\nhow to read values from, and write values to, the config file.\n\nAs an example, if one wanted to define the ``[spam]`` section as having an\n``eggs`` option, which contains a list of values, they could do this:\n\n >>> class SpamSection(StaticSection):\n ... eggs = ListAttribute('eggs')\n ...\n >>> SpamSection(config, 'spam')\n >>> print(config.spam.eggs)\n []\n >>> config.spam.eggs = ['goose', 'turkey', 'duck', 'chicken', 'quail']\n >>> print(config.spam.eggs)\n ['goose', 'turkey', 'duck', 'chicken', 'quail']\n >>> config.spam.eggs = 'herring'\n Traceback (most recent call last):\n ...\n ValueError: ListAttribute value must be a list.\n\"\"\"\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport os.path\nimport sys\nfrom sopel.tools import get_input\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\nif sys.version_info.major >= 3:\n unicode = str\n basestring = (str, bytes)\n\n\nclass NO_DEFAULT(object):\n \"\"\"A special value to indicate that there should be no default.\"\"\"\n\n\nclass StaticSection(object):\n \"\"\"A configuration section with parsed and validated settings.\n\n This class is intended to be subclassed with added ``ValidatedAttribute``\\s.\n \"\"\"\n def __init__(self, config, section_name, validate=True):\n if not config.parser.has_section(section_name):\n config.parser.add_section(section_name)\n self._parent = config\n self._parser = config.parser\n self._section_name = section_name\n for value in dir(self):\n try:\n getattr(self, value)\n except ValueError as e:\n raise ValueError(\n 'Invalid value for {}.{}: {}'.format(section_name, value,\n str(e))\n )\n except AttributeError:\n if validate:\n raise ValueError(\n 'Missing required value for {}.{}'.format(section_name,\n value)\n )\n\n def configure_setting(self, name, prompt, default=NO_DEFAULT):\n \"\"\"Return a validated value for this attribute from the terminal.\n\n ``prompt`` will be the docstring of the attribute if not given.\n\n If ``default`` is passed, it will be used if no value is given by the\n user. If it is not passed, the current value of the setting, or the\n default value if it's unset, will be used. Note that if ``default`` is\n passed, the current value of the setting will be ignored, even if it is\n not the attribute's default.\n \"\"\"\n clazz = getattr(self.__class__, name)\n if default is NO_DEFAULT:\n try:\n default = getattr(self, name)\n except AttributeError:\n pass\n except ValueError:\n print('The configured value for this option was invalid.')\n if clazz.default is not NO_DEFAULT:\n default = clazz.default\n while True:\n try:\n value = clazz.configure(prompt, default, self._parent, self._section_name)\n except ValueError as exc:\n print(exc)\n else:\n break\n setattr(self, name, value)\n\n\nclass BaseValidated(object):\n \"\"\"The base type for a descriptor in a ``StaticSection``.\"\"\"\n def __init__(self, name, default=None):\n \"\"\"\n ``name`` is the name of the setting in the section.\n ``default`` is the value to be returned if the setting is not set. 
If\n not given, AttributeError will be raised instead.\n \"\"\"\n self.name = name\n self.default = default\n\n def configure(self, prompt, default, parent, section_name):\n \"\"\"With the prompt and default, parse and return a value from terminal.\n \"\"\"\n if default is not NO_DEFAULT and default is not None:\n prompt = '{} [{}]'.format(prompt, default)\n value = get_input(prompt + ' ')\n if not value and default is NO_DEFAULT:\n raise ValueError(\"You must provide a value for this option.\")\n value = value or default\n return self.parse(value)\n\n def serialize(self, value):\n \"\"\"Take some object, and return the string to be saved to the file.\n\n Must be implemented in subclasses.\n \"\"\"\n raise NotImplemented(\"Serialize method must be implemented in subclass\")\n\n def parse(self, value):\n \"\"\"Take a string from the file, and return the appropriate object.\n\n Must be implemented in subclasses.\"\"\"\n raise NotImplemented(\"Parse method must be implemented in subclass\")\n\n def __get__(self, instance, owner=None):\n if instance is None:\n # If instance is None, we're getting from a section class, not an\n # instance of a session class. It makes the wizard code simpler\n # (and is really just more intuitive) to return the descriptor\n # instance here.\n return self\n\n if instance._parser.has_option(instance._section_name, self.name):\n value = instance._parser.get(instance._section_name, self.name)\n else:\n if self.default is not NO_DEFAULT:\n return self.default\n raise AttributeError(\n \"Missing required value for {}.{}\".format(\n instance._section_name, self.name\n )\n )\n return self.parse(value)\n\n def __set__(self, instance, value):\n if value is None:\n instance._parser.remove_option(instance._section_name, self.name)\n return\n value = self.serialize(value)\n instance._parser.set(instance._section_name, self.name, value)\n\n def __delete__(self, instance):\n instance._parser.remove_option(instance._section_name, self.name)\n\n\ndef _parse_boolean(value):\n if value is True or value == 1:\n return value\n if isinstance(value, basestring):\n return value.lower() in ['1', 'yes', 'y', 'true', 'on']\n return bool(value)\n\n\ndef _serialize_boolean(value):\n return 'true' if _parse_boolean(value) else 'false'\n\n\nclass ValidatedAttribute(BaseValidated):\n def __init__(self, name, parse=None, serialize=None, default=None):\n \"\"\"A descriptor for settings in a ``StaticSection``\n\n ``parse`` is the function to be used to read the string and create the\n appropriate object. If not given, return the string as-is.\n ``serialize`` takes an object, and returns the value to be written to\n the file. If not given, defaults to ``unicode``.\n \"\"\"\n self.name = name\n if parse == bool:\n parse = _parse_boolean\n if not serialize or serialize == bool:\n serialize = _serialize_boolean\n self.parse = parse or self.parse\n self.serialize = serialize or self.serialize\n self.default = default\n\n def serialize(self, value):\n return unicode(value)\n\n def parse(self, value):\n return value\n\n def configure(self, prompt, default, parent, section_name):\n if self.parse == _parse_boolean:\n prompt += ' (y/n)'\n default = 'y' if default else 'n'\n return super(ValidatedAttribute, self).configure(prompt, default, parent, section_name)\n\n\nclass ListAttribute(BaseValidated):\n \"\"\"A config attribute containing a list of string values.\n\n Values are saved to the file as a comma-separated list. It does not\n currently support commas within items in the list. 
By default, the spaces\n before and after each item are stripped; you can override this by passing\n ``strip=False``.\"\"\"\n def __init__(self, name, strip=True, default=None):\n default = default or []\n super(ListAttribute, self).__init__(name, default=default)\n self.strip = strip\n\n def parse(self, value):\n value = value.split(',')\n if self.strip:\n return [v.strip() for v in value]\n else:\n return value\n\n def serialize(self, value):\n if not isinstance(value, (list, set)):\n raise ValueError('ListAttribute value must be a list.')\n return ','.join(value)\n\n def configure(self, prompt, default, parent, section_name):\n each_prompt = '?'\n if isinstance(prompt, tuple):\n each_prompt = prompt[1]\n prompt = prompt[0]\n\n if default is not NO_DEFAULT:\n default = ','.join(default)\n prompt = '{} [{}]'.format(prompt, default)\n else:\n default = ''\n print(prompt)\n values = []\n value = get_input(each_prompt + ' ') or default\n while value:\n values.append(value)\n value = get_input(each_prompt + ' ')\n return self.parse(','.join(values))\n\n\nclass ChoiceAttribute(BaseValidated):\n \"\"\"A config attribute which must be one of a set group of options.\n\n Currently, the choices can only be strings.\"\"\"\n def __init__(self, name, choices, default=None):\n super(ChoiceAttribute, self).__init__(name, default=default)\n self.choices = choices\n\n def parse(self, value):\n if value in self.choices:\n return value\n else:\n raise ValueError('Value must be in {}'.format(self.choices))\n\n def serialize(self, value):\n if value in self.choices:\n return value\n else:\n raise ValueError('Value must be in {}'.format(self.choices))\n\n\nclass FilenameAttribute(BaseValidated):\n \"\"\"A config attribute which must be a file or directory.\"\"\"\n def __init__(self, name, relative=True, directory=False, default=None):\n \"\"\"\n ``relative`` is whether the path should be relative to the location\n of the config file (absolute paths will still be absolute). 
If\n ``directory`` is True, the path must indicate a directory, rather than\n a file.\n \"\"\"\n super(FilenameAttribute, self).__init__(name, default=default)\n self.relative = relative\n self.directory = directory\n\n def __get__(self, instance, owner=None):\n if instance is None:\n return self\n if instance._parser.has_option(instance._section_name, self.name):\n value = instance._parser.get(instance._section_name, self.name)\n else:\n if self.default is not NO_DEFAULT:\n value = self.default\n else:\n raise AttributeError(\n \"Missing required value for {}.{}\".format(\n instance._section_name, self.name\n )\n )\n main_config = instance._parent\n this_section = getattr(main_config, instance._section_name)\n return self.parse(main_config, this_section, value)\n\n def __set__(self, instance, value):\n main_config = instance._parent\n this_section = getattr(main_config, instance._section_name)\n value = self.serialize(main_config, this_section, value)\n instance._parser.set(instance._section_name, self.name, value)\n\n def configure(self, prompt, default, parent, section_name):\n \"\"\"With the prompt and default, parse and return a value from terminal.\n \"\"\"\n if default is not NO_DEFAULT and default is not None:\n prompt = '{} [{}]'.format(prompt, default)\n value = get_input(prompt + ' ')\n if not value and default is NO_DEFAULT:\n raise ValueError(\"You must provide a value for this option.\")\n value = value or default\n return self.parse(parent, section_name, value)\n\n def parse(self, main_config, this_section, value):\n if value is None:\n return\n\n value = os.path.expanduser(value)\n\n if not os.path.isabs(value):\n if not self.relative:\n raise ValueError(\"Value must be an absolute path.\")\n value = os.path.join(main_config.homedir, value)\n\n if self.directory and not os.path.isdir(value):\n try:\n os.makedirs(value)\n except OSError:\n raise ValueError(\n \"Value must be an existing or creatable directory.\")\n if not self.directory and not os.path.isfile(value):\n try:\n open(value, 'w').close()\n except OSError:\n raise ValueError(\"Value must be an existant or creatable file.\")\n return value\n\n def serialize(self, main_config, this_section, value):\n self.parse(main_config, this_section, value)\n return value # So that it's still relative\n", "path": "sopel/config/types.py" } ]
[ { "content": "# coding=utf-8\n\"\"\"Types for creating section definitions.\n\nA section definition consists of a subclass of ``StaticSection``, on which any\nnumber of subclasses of ``BaseValidated`` (a few common ones of which are\navailable in this module) are assigned as attributes. These descriptors define\nhow to read values from, and write values to, the config file.\n\nAs an example, if one wanted to define the ``[spam]`` section as having an\n``eggs`` option, which contains a list of values, they could do this:\n\n >>> class SpamSection(StaticSection):\n ... eggs = ListAttribute('eggs')\n ...\n >>> SpamSection(config, 'spam')\n >>> print(config.spam.eggs)\n []\n >>> config.spam.eggs = ['goose', 'turkey', 'duck', 'chicken', 'quail']\n >>> print(config.spam.eggs)\n ['goose', 'turkey', 'duck', 'chicken', 'quail']\n >>> config.spam.eggs = 'herring'\n Traceback (most recent call last):\n ...\n ValueError: ListAttribute value must be a list.\n\"\"\"\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\nimport os.path\nimport sys\nfrom sopel.tools import get_input\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\nif sys.version_info.major >= 3:\n unicode = str\n basestring = (str, bytes)\n\n\nclass NO_DEFAULT(object):\n \"\"\"A special value to indicate that there should be no default.\"\"\"\n\n\nclass StaticSection(object):\n \"\"\"A configuration section with parsed and validated settings.\n\n This class is intended to be subclassed with added ``ValidatedAttribute``\\s.\n \"\"\"\n def __init__(self, config, section_name, validate=True):\n if not config.parser.has_section(section_name):\n config.parser.add_section(section_name)\n self._parent = config\n self._parser = config.parser\n self._section_name = section_name\n for value in dir(self):\n try:\n getattr(self, value)\n except ValueError as e:\n raise ValueError(\n 'Invalid value for {}.{}: {}'.format(section_name, value,\n str(e))\n )\n except AttributeError:\n if validate:\n raise ValueError(\n 'Missing required value for {}.{}'.format(section_name,\n value)\n )\n\n def configure_setting(self, name, prompt, default=NO_DEFAULT):\n \"\"\"Return a validated value for this attribute from the terminal.\n\n ``prompt`` will be the docstring of the attribute if not given.\n\n If ``default`` is passed, it will be used if no value is given by the\n user. If it is not passed, the current value of the setting, or the\n default value if it's unset, will be used. Note that if ``default`` is\n passed, the current value of the setting will be ignored, even if it is\n not the attribute's default.\n \"\"\"\n clazz = getattr(self.__class__, name)\n if default is NO_DEFAULT:\n try:\n default = getattr(self, name)\n except AttributeError:\n pass\n except ValueError:\n print('The configured value for this option was invalid.')\n if clazz.default is not NO_DEFAULT:\n default = clazz.default\n while True:\n try:\n value = clazz.configure(prompt, default, self._parent, self._section_name)\n except ValueError as exc:\n print(exc)\n else:\n break\n setattr(self, name, value)\n\n\nclass BaseValidated(object):\n \"\"\"The base type for a descriptor in a ``StaticSection``.\"\"\"\n def __init__(self, name, default=None):\n \"\"\"\n ``name`` is the name of the setting in the section.\n ``default`` is the value to be returned if the setting is not set. 
If\n not given, AttributeError will be raised instead.\n \"\"\"\n self.name = name\n self.default = default\n\n def configure(self, prompt, default, parent, section_name):\n \"\"\"With the prompt and default, parse and return a value from terminal.\n \"\"\"\n if default is not NO_DEFAULT and default is not None:\n prompt = '{} [{}]'.format(prompt, default)\n value = get_input(prompt + ' ')\n if not value and default is NO_DEFAULT:\n raise ValueError(\"You must provide a value for this option.\")\n value = value or default\n return self.parse(value)\n\n def serialize(self, value):\n \"\"\"Take some object, and return the string to be saved to the file.\n\n Must be implemented in subclasses.\n \"\"\"\n raise NotImplemented(\"Serialize method must be implemented in subclass\")\n\n def parse(self, value):\n \"\"\"Take a string from the file, and return the appropriate object.\n\n Must be implemented in subclasses.\"\"\"\n raise NotImplemented(\"Parse method must be implemented in subclass\")\n\n def __get__(self, instance, owner=None):\n if instance is None:\n # If instance is None, we're getting from a section class, not an\n # instance of a session class. It makes the wizard code simpler\n # (and is really just more intuitive) to return the descriptor\n # instance here.\n return self\n\n if instance._parser.has_option(instance._section_name, self.name):\n value = instance._parser.get(instance._section_name, self.name)\n else:\n if self.default is not NO_DEFAULT:\n return self.default\n raise AttributeError(\n \"Missing required value for {}.{}\".format(\n instance._section_name, self.name\n )\n )\n return self.parse(value)\n\n def __set__(self, instance, value):\n if value is None:\n instance._parser.remove_option(instance._section_name, self.name)\n return\n value = self.serialize(value)\n instance._parser.set(instance._section_name, self.name, value)\n\n def __delete__(self, instance):\n instance._parser.remove_option(instance._section_name, self.name)\n\n\ndef _parse_boolean(value):\n if value is True or value == 1:\n return value\n if isinstance(value, basestring):\n return value.lower() in ['1', 'yes', 'y', 'true', 'on']\n return bool(value)\n\n\ndef _serialize_boolean(value):\n return 'true' if _parse_boolean(value) else 'false'\n\n\nclass ValidatedAttribute(BaseValidated):\n def __init__(self, name, parse=None, serialize=None, default=None):\n \"\"\"A descriptor for settings in a ``StaticSection``\n\n ``parse`` is the function to be used to read the string and create the\n appropriate object. If not given, return the string as-is.\n ``serialize`` takes an object, and returns the value to be written to\n the file. If not given, defaults to ``unicode``.\n \"\"\"\n self.name = name\n if parse == bool:\n parse = _parse_boolean\n if not serialize or serialize == bool:\n serialize = _serialize_boolean\n self.parse = parse or self.parse\n self.serialize = serialize or self.serialize\n self.default = default\n\n def serialize(self, value):\n return unicode(value)\n\n def parse(self, value):\n return value\n\n def configure(self, prompt, default, parent, section_name):\n if self.parse == _parse_boolean:\n prompt += ' (y/n)'\n default = 'y' if default else 'n'\n return super(ValidatedAttribute, self).configure(prompt, default, parent, section_name)\n\n\nclass ListAttribute(BaseValidated):\n \"\"\"A config attribute containing a list of string values.\n\n Values are saved to the file as a comma-separated list. It does not\n currently support commas within items in the list. 
By default, the spaces\n before and after each item are stripped; you can override this by passing\n ``strip=False``.\"\"\"\n def __init__(self, name, strip=True, default=None):\n default = default or []\n super(ListAttribute, self).__init__(name, default=default)\n self.strip = strip\n\n def parse(self, value):\n value = list(filter(None, value.split(',')))\n if self.strip:\n return [v.strip() for v in value]\n else:\n return value\n\n def serialize(self, value):\n if not isinstance(value, (list, set)):\n raise ValueError('ListAttribute value must be a list.')\n return ','.join(value)\n\n def configure(self, prompt, default, parent, section_name):\n each_prompt = '?'\n if isinstance(prompt, tuple):\n each_prompt = prompt[1]\n prompt = prompt[0]\n\n if default is not NO_DEFAULT:\n default = ','.join(default)\n prompt = '{} [{}]'.format(prompt, default)\n else:\n default = ''\n print(prompt)\n values = []\n value = get_input(each_prompt + ' ') or default\n while value:\n values.append(value)\n value = get_input(each_prompt + ' ')\n return self.parse(','.join(values))\n\n\nclass ChoiceAttribute(BaseValidated):\n \"\"\"A config attribute which must be one of a set group of options.\n\n Currently, the choices can only be strings.\"\"\"\n def __init__(self, name, choices, default=None):\n super(ChoiceAttribute, self).__init__(name, default=default)\n self.choices = choices\n\n def parse(self, value):\n if value in self.choices:\n return value\n else:\n raise ValueError('Value must be in {}'.format(self.choices))\n\n def serialize(self, value):\n if value in self.choices:\n return value\n else:\n raise ValueError('Value must be in {}'.format(self.choices))\n\n\nclass FilenameAttribute(BaseValidated):\n \"\"\"A config attribute which must be a file or directory.\"\"\"\n def __init__(self, name, relative=True, directory=False, default=None):\n \"\"\"\n ``relative`` is whether the path should be relative to the location\n of the config file (absolute paths will still be absolute). 
If\n ``directory`` is True, the path must indicate a directory, rather than\n a file.\n \"\"\"\n super(FilenameAttribute, self).__init__(name, default=default)\n self.relative = relative\n self.directory = directory\n\n def __get__(self, instance, owner=None):\n if instance is None:\n return self\n if instance._parser.has_option(instance._section_name, self.name):\n value = instance._parser.get(instance._section_name, self.name)\n else:\n if self.default is not NO_DEFAULT:\n value = self.default\n else:\n raise AttributeError(\n \"Missing required value for {}.{}\".format(\n instance._section_name, self.name\n )\n )\n main_config = instance._parent\n this_section = getattr(main_config, instance._section_name)\n return self.parse(main_config, this_section, value)\n\n def __set__(self, instance, value):\n main_config = instance._parent\n this_section = getattr(main_config, instance._section_name)\n value = self.serialize(main_config, this_section, value)\n instance._parser.set(instance._section_name, self.name, value)\n\n def configure(self, prompt, default, parent, section_name):\n \"\"\"With the prompt and default, parse and return a value from terminal.\n \"\"\"\n if default is not NO_DEFAULT and default is not None:\n prompt = '{} [{}]'.format(prompt, default)\n value = get_input(prompt + ' ')\n if not value and default is NO_DEFAULT:\n raise ValueError(\"You must provide a value for this option.\")\n value = value or default\n return self.parse(parent, section_name, value)\n\n def parse(self, main_config, this_section, value):\n if value is None:\n return\n\n value = os.path.expanduser(value)\n\n if not os.path.isabs(value):\n if not self.relative:\n raise ValueError(\"Value must be an absolute path.\")\n value = os.path.join(main_config.homedir, value)\n\n if self.directory and not os.path.isdir(value):\n try:\n os.makedirs(value)\n except OSError:\n raise ValueError(\n \"Value must be an existing or creatable directory.\")\n if not self.directory and not os.path.isfile(value):\n try:\n open(value, 'w').close()\n except OSError:\n raise ValueError(\"Value must be an existant or creatable file.\")\n return value\n\n def serialize(self, main_config, this_section, value):\n self.parse(main_config, this_section, value)\n return value # So that it's still relative\n", "path": "sopel/config/types.py" } ]
diff --git a/sopel/config/types.py b/sopel/config/types.py index 55d5167886..79b396e08f 100644 --- a/sopel/config/types.py +++ b/sopel/config/types.py @@ -222,7 +222,7 @@ def __init__(self, name, strip=True, default=None): self.strip = strip def parse(self, value): - value = value.split(',') + value = list(filter(None, value.split(','))) if self.strip: return [v.strip() for v in value] else:
Nitrate__Nitrate-381
Mark Nitrate as not zip_safe Add `zip_safe=False` to `setup.py` because Nitrate cannot run from a zip file directly.
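A trimmed-down sketch of the requested change (not the project's full `setup.py`, which appears below; the parenthetical reason in the comment is an assumption rather than something stated in the issue):

```python
from setuptools import setup, find_packages

setup(
    name='Nitrate',
    packages=find_packages(),
    include_package_data=True,
    # Keep the package unzipped on install; Nitrate cannot run from a
    # zip file directly (presumably it needs its data files on disk).
    zip_safe=False,
)
```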
[ { "content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n 'mysql': ['PyMySQL == 0.9.2'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n 'mysql': ['PyMySQL == 0.9.2'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 65376dbb..187b670d 100644 --- a/setup.py +++ b/setup.py @@ -90,6 +90,7 @@ def get_long_description(): extras_require=extras_require, packages=find_packages(), include_package_data=True, + zip_safe=False, classifiers=[ 'Framework :: Django', 'Framework :: Django :: 1.11',
urllib3__urllib3-987
urllib3 fails to install on centos7 due to old setuptools not supporting <=, < environment markers. Current urllib3 fails to install on centos7. This bug was most likely introduced after https://github.com/shazow/urllib3/commit/9f5454eac808a105307b2d363c99ce97e5109821. centos7 ships a very old version of setuptools (0.9.8) which does not support `<=` as an environment marker. See https://github.com/pypa/setuptools/issues/380. ``` $ python --version Python 2.7.5 $ rpm -qa python-setuptools python-setuptools-0.9.8-4.el7.noarch $ lsb_release -a ... Description: CentOS Linux release 7.2.1511 (Core) Release: 7.2.1511 $ virtualenv venv ... $ venv/bin/pip install urllib3 Downloading/unpacking urllib3 Downloading urllib3-1.18.tar.gz (183kB): 183kB downloaded Running setup.py egg_info for package urllib3 error in urllib3 setup command: Invalid environment marker: python_version <= "2.7" Complete output from command python setup.py egg_info: error in urllib3 setup command: Invalid environment marker: python_version <= "2.7" ---------------------------------------- Cleaning up... Command python setup.py egg_info failed with error code 1 in /home/rene/src/venv/build/urllib3 Storing complete log in /home/rene/.pip/pip.log ``` Installing https://github.com/shazow/urllib3/commit/f620d997134708b09560ca5797aa79a59a2ef4c0 (commit before 9f5454eac808a105307b2d363c99ce97e5109821) works fine. ``` $ venv/bin/pip install git+git://github.com/shazow/urllib3.git@f620d997134708b09560ca5797aa79a59a2ef4c0 ... Successfully installed urllib3 Cleaning up... ``` But 9f5454eac808a105307b2d363c99ce97e5109821 fails. ``` $ venv/bin/pip install git+git://github.com/shazow/urllib3.git@9f5454eac808a105307b2d363c99ce97e5109821 Downloading/unpacking git+git://github.com/shazow/urllib3.git@9f5454eac808a105307b2d363c99ce97e5109821 Cloning git://github.com/shazow/urllib3.git (to 9f5454eac808a105307b2d363c99ce97e5109821) to /tmp/pip-lnVDAG-build Could not find a tag or branch '9f5454eac808a105307b2d363c99ce97e5109821', assuming commit. Running setup.py egg_info for package from git+git://github.com/shazow/urllib3.git@9f5454eac808a105307b2d363c99ce97e5109821 error in urllib3 setup command: Invalid environment marker: python_version < "3.3" Complete output from command python setup.py egg_info: error in urllib3 setup command: Invalid environment marker: python_version < "3.3" ---------------------------------------- Cleaning up... Command python setup.py egg_info failed with error code 1 in /tmp/pip-lnVDAG-build Storing complete log in /home/rene/.pip/pip.log ``` urllib3 1.17 setup.py does not ship with < or <= markers so my workaround right now is to install urllib3==1.17.
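To make the marker syntax concrete, here is a small sketch of hypothetical package metadata (not urllib3's full `setup.py`) contrasting the two forms of `extras_require`:

```python
# Conditional extra using an environment marker in the key; a setuptools
# release too old to understand "<=" / "<" comparisons (e.g. 0.9.8 on
# CentOS 7) aborts with "Invalid environment marker".
extras_with_marker = {
    'secure': ['pyOpenSSL>=0.14', 'certifi'],
    'secure:python_version <= "2.7"': ['ipaddress'],
}

# Marker-free form: old setuptools parses it fine; the ipaddress backport
# is simply listed for the "secure" extra on every Python version.
extras_without_marker = {
    'secure': ['pyOpenSSL>=0.14', 'certifi', 'ipaddress'],
}
```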
[ { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, 'urllib3', '__init__.py')) as fp:\n VERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\n\nwith codecs.open('README.rst', encoding='utf-8') as fp:\n readme = fp.read()\nwith codecs.open('CHANGES.rst', encoding='utf-8') as fp:\n changes = fp.read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='[email protected]',\n url='https://urllib3.readthedocs.io/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.packages.backports', 'urllib3.contrib',\n 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'nose',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.14',\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n ],\n 'secure:python_version <= \"2.7\"': [\n \"ipaddress\",\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0,!=1.5.7',\n ]\n },\n )\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, 'urllib3', '__init__.py')) as fp:\n VERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\n\nwith codecs.open('README.rst', encoding='utf-8') as fp:\n readme = fp.read()\nwith codecs.open('CHANGES.rst', encoding='utf-8') as fp:\n changes = fp.read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='[email protected]',\n url='https://urllib3.readthedocs.io/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.packages.backports', 'urllib3.contrib',\n 'urllib3.util',\n ],\n requires=[],\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'nose',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.14',\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n \"ipaddress\",\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0,!=1.5.7',\n ]\n },\n )\n", "path": "setup.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index 0fd23a0456..29a579b892 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -5,7 +5,10 @@ dev (master) ------------ * urllib3 now respects Retry-After headers on 413, 429, and 503 responses when - using the default retry logic. + using the default retry logic. (Pull #955) + +* Remove markers from setup.py to assist ancient setuptools versions. (Issue + #986) * ... [Short description of non-trivial change.] (Issue #) diff --git a/setup.py b/setup.py index 196e0e5f6c..93950e5d1a 100644 --- a/setup.py +++ b/setup.py @@ -59,8 +59,6 @@ 'cryptography>=1.3.4', 'idna>=2.0.0', 'certifi', - ], - 'secure:python_version <= "2.7"': [ "ipaddress", ], 'socks': [
bookwyrm-social__bookwyrm-3193
Switching editions changes "shelved" date
**Describe the bug**
When switching editions of a book already on your "To Read" list, the "shelved" date is changed to today's date.

**To Reproduce**
Steps to reproduce the behavior:
1. Pick any book on your "To Read" list with more than one edition
2. Pick another edition and switch to it
3. Observe that the book's shelved date is now today

**Expected behavior**
Switching editions should not change the shelved date.

**Instance**
https://books.theunseen.city

---

**Desktop (please complete the following information):**
 - OS: MacOS 14.1
 - Browser: Firefox
 - Version: 20.0 (64-bit)
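As a rough illustration of the likely mechanism — a plain-Python analogy, not bookwyrm's actual Django model; the assumption is that `shelved_date` gets a "now" default whenever a shelf row is created without an explicit value:

```python
from dataclasses import dataclass, field
from datetime import datetime, timezone

@dataclass
class ShelfBookRow:
    # Stand-in for a DB row whose shelved_date defaults to "now" at creation
    book: str
    shelved_date: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

original = ShelfBookRow(book="edition-1")

# Buggy edition switch: the row is recreated without carrying the date over,
# so the default fires and the book looks shelved "today".
switched = ShelfBookRow(book="edition-2")

# Fixed edition switch: pass the old value through explicitly.
switched_fixed = ShelfBookRow(book="edition-2", shelved_date=original.shelved_date)

print(switched_fixed.shelved_date == original.shelved_date)  # True
```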
[ { "content": "\"\"\" the good stuff! the books! \"\"\"\nfrom functools import reduce\nimport operator\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.views import View\nfrom django.views.decorators.http import require_POST\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.views.helpers import is_api_request\n\n\n# pylint: disable=no-self-use\nclass Editions(View):\n \"\"\"list of editions\"\"\"\n\n def get(self, request, book_id):\n \"\"\"list of editions of a book\"\"\"\n work = get_object_or_404(models.Work, id=book_id)\n\n if is_api_request(request):\n return ActivitypubResponse(work.to_edition_list(**request.GET))\n filters = {}\n\n if request.GET.get(\"language\"):\n filters[\"languages__contains\"] = [request.GET.get(\"language\")]\n if request.GET.get(\"format\"):\n filters[\"physical_format__iexact\"] = request.GET.get(\"format\")\n\n editions = work.editions.order_by(\"-edition_rank\")\n languages = set(sum(editions.values_list(\"languages\", flat=True), []))\n\n editions = editions.filter(**filters)\n\n query = request.GET.get(\"q\")\n if query:\n searchable_array_fields = [\"languages\", \"publishers\"]\n searchable_fields = [\n \"title\",\n \"physical_format\",\n \"isbn_10\",\n \"isbn_13\",\n \"oclc_number\",\n \"asin\",\n \"aasin\",\n \"isfdb\",\n ]\n search_filter_entries = [\n {f\"{f}__icontains\": query} for f in searchable_fields\n ] + [{f\"{f}__iexact\": query} for f in searchable_array_fields]\n editions = editions.filter(\n reduce(operator.or_, (Q(**f) for f in search_filter_entries))\n )\n\n paginated = Paginator(editions, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"editions\": page,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n \"work\": work,\n \"work_form\": forms.EditionFromWorkForm(instance=work),\n \"languages\": languages,\n \"formats\": set(\n e.physical_format.lower() for e in editions if e.physical_format\n ),\n }\n return TemplateResponse(request, \"book/editions/editions.html\", data)\n\n\n@login_required\n@require_POST\[email protected]\ndef switch_edition(request):\n \"\"\"switch your copy of a book to a different edition\"\"\"\n edition_id = request.POST.get(\"edition\")\n new_edition = get_object_or_404(models.Edition, id=edition_id)\n shelfbooks = models.ShelfBook.objects.filter(\n book__parent_work=new_edition.parent_work, shelf__user=request.user\n )\n for shelfbook in shelfbooks.all():\n with transaction.atomic():\n models.ShelfBook.objects.create(\n created_date=shelfbook.created_date,\n user=shelfbook.user,\n shelf=shelfbook.shelf,\n book=new_edition,\n )\n shelfbook.delete()\n\n readthroughs = models.ReadThrough.objects.filter(\n book__parent_work=new_edition.parent_work, user=request.user\n )\n for readthrough in readthroughs.all():\n readthrough.book = new_edition\n readthrough.save()\n\n return redirect(f\"/book/{new_edition.id}\")\n", "path": "bookwyrm/views/books/editions.py" } ]
[ { "content": "\"\"\" the good stuff! the books! \"\"\"\nfrom functools import reduce\nimport operator\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.views import View\nfrom django.views.decorators.http import require_POST\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.views.helpers import is_api_request\n\n\n# pylint: disable=no-self-use\nclass Editions(View):\n \"\"\"list of editions\"\"\"\n\n def get(self, request, book_id):\n \"\"\"list of editions of a book\"\"\"\n work = get_object_or_404(models.Work, id=book_id)\n\n if is_api_request(request):\n return ActivitypubResponse(work.to_edition_list(**request.GET))\n filters = {}\n\n if request.GET.get(\"language\"):\n filters[\"languages__contains\"] = [request.GET.get(\"language\")]\n if request.GET.get(\"format\"):\n filters[\"physical_format__iexact\"] = request.GET.get(\"format\")\n\n editions = work.editions.order_by(\"-edition_rank\")\n languages = set(sum(editions.values_list(\"languages\", flat=True), []))\n\n editions = editions.filter(**filters)\n\n query = request.GET.get(\"q\")\n if query:\n searchable_array_fields = [\"languages\", \"publishers\"]\n searchable_fields = [\n \"title\",\n \"physical_format\",\n \"isbn_10\",\n \"isbn_13\",\n \"oclc_number\",\n \"asin\",\n \"aasin\",\n \"isfdb\",\n ]\n search_filter_entries = [\n {f\"{f}__icontains\": query} for f in searchable_fields\n ] + [{f\"{f}__iexact\": query} for f in searchable_array_fields]\n editions = editions.filter(\n reduce(operator.or_, (Q(**f) for f in search_filter_entries))\n )\n\n paginated = Paginator(editions, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"editions\": page,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n \"work\": work,\n \"work_form\": forms.EditionFromWorkForm(instance=work),\n \"languages\": languages,\n \"formats\": set(\n e.physical_format.lower() for e in editions if e.physical_format\n ),\n }\n return TemplateResponse(request, \"book/editions/editions.html\", data)\n\n\n@login_required\n@require_POST\[email protected]\ndef switch_edition(request):\n \"\"\"switch your copy of a book to a different edition\"\"\"\n edition_id = request.POST.get(\"edition\")\n new_edition = get_object_or_404(models.Edition, id=edition_id)\n shelfbooks = models.ShelfBook.objects.filter(\n book__parent_work=new_edition.parent_work, shelf__user=request.user\n )\n for shelfbook in shelfbooks.all():\n with transaction.atomic():\n models.ShelfBook.objects.create(\n created_date=shelfbook.created_date,\n user=shelfbook.user,\n shelf=shelfbook.shelf,\n book=new_edition,\n shelved_date=shelfbook.shelved_date,\n )\n shelfbook.delete()\n\n readthroughs = models.ReadThrough.objects.filter(\n book__parent_work=new_edition.parent_work, user=request.user\n )\n for readthrough in readthroughs.all():\n readthrough.book = new_edition\n readthrough.save()\n\n return redirect(f\"/book/{new_edition.id}\")\n", "path": "bookwyrm/views/books/editions.py" } ]
diff --git a/bookwyrm/views/books/editions.py b/bookwyrm/views/books/editions.py index 54d1bd84c1..5202531f5d 100644 --- a/bookwyrm/views/books/editions.py +++ b/bookwyrm/views/books/editions.py @@ -93,6 +93,7 @@ def switch_edition(request): user=shelfbook.user, shelf=shelfbook.shelf, book=new_edition, + shelved_date=shelfbook.shelved_date, ) shelfbook.delete()
hpcaitech__ColossalAI-5060
[tensor] fix some unittests
[BUG]: When fine-tuning with the gemini plugin, the model weights do not appear to be loaded correctly
### 🐛 Describe the bug
# Environment
```
------------ Environment ------------
Colossal-AI version: 0.3.4
PyTorch version: 2.0.1
System CUDA version: 11.7
CUDA version required by PyTorch: 11.7
```
# Bug details
The fine-tuning code is adapted from: https://github.com/hpcaitech/ColossalAI/blob/main/examples/language/llama2/finetune.py
Model loading happens at: https://github.com/hpcaitech/ColossalAI/blob/main/examples/language/llama2/finetune.py#L237
With every variable kept identical except the plugin type, the loss behaves normally with zero2, but with gemini it looks more like the optimization starts from randomly initialized weights.

With zero2, the loss starts from a fairly low level and decreases as expected:
![image](https://github.com/hpcaitech/ColossalAI/assets/26140960/5ce96b4c-b85b-432e-8ed5-2dc58c7982ed)

With gemini, the loss starts from a very high level:
![image](https://github.com/hpcaitech/ColossalAI/assets/26140960/e14c9afb-a112-44fd-a3ef-ca721d1eefa4)

### Environment
_No response_
[ { "content": "import argparse\nimport math\nimport os\nimport resource\nfrom contextlib import nullcontext\nfrom functools import partial\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom attn import SUPPORT_XFORMERS, replace_xformers\nfrom data_utils import load_json, prepare_dataloader, save_json\nfrom datasets import load_dataset\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\nfrom transformers.models.llama.configuration_llama import LlamaConfig\nfrom transformers.models.llama.modeling_llama import LlamaForCausalLM\nfrom transformers.models.llama.tokenization_llama import LlamaTokenizer\n\nimport colossalai\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin\nfrom colossalai.cluster import DistCoordinator\nfrom colossalai.lazy import LazyInitContext\nfrom colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR\nfrom colossalai.nn.optimizer import HybridAdam\nfrom colossalai.utils import get_current_device\n\n\ndef get_model_numel(model: nn.Module) -> int:\n return sum(p.numel() for p in model.parameters())\n\n\ndef format_numel_str(numel: int) -> str:\n B = 1024**3\n M = 1024**2\n K = 1024\n if numel >= B:\n return f\"{numel / B:.2f} B\"\n elif numel >= M:\n return f\"{numel / M:.2f} M\"\n elif numel >= K:\n return f\"{numel / K:.2f} K\"\n else:\n return f\"{numel}\"\n\n\ndef tokenize_batch_for_finetune(batch, tokenizer: Optional[LlamaTokenizer] = None, max_length: int = 2048):\n texts = [sample[\"prompt\"] + sample[\"completion\"] for sample in batch]\n data = tokenizer(texts, return_tensors=\"pt\", padding=\"max_length\", truncation=True, max_length=max_length)\n data = {k: v.cuda() for k, v in data.items()}\n data[\"labels\"] = data[\"input_ids\"].clone()\n return data\n\n\ndef all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:\n dist.all_reduce(tensor, op=dist.ReduceOp.SUM)\n tensor.div_(dist.get_world_size())\n return tensor\n\n\ndef save(\n booster: Booster,\n model: nn.Module,\n optimizer: Optimizer,\n lr_scheduler: _LRScheduler,\n epoch: int,\n step: int,\n batch_size: int,\n coordinator: DistCoordinator,\n save_dir: str,\n):\n save_dir = os.path.join(save_dir, f\"epoch{epoch}-step{step}\")\n os.makedirs(os.path.join(save_dir, \"model\"), exist_ok=True)\n\n booster.save_model(model, os.path.join(save_dir, \"model\"), shard=True)\n booster.save_optimizer(optimizer, os.path.join(save_dir, \"optimizer\"), shard=True)\n booster.save_lr_scheduler(lr_scheduler, os.path.join(save_dir, \"lr_scheduler\"))\n running_states = {\n \"epoch\": epoch,\n \"step\": step,\n \"sample_start_index\": step * batch_size,\n }\n if coordinator.is_master():\n save_json(running_states, os.path.join(save_dir, \"running_states.json\"))\n\n\ndef load(\n booster: Booster, model: nn.Module, optimizer: Optimizer, lr_scheduler: _LRScheduler, load_dir: str\n) -> Tuple[int, int, int]:\n booster.load_model(model, os.path.join(load_dir, \"model\"))\n booster.load_optimizer(optimizer, os.path.join(load_dir, \"optimizer\"))\n booster.load_lr_scheduler(lr_scheduler, os.path.join(load_dir, \"lr_scheduler\"))\n running_states = load_json(os.path.join(load_dir, \"running_states.json\"))\n return running_states[\"epoch\"], running_states[\"step\"], running_states[\"sample_start_index\"]\n\n\ndef _criterion(outputs, inputs):\n return outputs.loss\n\n\ndef 
main():\n # ==============================\n # Parse Arguments\n # ==============================\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model_path\", type=str, help=\"pretrained checkpoint path, used with mode==finetune\")\n parser.add_argument(\n \"-p\",\n \"--plugin\",\n choices=[\"gemini\", \"gemini_auto\", \"zero2\", \"zero2_cpu\", \"hybrid_parallel\"],\n default=\"gemini\",\n help=\"Choose which plugin to use\",\n )\n parser.add_argument(\"-d\", \"--dataset\", type=str, default=\"yizhongw/self_instruct\", help=\"Data set path\")\n parser.add_argument(\"--task_name\", type=str, default=\"super_natural_instructions\", help=\"task to run\")\n parser.add_argument(\"-e\", \"--num_epochs\", type=int, default=1, help=\"Number of epochs\")\n parser.add_argument(\"-b\", \"--batch_size\", type=int, default=2, help=\"Local batch size\")\n parser.add_argument(\"--lr\", type=float, default=3e-4, help=\"Learning rate\")\n parser.add_argument(\"-w\", \"--weigth_decay\", type=float, default=0.1, help=\"Weight decay\")\n parser.add_argument(\"-g\", \"--grad_checkpoint\", action=\"store_true\", help=\"Use gradient checkpointing\")\n parser.add_argument(\"-l\", \"--max_length\", type=int, default=4096, help=\"Max sequence length\")\n parser.add_argument(\"-x\", \"--mixed_precision\", default=\"fp16\", choices=[\"fp16\", \"bf16\"], help=\"Mixed precision\")\n parser.add_argument(\"-i\", \"--save_interval\", type=int, default=1000, help=\"Save interval\")\n parser.add_argument(\"-o\", \"--save_dir\", type=str, default=\"checkpoint\", help=\"Checkpoint directory\")\n parser.add_argument(\"-f\", \"--load\", type=str, default=None, help=\"Load checkpoint\")\n parser.add_argument(\"--grad_clip\", type=float, default=1.0, help=\"Gradient clipping\")\n parser.add_argument(\"-t\", \"--tensorboard_dir\", type=str, default=\"tb_logs\", help=\"Tensorboard directory\")\n parser.add_argument(\"-a\", \"--flash_attention\", action=\"store_true\", help=\"Use Flash Attention\")\n args = parser.parse_args()\n\n # ==============================\n # Initialize Distributed Training\n # ==============================\n colossalai.launch_from_torch({})\n coordinator = DistCoordinator()\n\n # ==============================\n # Initialize Booster\n # ==============================\n if args.plugin == \"gemini\":\n plugin = GeminiPlugin(precision=args.mixed_precision, initial_scale=2**16, max_norm=args.grad_clip)\n elif args.plugin == \"gemini_auto\":\n plugin = GeminiPlugin(\n precision=args.mixed_precision, placement_policy=\"auto\", initial_scale=2**16, max_norm=args.grad_clip\n )\n elif args.plugin == \"zero2\":\n plugin = LowLevelZeroPlugin(\n stage=2, precision=args.mixed_precision, initial_scale=2**16, max_norm=args.grad_clip\n )\n elif args.plugin == \"zero2_cpu\":\n plugin = LowLevelZeroPlugin(\n stage=2, precision=args.mixed_precision, initial_scale=2**16, cpu_offload=True, max_norm=args.grad_clip\n )\n elif args.plugin == \"hybrid_parallel\":\n # modify the param accordingly, default configuration is for llama2-7b\n plugin = HybridParallelPlugin(\n tp_size=4,\n pp_size=2,\n num_microbatches=None,\n microbatch_size=1,\n enable_jit_fused=False,\n zero_stage=0,\n precision=\"fp32\",\n initial_scale=1,\n )\n else:\n raise ValueError(f\"Unknown plugin {args.plugin}\")\n\n booster = Booster(plugin=plugin)\n\n use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1\n is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()\n print_flag = 
(not use_pipeline and coordinator.is_master()) or (use_pipeline and is_pp_last_stage)\n\n # ==============================\n # Initialize Tensorboard\n # ==============================\n if print_flag:\n os.makedirs(args.tensorboard_dir, exist_ok=True)\n writer = SummaryWriter(args.tensorboard_dir)\n\n # ==============================\n # Initialize Model, Optimizer and LR Scheduler\n # ==============================\n\n config = LlamaConfig.from_pretrained(args.model_path)\n # use lazy init when using GeminiPlugin\n init_ctx = (\n LazyInitContext(default_device=get_current_device()) if isinstance(plugin, GeminiPlugin) else nullcontext()\n )\n\n with init_ctx:\n model = LlamaForCausalLM(config)\n\n # ==============================\n # Initialize Tokenizer, Dataset and Dataloader\n # ==============================\n tokenizer = LlamaTokenizer.from_pretrained(\"hf-internal-testing/llama-tokenizer\")\n # follows fast chat: https://github.com/lm-sys/FastChat/blob/main/fastchat/train/train.py#L257\n tokenizer.pad_token = tokenizer.unk_token\n\n dataset = load_dataset(args.dataset, args.task_name)\n train_ds = dataset[\"train\"]\n dataloader = prepare_dataloader(\n train_ds,\n batch_size=args.batch_size,\n shuffle=True,\n drop_last=True,\n collate_fn=partial(tokenize_batch_for_finetune, tokenizer=tokenizer, max_length=args.max_length),\n )\n\n if args.grad_checkpoint:\n model.gradient_checkpointing_enable()\n if args.flash_attention:\n assert SUPPORT_XFORMERS, \"Use flash attention while xfomers is not installed\"\n replace_xformers(model)\n\n model_numel = get_model_numel(model)\n coordinator.print_on_master(f\"Model params: {format_numel_str(model_numel)}\")\n\n optimizer = HybridAdam(model.parameters(), lr=args.lr, betas=(0.9, 0.95), weight_decay=args.weigth_decay)\n total_step = args.num_epochs * len(dataloader)\n lr_scheduler = CosineAnnealingWarmupLR(\n optimizer, total_steps=total_step, warmup_steps=math.ceil(total_step * 0.03), eta_min=0.1 * args.lr\n )\n default_dtype = torch.float16 if args.mixed_precision == \"fp16\" else torch.bfloat16\n torch.set_default_dtype(default_dtype)\n model, optimizer, _, dataloader, lr_scheduler = booster.boost(\n model, optimizer, dataloader=dataloader, lr_scheduler=lr_scheduler\n )\n torch.set_default_dtype(torch.float)\n\n booster.load_model(model, args.model_path)\n\n coordinator.print_on_master(f\"Booster init max CUDA memory: {torch.cuda.max_memory_allocated()/1024**2:.2f} MB\")\n coordinator.print_on_master(\n f\"Booster init max CPU memory: {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024:.2f} MB\"\n )\n\n # load checkpoint if specified\n start_epoch = 0\n start_step = 0\n sampler_start_idx = 0\n if args.load is not None:\n coordinator.print_on_master(\"Loading checkpoint\")\n start_epoch, start_step, sampler_start_idx = load(booster, model, optimizer, lr_scheduler, args.load)\n coordinator.print_on_master(f\"Loaded checkpoint {args.load} at epoch {start_epoch} step {start_step}\")\n\n num_steps_per_epoch = len(dataloader)\n\n # if resume training, set the sampler start index to the correct value\n dataloader.sampler.set_start_index(sampler_start_idx)\n for epoch in range(start_epoch, args.num_epochs):\n dataloader.sampler.set_epoch(epoch)\n step_nums = num_steps_per_epoch - start_step\n dataloader_iter = iter(dataloader)\n\n with tqdm(\n range(step_nums),\n desc=f\"Epoch {epoch}\",\n disable=not print_flag,\n total=num_steps_per_epoch,\n initial=start_step,\n ) as pbar:\n for step in pbar:\n if use_pipeline:\n outputs = 
booster.execute_pipeline(\n dataloader_iter, model, _criterion, optimizer, return_loss=True, return_outputs=True\n )\n loss = outputs[\"loss\"]\n else:\n batch = next(dataloader_iter)\n outputs = model(**batch)\n loss = outputs[0]\n booster.backward(loss, optimizer)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n\n if not use_pipeline:\n all_reduce_mean(loss)\n if print_flag:\n pbar.set_postfix({\"loss\": loss.item()})\n writer.add_scalar(\"loss\", loss.item(), epoch * num_steps_per_epoch + step)\n\n if args.save_interval > 0 and (step + 1) % args.save_interval == 0:\n coordinator.print_on_master(f\"Saving checkpoint\")\n save(\n booster,\n model,\n optimizer,\n lr_scheduler,\n epoch,\n step + 1,\n args.batch_size,\n coordinator,\n args.save_dir,\n )\n coordinator.print_on_master(f\"Saved checkpoint at epoch {epoch} step {step + 1}\")\n # the continue epochs are not resumed, so we need to reset the sampler start index and start step\n dataloader.sampler.set_start_index(0)\n start_step = 0\n\n coordinator.print_on_master(f\"Max CUDA memory usage: {torch.cuda.max_memory_allocated()/1024**2:.2f} MB\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/language/llama2/finetune.py" } ]
[ { "content": "import argparse\nimport math\nimport os\nimport resource\nfrom contextlib import nullcontext\nfrom functools import partial\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom attn import SUPPORT_XFORMERS, replace_xformers\nfrom data_utils import load_json, prepare_dataloader, save_json\nfrom datasets import load_dataset\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\nfrom transformers.models.llama.configuration_llama import LlamaConfig\nfrom transformers.models.llama.modeling_llama import LlamaForCausalLM\nfrom transformers.models.llama.tokenization_llama import LlamaTokenizer\n\nimport colossalai\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin\nfrom colossalai.cluster import DistCoordinator\nfrom colossalai.lazy import LazyInitContext\nfrom colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR\nfrom colossalai.nn.optimizer import HybridAdam\nfrom colossalai.utils import get_current_device\n\n\ndef get_model_numel(model: nn.Module) -> int:\n return sum(p.numel() for p in model.parameters())\n\n\ndef format_numel_str(numel: int) -> str:\n B = 1024**3\n M = 1024**2\n K = 1024\n if numel >= B:\n return f\"{numel / B:.2f} B\"\n elif numel >= M:\n return f\"{numel / M:.2f} M\"\n elif numel >= K:\n return f\"{numel / K:.2f} K\"\n else:\n return f\"{numel}\"\n\n\ndef tokenize_batch_for_finetune(batch, tokenizer: Optional[LlamaTokenizer] = None, max_length: int = 2048):\n texts = [sample[\"prompt\"] + sample[\"completion\"] for sample in batch]\n data = tokenizer(texts, return_tensors=\"pt\", padding=\"max_length\", truncation=True, max_length=max_length)\n data = {k: v.cuda() for k, v in data.items()}\n data[\"labels\"] = data[\"input_ids\"].clone()\n return data\n\n\ndef all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:\n dist.all_reduce(tensor, op=dist.ReduceOp.SUM)\n tensor = tensor.data\n tensor.div_(dist.get_world_size())\n return tensor\n\n\ndef save(\n booster: Booster,\n model: nn.Module,\n optimizer: Optimizer,\n lr_scheduler: _LRScheduler,\n epoch: int,\n step: int,\n batch_size: int,\n coordinator: DistCoordinator,\n save_dir: str,\n):\n save_dir = os.path.join(save_dir, f\"epoch{epoch}-step{step}\")\n os.makedirs(os.path.join(save_dir, \"model\"), exist_ok=True)\n\n booster.save_model(model, os.path.join(save_dir, \"model\"), shard=True)\n booster.save_optimizer(optimizer, os.path.join(save_dir, \"optimizer\"), shard=True)\n booster.save_lr_scheduler(lr_scheduler, os.path.join(save_dir, \"lr_scheduler\"))\n running_states = {\n \"epoch\": epoch,\n \"step\": step,\n \"sample_start_index\": step * batch_size,\n }\n if coordinator.is_master():\n save_json(running_states, os.path.join(save_dir, \"running_states.json\"))\n\n\ndef load(\n booster: Booster, model: nn.Module, optimizer: Optimizer, lr_scheduler: _LRScheduler, load_dir: str\n) -> Tuple[int, int, int]:\n booster.load_model(model, os.path.join(load_dir, \"model\"))\n booster.load_optimizer(optimizer, os.path.join(load_dir, \"optimizer\"))\n booster.load_lr_scheduler(lr_scheduler, os.path.join(load_dir, \"lr_scheduler\"))\n running_states = load_json(os.path.join(load_dir, \"running_states.json\"))\n return running_states[\"epoch\"], running_states[\"step\"], running_states[\"sample_start_index\"]\n\n\ndef _criterion(outputs, inputs):\n return 
outputs.loss\n\n\ndef main():\n # ==============================\n # Parse Arguments\n # ==============================\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model_path\", type=str, help=\"pretrained checkpoint path, used with mode==finetune\")\n parser.add_argument(\n \"-p\",\n \"--plugin\",\n choices=[\"gemini\", \"gemini_auto\", \"zero2\", \"zero2_cpu\", \"hybrid_parallel\"],\n default=\"gemini\",\n help=\"Choose which plugin to use\",\n )\n parser.add_argument(\"-d\", \"--dataset\", type=str, default=\"yizhongw/self_instruct\", help=\"Data set path\")\n parser.add_argument(\"--task_name\", type=str, default=\"super_natural_instructions\", help=\"task to run\")\n parser.add_argument(\"-e\", \"--num_epochs\", type=int, default=1, help=\"Number of epochs\")\n parser.add_argument(\"-b\", \"--batch_size\", type=int, default=2, help=\"Local batch size\")\n parser.add_argument(\"--lr\", type=float, default=3e-4, help=\"Learning rate\")\n parser.add_argument(\"-w\", \"--weigth_decay\", type=float, default=0.1, help=\"Weight decay\")\n parser.add_argument(\"-g\", \"--grad_checkpoint\", action=\"store_true\", help=\"Use gradient checkpointing\")\n parser.add_argument(\"-l\", \"--max_length\", type=int, default=4096, help=\"Max sequence length\")\n parser.add_argument(\"-x\", \"--mixed_precision\", default=\"fp16\", choices=[\"fp16\", \"bf16\"], help=\"Mixed precision\")\n parser.add_argument(\"-i\", \"--save_interval\", type=int, default=1000, help=\"Save interval\")\n parser.add_argument(\"-o\", \"--save_dir\", type=str, default=\"checkpoint\", help=\"Checkpoint directory\")\n parser.add_argument(\"-f\", \"--load\", type=str, default=None, help=\"Load checkpoint\")\n parser.add_argument(\"--grad_clip\", type=float, default=1.0, help=\"Gradient clipping\")\n parser.add_argument(\"-t\", \"--tensorboard_dir\", type=str, default=\"tb_logs\", help=\"Tensorboard directory\")\n parser.add_argument(\"-a\", \"--flash_attention\", action=\"store_true\", help=\"Use Flash Attention\")\n args = parser.parse_args()\n\n # ==============================\n # Initialize Distributed Training\n # ==============================\n colossalai.launch_from_torch({})\n coordinator = DistCoordinator()\n\n # ==============================\n # Initialize Booster\n # ==============================\n if args.plugin == \"gemini\":\n plugin = GeminiPlugin(precision=args.mixed_precision, initial_scale=2**16, max_norm=args.grad_clip)\n elif args.plugin == \"gemini_auto\":\n plugin = GeminiPlugin(\n precision=args.mixed_precision, placement_policy=\"auto\", initial_scale=2**16, max_norm=args.grad_clip\n )\n elif args.plugin == \"zero2\":\n plugin = LowLevelZeroPlugin(\n stage=2, precision=args.mixed_precision, initial_scale=2**16, max_norm=args.grad_clip\n )\n elif args.plugin == \"zero2_cpu\":\n plugin = LowLevelZeroPlugin(\n stage=2, precision=args.mixed_precision, initial_scale=2**16, cpu_offload=True, max_norm=args.grad_clip\n )\n elif args.plugin == \"hybrid_parallel\":\n # modify the param accordingly, default configuration is for llama2-7b\n plugin = HybridParallelPlugin(\n tp_size=4,\n pp_size=2,\n num_microbatches=None,\n microbatch_size=1,\n enable_jit_fused=False,\n zero_stage=0,\n precision=\"fp32\",\n initial_scale=1,\n )\n else:\n raise ValueError(f\"Unknown plugin {args.plugin}\")\n\n booster = Booster(plugin=plugin)\n\n use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1\n is_pp_last_stage = use_pipeline and 
booster.plugin.stage_manager.is_last_stage()\n print_flag = (not use_pipeline and coordinator.is_master()) or (use_pipeline and is_pp_last_stage)\n\n # ==============================\n # Initialize Tensorboard\n # ==============================\n if print_flag:\n os.makedirs(args.tensorboard_dir, exist_ok=True)\n writer = SummaryWriter(args.tensorboard_dir)\n\n # ==============================\n # Initialize Model, Optimizer and LR Scheduler\n # ==============================\n\n config = LlamaConfig.from_pretrained(args.model_path)\n # use lazy init when using GeminiPlugin\n init_ctx = (\n LazyInitContext(default_device=get_current_device()) if isinstance(plugin, GeminiPlugin) else nullcontext()\n )\n\n with init_ctx:\n model = LlamaForCausalLM(config)\n\n # ==============================\n # Initialize Tokenizer, Dataset and Dataloader\n # ==============================\n tokenizer = LlamaTokenizer.from_pretrained(\"hf-internal-testing/llama-tokenizer\")\n # follows fast chat: https://github.com/lm-sys/FastChat/blob/main/fastchat/train/train.py#L257\n tokenizer.pad_token = tokenizer.unk_token\n\n dataset = load_dataset(args.dataset, args.task_name)\n train_ds = dataset[\"train\"]\n dataloader = prepare_dataloader(\n train_ds,\n batch_size=args.batch_size,\n shuffle=True,\n drop_last=True,\n collate_fn=partial(tokenize_batch_for_finetune, tokenizer=tokenizer, max_length=args.max_length),\n )\n\n if args.grad_checkpoint:\n model.gradient_checkpointing_enable()\n if args.flash_attention:\n assert SUPPORT_XFORMERS, \"Use flash attention while xfomers is not installed\"\n replace_xformers(model)\n\n model_numel = get_model_numel(model)\n coordinator.print_on_master(f\"Model params: {format_numel_str(model_numel)}\")\n\n optimizer = HybridAdam(model.parameters(), lr=args.lr, betas=(0.9, 0.95), weight_decay=args.weigth_decay)\n total_step = args.num_epochs * len(dataloader)\n lr_scheduler = CosineAnnealingWarmupLR(\n optimizer, total_steps=total_step, warmup_steps=math.ceil(total_step * 0.03), eta_min=0.1 * args.lr\n )\n default_dtype = torch.float16 if args.mixed_precision == \"fp16\" else torch.bfloat16\n torch.set_default_dtype(default_dtype)\n model, optimizer, _, dataloader, lr_scheduler = booster.boost(\n model, optimizer, dataloader=dataloader, lr_scheduler=lr_scheduler\n )\n torch.set_default_dtype(torch.float)\n\n booster.load_model(model, args.model_path)\n\n coordinator.print_on_master(f\"Booster init max CUDA memory: {torch.cuda.max_memory_allocated()/1024**2:.2f} MB\")\n coordinator.print_on_master(\n f\"Booster init max CPU memory: {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024:.2f} MB\"\n )\n\n # load checkpoint if specified\n start_epoch = 0\n start_step = 0\n sampler_start_idx = 0\n if args.load is not None:\n coordinator.print_on_master(\"Loading checkpoint\")\n start_epoch, start_step, sampler_start_idx = load(booster, model, optimizer, lr_scheduler, args.load)\n coordinator.print_on_master(f\"Loaded checkpoint {args.load} at epoch {start_epoch} step {start_step}\")\n\n num_steps_per_epoch = len(dataloader)\n\n # if resume training, set the sampler start index to the correct value\n dataloader.sampler.set_start_index(sampler_start_idx)\n for epoch in range(start_epoch, args.num_epochs):\n dataloader.sampler.set_epoch(epoch)\n step_nums = num_steps_per_epoch - start_step\n dataloader_iter = iter(dataloader)\n\n with tqdm(\n range(step_nums),\n desc=f\"Epoch {epoch}\",\n disable=not print_flag,\n total=num_steps_per_epoch,\n initial=start_step,\n ) as pbar:\n for step 
in pbar:\n if use_pipeline:\n outputs = booster.execute_pipeline(\n dataloader_iter, model, _criterion, optimizer, return_loss=True, return_outputs=True\n )\n loss = outputs[\"loss\"]\n else:\n batch = next(dataloader_iter)\n outputs = model(**batch)\n loss = outputs[0]\n booster.backward(loss, optimizer)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n\n if not use_pipeline:\n all_reduce_mean(loss)\n if print_flag:\n pbar.set_postfix({\"loss\": loss.item()})\n writer.add_scalar(\"loss\", loss.item(), epoch * num_steps_per_epoch + step)\n\n if args.save_interval > 0 and (step + 1) % args.save_interval == 0:\n coordinator.print_on_master(f\"Saving checkpoint\")\n save(\n booster,\n model,\n optimizer,\n lr_scheduler,\n epoch,\n step + 1,\n args.batch_size,\n coordinator,\n args.save_dir,\n )\n coordinator.print_on_master(f\"Saved checkpoint at epoch {epoch} step {step + 1}\")\n # the continue epochs are not resumed, so we need to reset the sampler start index and start step\n dataloader.sampler.set_start_index(0)\n start_step = 0\n\n coordinator.print_on_master(f\"Max CUDA memory usage: {torch.cuda.max_memory_allocated()/1024**2:.2f} MB\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/language/llama2/finetune.py" } ]
diff --git a/examples/language/llama2/finetune.py b/examples/language/llama2/finetune.py index 33aa1d33e6ba..f7708b1a38ab 100644 --- a/examples/language/llama2/finetune.py +++ b/examples/language/llama2/finetune.py @@ -58,6 +58,7 @@ def tokenize_batch_for_finetune(batch, tokenizer: Optional[LlamaTokenizer] = Non def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor: dist.all_reduce(tensor, op=dist.ReduceOp.SUM) + tensor = tensor.data tensor.div_(dist.get_world_size()) return tensor
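The diff above is the entire change for this record: `all_reduce_mean` now reassigns `tensor = tensor.data` before the in-place division. Below is a minimal sketch of that pattern; the single-process `gloo` group and the constant stand-in for the loss are only there to make the snippet runnable without a distributed launcher and are not part of the original training script.

```python
import os

import torch
import torch.distributed as dist


def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
    # Sum the value across all ranks, then divide by the world size to get the mean.
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    # `.data` shares storage with `tensor` but is not tracked by autograd, so the
    # in-place division below updates the value without adding an op to the graph.
    tensor = tensor.data
    tensor.div_(dist.get_world_size())
    return tensor


if __name__ == "__main__":
    # One-process group, just so the sketch runs locally without torchrun.
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29511")
    dist.init_process_group(backend="gloo", rank=0, world_size=1)

    loss = torch.tensor(6.0)  # stand-in for the training loss, which requires grad in the real script
    print(all_reduce_mean(loss))  # tensor(6.) with world_size == 1

    dist.destroy_process_group()
```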
enthought__chaco-884
ImportError: cannot import name 'BaseTool' from 'chaco.api'
**Problem Description**
`ImportError: cannot import name 'BaseTool' from 'chaco.api'` is raised when running chaco/examples/demo/canvas/mptools.py.
**Reproduction Steps:**
python chaco/examples/demo/canvas/mptools.py
**OS, Python version:** macOS; Python 3.11 and Python 3.8 (with EDM)
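As the patch recorded below shows, the fix is to import `BaseTool` from `enable.api` (where it still lives) rather than `chaco.api`, which no longer re-exports it. Here is a minimal sketch confirming the corrected import; the `NoOpTool` subclass is hypothetical and only demonstrates that the class is usable (it assumes the `enable` package is installed, which Chaco already depends on).

```python
# Corrected import: chaco.api no longer re-exports BaseTool, but enable.api provides it.
from enable.api import BaseTool


class NoOpTool(BaseTool):
    """Hypothetical tool that swallows left-clicks and does nothing else."""

    def normal_left_down(self, event):
        # BaseTool dispatches "<state>_<event>" handlers such as this one.
        event.handled = True


if __name__ == "__main__":
    print(NoOpTool.__mro__)  # resolves without touching chaco.api
```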
[ { "content": "\"\"\"\nA collection of Chaco tools that respond to a multi-pointer interface.\n\"\"\"\nfrom numpy import asarray, dot, sqrt\n\n# Enthought library imports\nfrom traits.api import (\n Delegate,\n Dict,\n Enum,\n Instance,\n Int,\n Property,\n Tuple,\n CArray,\n)\n\n# Chaco imports\nfrom chaco.api import BaseTool\nfrom chaco.chaco_traits import Optional\nfrom chaco.tools.api import PanTool, DragZoom, LegendTool, RangeSelection\n\n\nBOGUS_BLOB_ID = -1\n\n\ndef l2norm(v):\n return sqrt(dot(v, v))\n\n\nclass MPPanTool(PanTool):\n cur_bid = Int(BOGUS_BLOB_ID)\n\n def normal_blob_down(self, event):\n if self.cur_bid == BOGUS_BLOB_ID:\n self.cur_bid = event.bid\n self._start_pan(event, capture_mouse=False)\n event.window.capture_blob(self, event.bid, event.net_transform())\n\n def panning_blob_up(self, event):\n if event.bid == self.cur_bid:\n self.cur_bid = BOGUS_BLOB_ID\n self._end_pan(event)\n\n def panning_blob_move(self, event):\n if event.bid == self.cur_bid:\n self._dispatch_stateful_event(event, \"mouse_move\")\n\n def panning_mouse_leave(self, event):\n \"\"\"Handles the mouse leaving the plot when the tool is in the 'panning'\n state.\n\n Don't end panning.\n \"\"\"\n return\n\n def _end_pan(self, event):\n if hasattr(event, \"bid\"):\n event.window.release_blob(event.bid)\n PanTool._end_pan(self, event)\n\n\nclass MPDragZoom(DragZoom):\n\n speed = 1.0\n\n # The original dataspace points where blobs 1 and 2 went down\n _orig_low = CArray\n _orig_high = CArray\n\n # Dataspace center of the zoom action\n _center_pt = Optional(Tuple)\n\n # Maps blob ID numbers to the (x,y) coordinates that came in.\n _blobs = Dict()\n\n # Maps blob ID numbers to the (x0,y0) coordinates from blob_move events.\n _moves = Dict()\n\n # Properties to convert the dictionaries to map from blob ID numbers to\n # a single coordinate appropriate for the axis the range selects on.\n _axis_blobs = Property(Dict)\n _axis_moves = Property(Dict)\n\n def _convert_to_axis(self, d):\n \"\"\"Convert a mapping of ID to (x,y) tuple to a mapping of ID to just\n the coordinate appropriate for the selected axis.\n \"\"\"\n if self.axis == \"index\":\n idx = self.axis_index\n else:\n idx = 1 - self.axis_index\n d2 = {}\n for id, coords in list(d.items()):\n d2[id] = coords[idx]\n return d2\n\n def _get__axis_blobs(self):\n return self._convert_to_axis(self._blobs)\n\n def _get__axis_moves(self):\n return self._convert_to_axis(self._moves)\n\n def drag_start(self, event, capture_mouse=False):\n bid1, bid2 = sorted(self._moves)\n xy01, xy02 = self._moves[bid1], self._moves[bid2]\n self._orig_low, self._orig_high = list(\n map(asarray, self._map_coordinate_box(xy01, xy02))\n )\n self.orig_center = (self._orig_high + self._orig_low) / 2.0\n self.orig_diag = l2norm(self._orig_high - self._orig_low)\n\n # DragZoom.drag_start(self, event, capture_mouse)\n self._original_xy = xy02\n c = self.component\n self._orig_screen_bounds = ((c.x, c.y), (c.x2, c.y2))\n self._original_data = (\n c.x_mapper.map_data(xy02[0]),\n c.y_mapper.map_data(xy02[1]),\n )\n self._prev_y = xy02[1]\n if capture_mouse:\n event.window.set_pointer(self.drag_pointer)\n\n def normal_blob_down(self, event):\n if len(self._blobs) < 2:\n self._blobs[event.bid] = (event.x, event.y)\n event.window.capture_blob(\n self, event.bid, transform=event.net_transform()\n )\n event.handled = True\n\n def normal_blob_up(self, event):\n self._handle_blob_leave(event)\n\n def normal_blob_move(self, event):\n self._handle_blob_move(event)\n\n def 
normal_blob_frame_end(self, event):\n if len(self._moves) == 2:\n self.event_state = \"dragging\"\n self.drag_start(event, capture_mouse=False)\n\n def dragging_blob_move(self, event):\n self._handle_blob_move(event)\n\n def dragging_blob_frame_end(self, event):\n # Get dataspace coordinates of the previous and new coordinates\n bid1, bid2 = sorted(self._moves)\n p1, p2 = self._blobs[bid1], self._blobs[bid2]\n low, high = list(map(asarray, self._map_coordinate_box(p1, p2)))\n\n # Compute the amount of translation\n center = (high + low) / 2.0\n translation = center - self.orig_center\n\n # Computing the zoom factor. We have the coordinates of the original\n # blob_down events, and we have a new box as well. For now, just use\n # the relative sizes of the diagonals.\n diag = l2norm(high - low)\n zoom = self.speed * self.orig_diag / diag\n\n # The original screen bounds are used to test if we've reached max_zoom\n orig_screen_low, orig_screen_high = list(\n map(asarray, self._map_coordinate_box(*self._orig_screen_bounds))\n )\n new_low = center - zoom * (center - orig_screen_low) - translation\n new_high = center + zoom * (orig_screen_high - center) - translation\n\n for ndx in (0, 1):\n if self._zoom_limit_reached(\n orig_screen_low[ndx],\n orig_screen_high[ndx],\n new_low[ndx],\n new_high[ndx],\n ):\n return\n\n c = self.component\n c.x_mapper.range.set_bounds(new_low[0], new_high[0])\n c.y_mapper.range.set_bounds(new_low[1], new_high[1])\n\n self.component.request_redraw()\n\n def dragging_blob_up(self, event):\n self._handle_blob_leave(event)\n\n def _handle_blob_move(self, event):\n if event.bid not in self._blobs:\n return\n self._blobs[event.bid] = event.x, event.y\n self._moves[event.bid] = event.x0, event.y0\n event.handled = True\n\n def _handle_blob_leave(self, event):\n if event.bid in self._blobs:\n del self._blobs[event.bid]\n self._moves.pop(event.bid, None)\n event.window.release_blob(event.bid)\n if len(self._blobs) < 2:\n self.event_state = \"normal\"\n\n\nclass MPPanZoom(BaseTool):\n \"\"\"This tool wraps a pan and a zoom tool, and automatically switches\n behavior back and forth depending on how many blobs are tracked on\n screen.\n \"\"\"\n\n pan = Instance(MPPanTool)\n\n zoom = Instance(MPDragZoom)\n\n event_state = Enum(\"normal\", \"pan\", \"zoom\")\n\n _blobs = Delegate(\"zoom\")\n _moves = Delegate(\"zoom\")\n\n def _dispatch_stateful_event(self, event, suffix):\n self.zoom.dispatch(event, suffix)\n event.handled = False\n self.pan.dispatch(event, suffix)\n if len(self._blobs) == 2:\n self.event_state = \"zoom\"\n elif len(self._blobs) == 1:\n self.event_state = \"pan\"\n elif len(self._blobs) == 0:\n self.event_state = \"normal\"\n else:\n assert len(self._blobs) <= 2\n if suffix == \"blob_up\":\n event.window.release_blob(event.bid)\n elif suffix == \"blob_down\":\n event.window.release_blob(event.bid)\n event.window.capture_blob(self, event.bid, event.net_transform())\n event.handled = True\n\n def _component_changed(self, old, new):\n self.pan.component = new\n self.zoom.component = new\n\n def _pan_default(self):\n return MPPanTool(self.component)\n\n def _zoom_default(self):\n return MPDragZoom(self.component)\n\n\nclass MPLegendTool(LegendTool):\n\n event_state = Enum(\"normal\", \"dragging\")\n\n cur_bid = Int(-1)\n\n def normal_blob_down(self, event):\n if self.cur_bid == -1 and self.is_draggable(event.x, event.y):\n self.cur_bid = event.bid\n self.drag_start(event)\n\n def dragging_blob_up(self, event):\n if event.bid == self.cur_bid:\n self.cur_bid = -1\n 
self.drag_end(event)\n\n def dragging_blob_move(self, event):\n if event.bid == self.cur_bid:\n self.dragging(event)\n\n def drag_start(self, event):\n if self.component:\n self.original_padding = self.component.padding\n if hasattr(event, \"bid\"):\n event.window.capture_blob(\n self, event.bid, event.net_transform()\n )\n else:\n event.window.set_mouse_owner(self, event.net_transform())\n self.mouse_down_position = (event.x, event.y)\n self.event_state = \"dragging\"\n event.handled = True\n\n def drag_end(self, event):\n if hasattr(event, \"bid\"):\n event.window.release_blob(event.bid)\n self.event_state = \"normal\"\n LegendTool.drag_end(self, event)\n\n\nclass MPRangeSelection(RangeSelection):\n\n # Maps blob ID numbers to the (x,y) coordinates that came in.\n _blobs = Dict()\n\n # Maps blob ID numbers to the (x0,y0) coordinates from blob_move events.\n _moves = Dict()\n\n # Properties to convert the dictionaries to map from blob ID numbers to\n # a single coordinate appropriate for the axis the range selects on.\n _axis_blobs = Property(Dict)\n _axis_moves = Property(Dict)\n\n def _convert_to_axis(self, d):\n \"\"\"Convert a mapping of ID to (x,y) tuple to a mapping of ID to just\n the coordinate appropriate for the selected axis.\n \"\"\"\n if self.axis == \"index\":\n idx = self.axis_index\n else:\n idx = 1 - self.axis_index\n d2 = {}\n for id, coords in list(d.items()):\n d2[id] = coords[idx]\n return d2\n\n def _get__axis_blobs(self):\n return self._convert_to_axis(self._blobs)\n\n def _get__axis_moves(self):\n return self._convert_to_axis(self._moves)\n\n def normal_blob_down(self, event):\n if len(self._blobs) < 2:\n self._blobs[event.bid] = (event.x, event.y)\n event.window.capture_blob(\n self, event.bid, transform=event.net_transform()\n )\n event.handled = True\n\n def normal_blob_up(self, event):\n self._handle_blob_leave(event)\n\n def normal_blob_frame_end(self, event):\n if len(self._blobs) == 2:\n self.event_state = \"selecting\"\n # self.drag_start(event, capture_mouse=False)\n # self.selecting_mouse_move(event)\n self._set_sizing_cursor(event)\n self.selection = sorted(self._axis_blobs.values())\n\n def selecting_blob_move(self, event):\n if event.bid in self._blobs:\n self._blobs[event.bid] = event.x, event.y\n self._moves[event.bid] = event.x0, event.y0\n\n def selecting_blob_up(self, event):\n self._handle_blob_leave(event)\n\n def selecting_blob_frame_end(self, event):\n if self.selection is None:\n return\n elif len(self._blobs) == 2:\n axis_index = self.axis_index\n low = self.plot.position[axis_index]\n high = low + self.plot.bounds[axis_index] - 1\n p1, p2 = list(self._axis_blobs.values())\n # XXX: what if p1 or p2 is out of bounds?\n m1 = self.mapper.map_data(p1)\n m2 = self.mapper.map_data(p2)\n low_val = min(m1, m2)\n high_val = max(m1, m2)\n self.selection = (low_val, high_val)\n self.component.request_redraw()\n elif len(self._moves) == 1:\n id, p0 = list(self._axis_moves.items())[0]\n m0 = self.mapper.map_data(p0)\n low, high = self.selection\n if low <= m0 <= high:\n m1 = self.mapper.map_data(self._axis_blobs[id])\n dm = m1 - m0\n self.selection = (low + dm, high + dm)\n\n def selected_blob_down(self, event):\n if len(self._blobs) < 2:\n self._blobs[event.bid] = (event.x, event.y)\n event.window.capture_blob(\n self, event.bid, transform=event.net_transform()\n )\n event.handled = True\n\n def selected_blob_move(self, event):\n if event.bid in self._blobs:\n self._blobs[event.bid] = event.x, event.y\n self._moves[event.bid] = event.x0, event.y0\n\n 
def selected_blob_frame_end(self, event):\n self.selecting_blob_frame_end(event)\n\n def selected_blob_up(self, event):\n self._handle_blob_leave(event)\n\n def _handle_blob_leave(self, event):\n self._moves.pop(event.bid, None)\n if event.bid in self._blobs:\n del self._blobs[event.bid]\n event.window.release_blob(event.bid)\n\n # Treat the blob leave as a selecting_mouse_up event\n self.selecting_right_up(event)\n\n if len(self._blobs) < 2:\n self.event_state = \"selected\"\n", "path": "examples/demo/canvas/mptools.py" } ]
[ { "content": "\"\"\"\nA collection of Chaco tools that respond to a multi-pointer interface.\n\"\"\"\nfrom numpy import asarray, dot, sqrt\n\n# Enthought library imports\nfrom traits.api import (\n Delegate,\n Dict,\n Enum,\n Instance,\n Int,\n Property,\n Tuple,\n CArray,\n)\n\nfrom enable.api import BaseTool\n\n# Chaco imports\nfrom chaco.chaco_traits import Optional\nfrom chaco.tools.api import PanTool, DragZoom, LegendTool, RangeSelection\n\n\nBOGUS_BLOB_ID = -1\n\n\ndef l2norm(v):\n return sqrt(dot(v, v))\n\n\nclass MPPanTool(PanTool):\n cur_bid = Int(BOGUS_BLOB_ID)\n\n def normal_blob_down(self, event):\n if self.cur_bid == BOGUS_BLOB_ID:\n self.cur_bid = event.bid\n self._start_pan(event, capture_mouse=False)\n event.window.capture_blob(self, event.bid, event.net_transform())\n\n def panning_blob_up(self, event):\n if event.bid == self.cur_bid:\n self.cur_bid = BOGUS_BLOB_ID\n self._end_pan(event)\n\n def panning_blob_move(self, event):\n if event.bid == self.cur_bid:\n self._dispatch_stateful_event(event, \"mouse_move\")\n\n def panning_mouse_leave(self, event):\n \"\"\"Handles the mouse leaving the plot when the tool is in the 'panning'\n state.\n\n Don't end panning.\n \"\"\"\n return\n\n def _end_pan(self, event):\n if hasattr(event, \"bid\"):\n event.window.release_blob(event.bid)\n PanTool._end_pan(self, event)\n\n\nclass MPDragZoom(DragZoom):\n\n speed = 1.0\n\n # The original dataspace points where blobs 1 and 2 went down\n _orig_low = CArray\n _orig_high = CArray\n\n # Dataspace center of the zoom action\n _center_pt = Optional(Tuple)\n\n # Maps blob ID numbers to the (x,y) coordinates that came in.\n _blobs = Dict()\n\n # Maps blob ID numbers to the (x0,y0) coordinates from blob_move events.\n _moves = Dict()\n\n # Properties to convert the dictionaries to map from blob ID numbers to\n # a single coordinate appropriate for the axis the range selects on.\n _axis_blobs = Property(Dict)\n _axis_moves = Property(Dict)\n\n def _convert_to_axis(self, d):\n \"\"\"Convert a mapping of ID to (x,y) tuple to a mapping of ID to just\n the coordinate appropriate for the selected axis.\n \"\"\"\n if self.axis == \"index\":\n idx = self.axis_index\n else:\n idx = 1 - self.axis_index\n d2 = {}\n for id, coords in list(d.items()):\n d2[id] = coords[idx]\n return d2\n\n def _get__axis_blobs(self):\n return self._convert_to_axis(self._blobs)\n\n def _get__axis_moves(self):\n return self._convert_to_axis(self._moves)\n\n def drag_start(self, event, capture_mouse=False):\n bid1, bid2 = sorted(self._moves)\n xy01, xy02 = self._moves[bid1], self._moves[bid2]\n self._orig_low, self._orig_high = list(\n map(asarray, self._map_coordinate_box(xy01, xy02))\n )\n self.orig_center = (self._orig_high + self._orig_low) / 2.0\n self.orig_diag = l2norm(self._orig_high - self._orig_low)\n\n # DragZoom.drag_start(self, event, capture_mouse)\n self._original_xy = xy02\n c = self.component\n self._orig_screen_bounds = ((c.x, c.y), (c.x2, c.y2))\n self._original_data = (\n c.x_mapper.map_data(xy02[0]),\n c.y_mapper.map_data(xy02[1]),\n )\n self._prev_y = xy02[1]\n if capture_mouse:\n event.window.set_pointer(self.drag_pointer)\n\n def normal_blob_down(self, event):\n if len(self._blobs) < 2:\n self._blobs[event.bid] = (event.x, event.y)\n event.window.capture_blob(\n self, event.bid, transform=event.net_transform()\n )\n event.handled = True\n\n def normal_blob_up(self, event):\n self._handle_blob_leave(event)\n\n def normal_blob_move(self, event):\n self._handle_blob_move(event)\n\n def 
normal_blob_frame_end(self, event):\n if len(self._moves) == 2:\n self.event_state = \"dragging\"\n self.drag_start(event, capture_mouse=False)\n\n def dragging_blob_move(self, event):\n self._handle_blob_move(event)\n\n def dragging_blob_frame_end(self, event):\n # Get dataspace coordinates of the previous and new coordinates\n bid1, bid2 = sorted(self._moves)\n p1, p2 = self._blobs[bid1], self._blobs[bid2]\n low, high = list(map(asarray, self._map_coordinate_box(p1, p2)))\n\n # Compute the amount of translation\n center = (high + low) / 2.0\n translation = center - self.orig_center\n\n # Computing the zoom factor. We have the coordinates of the original\n # blob_down events, and we have a new box as well. For now, just use\n # the relative sizes of the diagonals.\n diag = l2norm(high - low)\n zoom = self.speed * self.orig_diag / diag\n\n # The original screen bounds are used to test if we've reached max_zoom\n orig_screen_low, orig_screen_high = list(\n map(asarray, self._map_coordinate_box(*self._orig_screen_bounds))\n )\n new_low = center - zoom * (center - orig_screen_low) - translation\n new_high = center + zoom * (orig_screen_high - center) - translation\n\n for ndx in (0, 1):\n if self._zoom_limit_reached(\n orig_screen_low[ndx],\n orig_screen_high[ndx],\n new_low[ndx],\n new_high[ndx],\n ):\n return\n\n c = self.component\n c.x_mapper.range.set_bounds(new_low[0], new_high[0])\n c.y_mapper.range.set_bounds(new_low[1], new_high[1])\n\n self.component.request_redraw()\n\n def dragging_blob_up(self, event):\n self._handle_blob_leave(event)\n\n def _handle_blob_move(self, event):\n if event.bid not in self._blobs:\n return\n self._blobs[event.bid] = event.x, event.y\n self._moves[event.bid] = event.x0, event.y0\n event.handled = True\n\n def _handle_blob_leave(self, event):\n if event.bid in self._blobs:\n del self._blobs[event.bid]\n self._moves.pop(event.bid, None)\n event.window.release_blob(event.bid)\n if len(self._blobs) < 2:\n self.event_state = \"normal\"\n\n\nclass MPPanZoom(BaseTool):\n \"\"\"This tool wraps a pan and a zoom tool, and automatically switches\n behavior back and forth depending on how many blobs are tracked on\n screen.\n \"\"\"\n\n pan = Instance(MPPanTool)\n\n zoom = Instance(MPDragZoom)\n\n event_state = Enum(\"normal\", \"pan\", \"zoom\")\n\n _blobs = Delegate(\"zoom\")\n _moves = Delegate(\"zoom\")\n\n def _dispatch_stateful_event(self, event, suffix):\n self.zoom.dispatch(event, suffix)\n event.handled = False\n self.pan.dispatch(event, suffix)\n if len(self._blobs) == 2:\n self.event_state = \"zoom\"\n elif len(self._blobs) == 1:\n self.event_state = \"pan\"\n elif len(self._blobs) == 0:\n self.event_state = \"normal\"\n else:\n assert len(self._blobs) <= 2\n if suffix == \"blob_up\":\n event.window.release_blob(event.bid)\n elif suffix == \"blob_down\":\n event.window.release_blob(event.bid)\n event.window.capture_blob(self, event.bid, event.net_transform())\n event.handled = True\n\n def _component_changed(self, old, new):\n self.pan.component = new\n self.zoom.component = new\n\n def _pan_default(self):\n return MPPanTool(self.component)\n\n def _zoom_default(self):\n return MPDragZoom(self.component)\n\n\nclass MPLegendTool(LegendTool):\n\n event_state = Enum(\"normal\", \"dragging\")\n\n cur_bid = Int(-1)\n\n def normal_blob_down(self, event):\n if self.cur_bid == -1 and self.is_draggable(event.x, event.y):\n self.cur_bid = event.bid\n self.drag_start(event)\n\n def dragging_blob_up(self, event):\n if event.bid == self.cur_bid:\n self.cur_bid = -1\n 
self.drag_end(event)\n\n def dragging_blob_move(self, event):\n if event.bid == self.cur_bid:\n self.dragging(event)\n\n def drag_start(self, event):\n if self.component:\n self.original_padding = self.component.padding\n if hasattr(event, \"bid\"):\n event.window.capture_blob(\n self, event.bid, event.net_transform()\n )\n else:\n event.window.set_mouse_owner(self, event.net_transform())\n self.mouse_down_position = (event.x, event.y)\n self.event_state = \"dragging\"\n event.handled = True\n\n def drag_end(self, event):\n if hasattr(event, \"bid\"):\n event.window.release_blob(event.bid)\n self.event_state = \"normal\"\n LegendTool.drag_end(self, event)\n\n\nclass MPRangeSelection(RangeSelection):\n\n # Maps blob ID numbers to the (x,y) coordinates that came in.\n _blobs = Dict()\n\n # Maps blob ID numbers to the (x0,y0) coordinates from blob_move events.\n _moves = Dict()\n\n # Properties to convert the dictionaries to map from blob ID numbers to\n # a single coordinate appropriate for the axis the range selects on.\n _axis_blobs = Property(Dict)\n _axis_moves = Property(Dict)\n\n def _convert_to_axis(self, d):\n \"\"\"Convert a mapping of ID to (x,y) tuple to a mapping of ID to just\n the coordinate appropriate for the selected axis.\n \"\"\"\n if self.axis == \"index\":\n idx = self.axis_index\n else:\n idx = 1 - self.axis_index\n d2 = {}\n for id, coords in list(d.items()):\n d2[id] = coords[idx]\n return d2\n\n def _get__axis_blobs(self):\n return self._convert_to_axis(self._blobs)\n\n def _get__axis_moves(self):\n return self._convert_to_axis(self._moves)\n\n def normal_blob_down(self, event):\n if len(self._blobs) < 2:\n self._blobs[event.bid] = (event.x, event.y)\n event.window.capture_blob(\n self, event.bid, transform=event.net_transform()\n )\n event.handled = True\n\n def normal_blob_up(self, event):\n self._handle_blob_leave(event)\n\n def normal_blob_frame_end(self, event):\n if len(self._blobs) == 2:\n self.event_state = \"selecting\"\n # self.drag_start(event, capture_mouse=False)\n # self.selecting_mouse_move(event)\n self._set_sizing_cursor(event)\n self.selection = sorted(self._axis_blobs.values())\n\n def selecting_blob_move(self, event):\n if event.bid in self._blobs:\n self._blobs[event.bid] = event.x, event.y\n self._moves[event.bid] = event.x0, event.y0\n\n def selecting_blob_up(self, event):\n self._handle_blob_leave(event)\n\n def selecting_blob_frame_end(self, event):\n if self.selection is None:\n return\n elif len(self._blobs) == 2:\n axis_index = self.axis_index\n low = self.plot.position[axis_index]\n high = low + self.plot.bounds[axis_index] - 1\n p1, p2 = list(self._axis_blobs.values())\n # XXX: what if p1 or p2 is out of bounds?\n m1 = self.mapper.map_data(p1)\n m2 = self.mapper.map_data(p2)\n low_val = min(m1, m2)\n high_val = max(m1, m2)\n self.selection = (low_val, high_val)\n self.component.request_redraw()\n elif len(self._moves) == 1:\n id, p0 = list(self._axis_moves.items())[0]\n m0 = self.mapper.map_data(p0)\n low, high = self.selection\n if low <= m0 <= high:\n m1 = self.mapper.map_data(self._axis_blobs[id])\n dm = m1 - m0\n self.selection = (low + dm, high + dm)\n\n def selected_blob_down(self, event):\n if len(self._blobs) < 2:\n self._blobs[event.bid] = (event.x, event.y)\n event.window.capture_blob(\n self, event.bid, transform=event.net_transform()\n )\n event.handled = True\n\n def selected_blob_move(self, event):\n if event.bid in self._blobs:\n self._blobs[event.bid] = event.x, event.y\n self._moves[event.bid] = event.x0, event.y0\n\n 
def selected_blob_frame_end(self, event):\n self.selecting_blob_frame_end(event)\n\n def selected_blob_up(self, event):\n self._handle_blob_leave(event)\n\n def _handle_blob_leave(self, event):\n self._moves.pop(event.bid, None)\n if event.bid in self._blobs:\n del self._blobs[event.bid]\n event.window.release_blob(event.bid)\n\n # Treat the blob leave as a selecting_mouse_up event\n self.selecting_right_up(event)\n\n if len(self._blobs) < 2:\n self.event_state = \"selected\"\n", "path": "examples/demo/canvas/mptools.py" } ]
diff --git a/examples/demo/canvas/mptools.py b/examples/demo/canvas/mptools.py index 77e13a22a..c8f4866e5 100644 --- a/examples/demo/canvas/mptools.py +++ b/examples/demo/canvas/mptools.py @@ -15,8 +15,9 @@ CArray, ) +from enable.api import BaseTool + # Chaco imports -from chaco.api import BaseTool from chaco.chaco_traits import Optional from chaco.tools.api import PanTool, DragZoom, LegendTool, RangeSelection
getredash__redash-3008
GA Data Source throws an error when no rows returned
### Issue Summary
Google Analytics Data Source throws `Error running query: 'rows'` when the query result is empty. I have a pretty simple query with dimensions and filters, like:
```json
{
    "ids": "ga:177xxxxxx",
    "start_date": "2018-10-08",
    "end_date": "2018-10-12",
    "metrics": "ga:uniqueEvents",
    "dimensions": "ga:dimension1,ga:dimension3",
    "filters": "ga:dimension2==userrole;ga:eventCategory==eventcategory;ga:eventAction==enentaction;ga:dimension1!=demo"
}
```
Sometimes it returns an empty result because there is no data, and this results in an error in Redash.
### Steps to Reproduce
1. Create the Google Analytics Data Source
2. Make some query returning zero rows
3. Execute it in the query editor
`Error running query: 'rows'` will be thrown.
While this might not be considered a bug, I'd expect just an empty result with no errors.
### Technical details:
* Redash Version: 5.0.1
* Browser/OS: Chrome/macOS
* How did you install Redash: docker-compose
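The fix in this record (see the diff further down) is to read the rows defensively, since an empty Google Analytics result omits the `rows` key altogether. Below is a standalone sketch of that pattern; the response dictionaries are made-up stand-ins for the GA API payload, not real query output, and `extract_rows` is a simplified stand-in for the runner's `parse_ga_response`.

```python
def extract_rows(response):
    # An empty GA result still has columnHeaders but no "rows" key, so default to [].
    names = [header["name"] for header in response["columnHeaders"]]
    return [dict(zip(names, raw_row)) for raw_row in response.get("rows", [])]


if __name__ == "__main__":
    empty = {"columnHeaders": [{"name": "ga:uniqueEvents"}]}  # no "rows" key at all
    populated = dict(empty, rows=[["42"]])

    print(extract_rows(empty))      # [] instead of KeyError: 'rows'
    print(extract_rows(populated))  # [{'ga:uniqueEvents': '42'}]
```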
[ { "content": "# -*- coding: utf-8 -*-\n\nimport logging\nfrom base64 import b64decode\nfrom datetime import datetime\nfrom urlparse import parse_qs, urlparse\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\ntry:\n from oauth2client.service_account import ServiceAccountCredentials\n from apiclient.discovery import build\n from apiclient.errors import HttpError\n import httplib2\n enabled = True\nexcept ImportError as e:\n enabled = False\n\n\ntypes_conv = dict(\n STRING=TYPE_STRING,\n INTEGER=TYPE_INTEGER,\n FLOAT=TYPE_FLOAT,\n DATE=TYPE_DATE,\n DATETIME=TYPE_DATETIME\n)\n\n\ndef parse_ga_response(response):\n columns = []\n for h in response['columnHeaders']:\n if h['name'] in ('ga:date', 'mcf:conversionDate'):\n h['dataType'] = 'DATE'\n elif h['name'] == 'ga:dateHour':\n h['dataType'] = 'DATETIME'\n columns.append({\n 'name': h['name'],\n 'friendly_name': h['name'].split(':', 1)[1],\n 'type': types_conv.get(h['dataType'], 'string')\n })\n\n rows = []\n for r in response['rows']:\n d = {}\n for c, value in enumerate(r):\n column_name = response['columnHeaders'][c]['name']\n column_type = filter(lambda col: col['name'] == column_name, columns)[0]['type']\n\n # mcf results come a bit different than ga results:\n if isinstance(value, dict):\n if 'primitiveValue' in value:\n value = value['primitiveValue']\n elif 'conversionPathValue' in value:\n steps = []\n for step in value['conversionPathValue']:\n steps.append('{}:{}'.format(step['interactionType'], step['nodeValue']))\n value = ', '.join(steps)\n else:\n raise Exception(\"Results format not supported\")\n\n if column_type == TYPE_DATE:\n value = datetime.strptime(value, '%Y%m%d')\n elif column_type == TYPE_DATETIME:\n if len(value) == 10:\n value = datetime.strptime(value, '%Y%m%d%H')\n elif len(value) == 12:\n value = datetime.strptime(value, '%Y%m%d%H%M')\n else:\n raise Exception(\"Unknown date/time format in results: '{}'\".format(value))\n\n d[column_name] = value\n rows.append(d)\n\n return {'columns': columns, 'rows': rows}\n\n\nclass GoogleAnalytics(BaseSQLQueryRunner):\n @classmethod\n def annotate_query(cls):\n return False\n\n @classmethod\n def type(cls):\n return \"google_analytics\"\n\n @classmethod\n def name(cls):\n return \"Google Analytics\"\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def configuration_schema(cls):\n return {\n 'type': 'object',\n 'properties': {\n 'jsonKeyFile': {\n \"type\": \"string\",\n 'title': 'JSON Key File'\n }\n },\n 'required': ['jsonKeyFile'],\n 'secret': ['jsonKeyFile']\n }\n\n def __init__(self, configuration):\n super(GoogleAnalytics, self).__init__(configuration)\n self.syntax = 'json'\n\n def _get_analytics_service(self):\n scope = ['https://www.googleapis.com/auth/analytics.readonly']\n key = json_loads(b64decode(self.configuration['jsonKeyFile']))\n creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)\n return build('analytics', 'v3', http=creds.authorize(httplib2.Http()))\n\n def _get_tables(self, schema):\n accounts = self._get_analytics_service().management().accounts().list().execute().get('items')\n if accounts is None:\n raise Exception(\"Failed getting accounts.\")\n else:\n for account in accounts:\n schema[account['name']] = {'name': account['name'], 'columns': []}\n properties = self._get_analytics_service().management().webproperties().list(\n accountId=account['id']).execute().get('items', [])\n for property_ in properties:\n if 'defaultProfileId' 
in property_ and 'name' in property_:\n schema[account['name']]['columns'].append(\n u'{0} (ga:{1})'.format(property_['name'], property_['defaultProfileId'])\n )\n\n return schema.values()\n\n def test_connection(self):\n try:\n service = self._get_analytics_service()\n service.management().accounts().list().execute()\n except HttpError as e:\n # Make sure we return a more readable error to the end user\n raise Exception(e._get_reason())\n\n def run_query(self, query, user):\n logger.debug(\"Analytics is about to execute query: %s\", query)\n try:\n params = json_loads(query)\n except:\n params = parse_qs(urlparse(query).query, keep_blank_values=True)\n for key in params.keys():\n params[key] = ','.join(params[key])\n if '-' in key:\n params[key.replace('-', '_')] = params.pop(key)\n\n if 'mcf:' in params['metrics'] and 'ga:' in params['metrics']:\n raise Exception(\"Can't mix mcf: and ga: metrics.\")\n\n if 'mcf:' in params.get('dimensions', '') and 'ga:' in params.get('dimensions', ''):\n raise Exception(\"Can't mix mcf: and ga: dimensions.\")\n\n if 'mcf:' in params['metrics']:\n api = self._get_analytics_service().data().mcf()\n else:\n api = self._get_analytics_service().data().ga()\n\n if len(params) > 0:\n try:\n response = api.get(**params).execute()\n data = parse_ga_response(response)\n error = None\n json_data = json_dumps(data)\n except HttpError as e:\n # Make sure we return a more readable error to the end user\n error = e._get_reason()\n json_data = None\n else:\n error = 'Wrong query format.'\n json_data = None\n return json_data, error\n\n\nregister(GoogleAnalytics)\n", "path": "redash/query_runner/google_analytics.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\nimport logging\nfrom base64 import b64decode\nfrom datetime import datetime\nfrom urlparse import parse_qs, urlparse\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\ntry:\n from oauth2client.service_account import ServiceAccountCredentials\n from apiclient.discovery import build\n from apiclient.errors import HttpError\n import httplib2\n enabled = True\nexcept ImportError as e:\n enabled = False\n\n\ntypes_conv = dict(\n STRING=TYPE_STRING,\n INTEGER=TYPE_INTEGER,\n FLOAT=TYPE_FLOAT,\n DATE=TYPE_DATE,\n DATETIME=TYPE_DATETIME\n)\n\n\ndef parse_ga_response(response):\n columns = []\n for h in response['columnHeaders']:\n if h['name'] in ('ga:date', 'mcf:conversionDate'):\n h['dataType'] = 'DATE'\n elif h['name'] == 'ga:dateHour':\n h['dataType'] = 'DATETIME'\n columns.append({\n 'name': h['name'],\n 'friendly_name': h['name'].split(':', 1)[1],\n 'type': types_conv.get(h['dataType'], 'string')\n })\n\n rows = []\n for r in response.get('rows', []):\n d = {}\n for c, value in enumerate(r):\n column_name = response['columnHeaders'][c]['name']\n column_type = filter(lambda col: col['name'] == column_name, columns)[0]['type']\n\n # mcf results come a bit different than ga results:\n if isinstance(value, dict):\n if 'primitiveValue' in value:\n value = value['primitiveValue']\n elif 'conversionPathValue' in value:\n steps = []\n for step in value['conversionPathValue']:\n steps.append('{}:{}'.format(step['interactionType'], step['nodeValue']))\n value = ', '.join(steps)\n else:\n raise Exception(\"Results format not supported\")\n\n if column_type == TYPE_DATE:\n value = datetime.strptime(value, '%Y%m%d')\n elif column_type == TYPE_DATETIME:\n if len(value) == 10:\n value = datetime.strptime(value, '%Y%m%d%H')\n elif len(value) == 12:\n value = datetime.strptime(value, '%Y%m%d%H%M')\n else:\n raise Exception(\"Unknown date/time format in results: '{}'\".format(value))\n\n d[column_name] = value\n rows.append(d)\n\n return {'columns': columns, 'rows': rows}\n\n\nclass GoogleAnalytics(BaseSQLQueryRunner):\n @classmethod\n def annotate_query(cls):\n return False\n\n @classmethod\n def type(cls):\n return \"google_analytics\"\n\n @classmethod\n def name(cls):\n return \"Google Analytics\"\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def configuration_schema(cls):\n return {\n 'type': 'object',\n 'properties': {\n 'jsonKeyFile': {\n \"type\": \"string\",\n 'title': 'JSON Key File'\n }\n },\n 'required': ['jsonKeyFile'],\n 'secret': ['jsonKeyFile']\n }\n\n def __init__(self, configuration):\n super(GoogleAnalytics, self).__init__(configuration)\n self.syntax = 'json'\n\n def _get_analytics_service(self):\n scope = ['https://www.googleapis.com/auth/analytics.readonly']\n key = json_loads(b64decode(self.configuration['jsonKeyFile']))\n creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)\n return build('analytics', 'v3', http=creds.authorize(httplib2.Http()))\n\n def _get_tables(self, schema):\n accounts = self._get_analytics_service().management().accounts().list().execute().get('items')\n if accounts is None:\n raise Exception(\"Failed getting accounts.\")\n else:\n for account in accounts:\n schema[account['name']] = {'name': account['name'], 'columns': []}\n properties = self._get_analytics_service().management().webproperties().list(\n accountId=account['id']).execute().get('items', [])\n for property_ in properties:\n if 
'defaultProfileId' in property_ and 'name' in property_:\n schema[account['name']]['columns'].append(\n u'{0} (ga:{1})'.format(property_['name'], property_['defaultProfileId'])\n )\n\n return schema.values()\n\n def test_connection(self):\n try:\n service = self._get_analytics_service()\n service.management().accounts().list().execute()\n except HttpError as e:\n # Make sure we return a more readable error to the end user\n raise Exception(e._get_reason())\n\n def run_query(self, query, user):\n logger.debug(\"Analytics is about to execute query: %s\", query)\n try:\n params = json_loads(query)\n except:\n params = parse_qs(urlparse(query).query, keep_blank_values=True)\n for key in params.keys():\n params[key] = ','.join(params[key])\n if '-' in key:\n params[key.replace('-', '_')] = params.pop(key)\n\n if 'mcf:' in params['metrics'] and 'ga:' in params['metrics']:\n raise Exception(\"Can't mix mcf: and ga: metrics.\")\n\n if 'mcf:' in params.get('dimensions', '') and 'ga:' in params.get('dimensions', ''):\n raise Exception(\"Can't mix mcf: and ga: dimensions.\")\n\n if 'mcf:' in params['metrics']:\n api = self._get_analytics_service().data().mcf()\n else:\n api = self._get_analytics_service().data().ga()\n\n if len(params) > 0:\n try:\n response = api.get(**params).execute()\n data = parse_ga_response(response)\n error = None\n json_data = json_dumps(data)\n except HttpError as e:\n # Make sure we return a more readable error to the end user\n error = e._get_reason()\n json_data = None\n else:\n error = 'Wrong query format.'\n json_data = None\n return json_data, error\n\n\nregister(GoogleAnalytics)\n", "path": "redash/query_runner/google_analytics.py" } ]
diff --git a/redash/query_runner/google_analytics.py b/redash/query_runner/google_analytics.py index e8b70eb01f..71be522015 100644 --- a/redash/query_runner/google_analytics.py +++ b/redash/query_runner/google_analytics.py @@ -43,7 +43,7 @@ def parse_ga_response(response): }) rows = [] - for r in response['rows']: + for r in response.get('rows', []): d = {} for c, value in enumerate(r): column_name = response['columnHeaders'][c]['name']
docker__docker-py-1167
Feature Request: docker.from_env(version='auto')
Feature request to add automatic API version support for `docker.from_env()`, similar to `docker.Client(version='auto')`. I noticed that one of the suggestions from #402, the `version='auto'` option, is now available for `docker.Client()` but doesn't work for `docker.from_env()`.
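Only part of this record's patched file is visible below, so the snippet here is just a plausible sketch of how environment-based construction could be combined with an explicit API version using the docker-py `Client` API shown in this record; `client_from_env` is a hypothetical helper, not necessarily the change that was merged.

```python
import docker
from docker.utils import kwargs_from_env


def client_from_env(version=None):
    # Hypothetical helper: build a Client from DOCKER_HOST / DOCKER_TLS_VERIFY /
    # DOCKER_CERT_PATH, while still allowing an explicit API version such as
    # "auto", which makes the client ask the daemon for its API version.
    return docker.Client(version=version, **kwargs_from_env())


if __name__ == "__main__":
    # Requires a reachable Docker daemon.
    client = client_from_env(version="auto")
    print(client.api_version)
```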
[ { "content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport struct\nfrom functools import partial\n\nimport requests\nimport requests.exceptions\nimport six\nimport websocket\n\n\nfrom . import api\nfrom . import constants\nfrom . import errors\nfrom .auth import auth\nfrom .ssladapter import ssladapter\nfrom .tls import TLSConfig\nfrom .transport import UnixAdapter\nfrom .utils import utils, check_resource, update_headers, kwargs_from_env\nfrom .utils.socket import frames_iter\ntry:\n from .transport import NpipeAdapter\nexcept ImportError:\n pass\n\n\ndef from_env(**kwargs):\n return Client.from_env(**kwargs)\n\n\nclass Client(\n requests.Session,\n api.BuildApiMixin,\n api.ContainerApiMixin,\n api.DaemonApiMixin,\n api.ExecApiMixin,\n api.ImageApiMixin,\n api.NetworkApiMixin,\n api.ServiceApiMixin,\n api.SwarmApiMixin,\n api.VolumeApiMixin):\n def __init__(self, base_url=None, version=None,\n timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False,\n user_agent=constants.DEFAULT_USER_AGENT):\n super(Client, self).__init__()\n\n if tls and not base_url:\n raise errors.TLSParameterError(\n 'If using TLS, the base_url argument must be provided.'\n )\n\n self.base_url = base_url\n self.timeout = timeout\n self.headers['User-Agent'] = user_agent\n\n self._auth_configs = auth.load_config()\n\n base_url = utils.parse_host(\n base_url, constants.IS_WINDOWS_PLATFORM, tls=bool(tls)\n )\n if base_url.startswith('http+unix://'):\n self._custom_adapter = UnixAdapter(base_url, timeout)\n self.mount('http+docker://', self._custom_adapter)\n self.base_url = 'http+docker://localunixsocket'\n elif base_url.startswith('npipe://'):\n if not constants.IS_WINDOWS_PLATFORM:\n raise errors.DockerException(\n 'The npipe:// protocol is only supported on Windows'\n )\n try:\n self._custom_adapter = NpipeAdapter(base_url, timeout)\n except NameError:\n raise errors.DockerException(\n 'Install pypiwin32 package to enable npipe:// support'\n )\n self.mount('http+docker://', self._custom_adapter)\n self.base_url = 'http+docker://localnpipe'\n else:\n # Use SSLAdapter for the ability to specify SSL version\n if isinstance(tls, TLSConfig):\n tls.configure_client(self)\n elif tls:\n self._custom_adapter = ssladapter.SSLAdapter()\n self.mount('https://', self._custom_adapter)\n self.base_url = base_url\n\n # version detection needs to be after unix adapter mounting\n if version is None:\n self._version = constants.DEFAULT_DOCKER_API_VERSION\n elif isinstance(version, six.string_types):\n if version.lower() == 'auto':\n self._version = self._retrieve_server_version()\n else:\n self._version = version\n else:\n raise errors.DockerException(\n 'Version parameter must be a string or None. 
Found {0}'.format(\n type(version).__name__\n )\n )\n\n @classmethod\n def from_env(cls, **kwargs):\n return cls(**kwargs_from_env(**kwargs))\n\n def _retrieve_server_version(self):\n try:\n return self.version(api_version=False)[\"ApiVersion\"]\n except KeyError:\n raise errors.DockerException(\n 'Invalid response from docker daemon: key \"ApiVersion\"'\n ' is missing.'\n )\n except Exception as e:\n raise errors.DockerException(\n 'Error while fetching server API version: {0}'.format(e)\n )\n\n def _set_request_timeout(self, kwargs):\n \"\"\"Prepare the kwargs for an HTTP request by inserting the timeout\n parameter, if not already present.\"\"\"\n kwargs.setdefault('timeout', self.timeout)\n return kwargs\n\n @update_headers\n def _post(self, url, **kwargs):\n return self.post(url, **self._set_request_timeout(kwargs))\n\n @update_headers\n def _get(self, url, **kwargs):\n return self.get(url, **self._set_request_timeout(kwargs))\n\n @update_headers\n def _put(self, url, **kwargs):\n return self.put(url, **self._set_request_timeout(kwargs))\n\n @update_headers\n def _delete(self, url, **kwargs):\n return self.delete(url, **self._set_request_timeout(kwargs))\n\n def _url(self, pathfmt, *args, **kwargs):\n for arg in args:\n if not isinstance(arg, six.string_types):\n raise ValueError(\n 'Expected a string but found {0} ({1}) '\n 'instead'.format(arg, type(arg))\n )\n\n quote_f = partial(six.moves.urllib.parse.quote_plus, safe=\"/:\")\n args = map(quote_f, args)\n\n if kwargs.get('versioned_api', True):\n return '{0}/v{1}{2}'.format(\n self.base_url, self._version, pathfmt.format(*args)\n )\n else:\n return '{0}{1}'.format(self.base_url, pathfmt.format(*args))\n\n def _raise_for_status(self, response, explanation=None):\n \"\"\"Raises stored :class:`APIError`, if one occurred.\"\"\"\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n if e.response.status_code == 404:\n raise errors.NotFound(e, response, explanation=explanation)\n raise errors.APIError(e, response, explanation=explanation)\n\n def _result(self, response, json=False, binary=False):\n assert not (json and binary)\n self._raise_for_status(response)\n\n if json:\n return response.json()\n if binary:\n return response.content\n return response.text\n\n def _post_json(self, url, data, **kwargs):\n # Go <1.1 can't unserialize null to a string\n # so we do this disgusting thing here.\n data2 = {}\n if data is not None:\n for k, v in six.iteritems(data):\n if v is not None:\n data2[k] = v\n\n if 'headers' not in kwargs:\n kwargs['headers'] = {}\n kwargs['headers']['Content-Type'] = 'application/json'\n return self._post(url, data=json.dumps(data2), **kwargs)\n\n def _attach_params(self, override=None):\n return override or {\n 'stdout': 1,\n 'stderr': 1,\n 'stream': 1\n }\n\n @check_resource\n def _attach_websocket(self, container, params=None):\n url = self._url(\"/containers/{0}/attach/ws\", container)\n req = requests.Request(\"POST\", url, params=self._attach_params(params))\n full_url = req.prepare().url\n full_url = full_url.replace(\"http://\", \"ws://\", 1)\n full_url = full_url.replace(\"https://\", \"wss://\", 1)\n return self._create_websocket_connection(full_url)\n\n def _create_websocket_connection(self, url):\n return websocket.create_connection(url)\n\n def _get_raw_response_socket(self, response):\n self._raise_for_status(response)\n if six.PY3:\n sock = response.raw._fp.fp.raw\n if self.base_url.startswith(\"https://\"):\n sock = sock._sock\n else:\n sock = response.raw._fp.fp._sock\n 
try:\n # Keep a reference to the response to stop it being garbage\n # collected. If the response is garbage collected, it will\n # close TLS sockets.\n sock._response = response\n except AttributeError:\n # UNIX sockets can't have attributes set on them, but that's\n # fine because we won't be doing TLS over them\n pass\n\n return sock\n\n def _stream_helper(self, response, decode=False):\n \"\"\"Generator for data coming from a chunked-encoded HTTP response.\"\"\"\n if response.raw._fp.chunked:\n reader = response.raw\n while not reader.closed:\n # this read call will block until we get a chunk\n data = reader.read(1)\n if not data:\n break\n if reader._fp.chunk_left:\n data += reader.read(reader._fp.chunk_left)\n if decode:\n if six.PY3:\n data = data.decode('utf-8')\n # remove the trailing newline\n data = data.strip()\n # split the data at any newlines\n data_list = data.split(\"\\r\\n\")\n # load and yield each line seperately\n for data in data_list:\n data = json.loads(data)\n yield data\n else:\n yield data\n else:\n # Response isn't chunked, meaning we probably\n # encountered an error immediately\n yield self._result(response, json=decode)\n\n def _multiplexed_buffer_helper(self, response):\n \"\"\"A generator of multiplexed data blocks read from a buffered\n response.\"\"\"\n buf = self._result(response, binary=True)\n walker = 0\n while True:\n if len(buf[walker:]) < 8:\n break\n _, length = struct.unpack_from('>BxxxL', buf[walker:])\n start = walker + constants.STREAM_HEADER_SIZE_BYTES\n end = start + length\n walker = end\n yield buf[start:end]\n\n def _multiplexed_response_stream_helper(self, response):\n \"\"\"A generator of multiplexed data blocks coming from a response\n stream.\"\"\"\n\n # Disable timeout on the underlying socket to prevent\n # Read timed out(s) for long running processes\n socket = self._get_raw_response_socket(response)\n self._disable_socket_timeout(socket)\n\n while True:\n header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)\n if not header:\n break\n _, length = struct.unpack('>BxxxL', header)\n if not length:\n continue\n data = response.raw.read(length)\n if not data:\n break\n yield data\n\n def _stream_raw_result_old(self, response):\n ''' Stream raw output for API versions below 1.6 '''\n self._raise_for_status(response)\n for line in response.iter_lines(chunk_size=1,\n decode_unicode=True):\n # filter out keep-alive new lines\n if line:\n yield line\n\n def _stream_raw_result(self, response):\n ''' Stream result for TTY-enabled container above API 1.6 '''\n self._raise_for_status(response)\n for out in response.iter_content(chunk_size=1, decode_unicode=True):\n yield out\n\n def _read_from_socket(self, response, stream):\n socket = self._get_raw_response_socket(response)\n\n if stream:\n return frames_iter(socket)\n else:\n return six.binary_type().join(frames_iter(socket))\n\n def _disable_socket_timeout(self, socket):\n \"\"\" Depending on the combination of python version and whether we're\n connecting over http or https, we might need to access _sock, which\n may or may not exist; or we may need to just settimeout on socket\n itself, which also may or may not have settimeout on it. 
To avoid\n missing the correct one, we try both.\n\n We also do not want to set the timeout if it is already disabled, as\n you run the risk of changing a socket that was non-blocking to\n blocking, for example when using gevent.\n \"\"\"\n sockets = [socket, getattr(socket, '_sock', None)]\n\n for s in sockets:\n if not hasattr(s, 'settimeout'):\n continue\n\n timeout = -1\n\n if hasattr(s, 'gettimeout'):\n timeout = s.gettimeout()\n\n # Don't change the timeout if it is already disabled.\n if timeout is None or timeout == 0.0:\n continue\n\n s.settimeout(None)\n\n def _get_result(self, container, stream, res):\n cont = self.inspect_container(container)\n return self._get_result_tty(stream, res, cont['Config']['Tty'])\n\n def _get_result_tty(self, stream, res, is_tty):\n # Stream multi-plexing was only introduced in API v1.6. Anything\n # before that needs old-style streaming.\n if utils.compare_version('1.6', self._version) < 0:\n return self._stream_raw_result_old(res)\n\n # We should also use raw streaming (without keep-alives)\n # if we're dealing with a tty-enabled container.\n if is_tty:\n return self._stream_raw_result(res) if stream else \\\n self._result(res, binary=True)\n\n self._raise_for_status(res)\n sep = six.binary_type()\n if stream:\n return self._multiplexed_response_stream_helper(res)\n else:\n return sep.join(\n [x for x in self._multiplexed_buffer_helper(res)]\n )\n\n def get_adapter(self, url):\n try:\n return super(Client, self).get_adapter(url)\n except requests.exceptions.InvalidSchema as e:\n if self._custom_adapter:\n return self._custom_adapter\n else:\n raise e\n\n @property\n def api_version(self):\n return self._version\n\n\nclass AutoVersionClient(Client):\n def __init__(self, *args, **kwargs):\n if 'version' in kwargs and kwargs['version']:\n raise errors.DockerException(\n 'Can not specify version for AutoVersionClient'\n )\n kwargs['version'] = 'auto'\n super(AutoVersionClient, self).__init__(*args, **kwargs)\n", "path": "docker/client.py" } ]
[ { "content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport struct\nfrom functools import partial\n\nimport requests\nimport requests.exceptions\nimport six\nimport websocket\n\n\nfrom . import api\nfrom . import constants\nfrom . import errors\nfrom .auth import auth\nfrom .ssladapter import ssladapter\nfrom .tls import TLSConfig\nfrom .transport import UnixAdapter\nfrom .utils import utils, check_resource, update_headers, kwargs_from_env\nfrom .utils.socket import frames_iter\ntry:\n from .transport import NpipeAdapter\nexcept ImportError:\n pass\n\n\ndef from_env(**kwargs):\n return Client.from_env(**kwargs)\n\n\nclass Client(\n requests.Session,\n api.BuildApiMixin,\n api.ContainerApiMixin,\n api.DaemonApiMixin,\n api.ExecApiMixin,\n api.ImageApiMixin,\n api.NetworkApiMixin,\n api.ServiceApiMixin,\n api.SwarmApiMixin,\n api.VolumeApiMixin):\n def __init__(self, base_url=None, version=None,\n timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False,\n user_agent=constants.DEFAULT_USER_AGENT):\n super(Client, self).__init__()\n\n if tls and not base_url:\n raise errors.TLSParameterError(\n 'If using TLS, the base_url argument must be provided.'\n )\n\n self.base_url = base_url\n self.timeout = timeout\n self.headers['User-Agent'] = user_agent\n\n self._auth_configs = auth.load_config()\n\n base_url = utils.parse_host(\n base_url, constants.IS_WINDOWS_PLATFORM, tls=bool(tls)\n )\n if base_url.startswith('http+unix://'):\n self._custom_adapter = UnixAdapter(base_url, timeout)\n self.mount('http+docker://', self._custom_adapter)\n self.base_url = 'http+docker://localunixsocket'\n elif base_url.startswith('npipe://'):\n if not constants.IS_WINDOWS_PLATFORM:\n raise errors.DockerException(\n 'The npipe:// protocol is only supported on Windows'\n )\n try:\n self._custom_adapter = NpipeAdapter(base_url, timeout)\n except NameError:\n raise errors.DockerException(\n 'Install pypiwin32 package to enable npipe:// support'\n )\n self.mount('http+docker://', self._custom_adapter)\n self.base_url = 'http+docker://localnpipe'\n else:\n # Use SSLAdapter for the ability to specify SSL version\n if isinstance(tls, TLSConfig):\n tls.configure_client(self)\n elif tls:\n self._custom_adapter = ssladapter.SSLAdapter()\n self.mount('https://', self._custom_adapter)\n self.base_url = base_url\n\n # version detection needs to be after unix adapter mounting\n if version is None:\n self._version = constants.DEFAULT_DOCKER_API_VERSION\n elif isinstance(version, six.string_types):\n if version.lower() == 'auto':\n self._version = self._retrieve_server_version()\n else:\n self._version = version\n else:\n raise errors.DockerException(\n 'Version parameter must be a string or None. 
Found {0}'.format(\n type(version).__name__\n )\n )\n\n @classmethod\n def from_env(cls, **kwargs):\n version = kwargs.pop('version', None)\n return cls(version=version, **kwargs_from_env(**kwargs))\n\n def _retrieve_server_version(self):\n try:\n return self.version(api_version=False)[\"ApiVersion\"]\n except KeyError:\n raise errors.DockerException(\n 'Invalid response from docker daemon: key \"ApiVersion\"'\n ' is missing.'\n )\n except Exception as e:\n raise errors.DockerException(\n 'Error while fetching server API version: {0}'.format(e)\n )\n\n def _set_request_timeout(self, kwargs):\n \"\"\"Prepare the kwargs for an HTTP request by inserting the timeout\n parameter, if not already present.\"\"\"\n kwargs.setdefault('timeout', self.timeout)\n return kwargs\n\n @update_headers\n def _post(self, url, **kwargs):\n return self.post(url, **self._set_request_timeout(kwargs))\n\n @update_headers\n def _get(self, url, **kwargs):\n return self.get(url, **self._set_request_timeout(kwargs))\n\n @update_headers\n def _put(self, url, **kwargs):\n return self.put(url, **self._set_request_timeout(kwargs))\n\n @update_headers\n def _delete(self, url, **kwargs):\n return self.delete(url, **self._set_request_timeout(kwargs))\n\n def _url(self, pathfmt, *args, **kwargs):\n for arg in args:\n if not isinstance(arg, six.string_types):\n raise ValueError(\n 'Expected a string but found {0} ({1}) '\n 'instead'.format(arg, type(arg))\n )\n\n quote_f = partial(six.moves.urllib.parse.quote_plus, safe=\"/:\")\n args = map(quote_f, args)\n\n if kwargs.get('versioned_api', True):\n return '{0}/v{1}{2}'.format(\n self.base_url, self._version, pathfmt.format(*args)\n )\n else:\n return '{0}{1}'.format(self.base_url, pathfmt.format(*args))\n\n def _raise_for_status(self, response, explanation=None):\n \"\"\"Raises stored :class:`APIError`, if one occurred.\"\"\"\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n if e.response.status_code == 404:\n raise errors.NotFound(e, response, explanation=explanation)\n raise errors.APIError(e, response, explanation=explanation)\n\n def _result(self, response, json=False, binary=False):\n assert not (json and binary)\n self._raise_for_status(response)\n\n if json:\n return response.json()\n if binary:\n return response.content\n return response.text\n\n def _post_json(self, url, data, **kwargs):\n # Go <1.1 can't unserialize null to a string\n # so we do this disgusting thing here.\n data2 = {}\n if data is not None:\n for k, v in six.iteritems(data):\n if v is not None:\n data2[k] = v\n\n if 'headers' not in kwargs:\n kwargs['headers'] = {}\n kwargs['headers']['Content-Type'] = 'application/json'\n return self._post(url, data=json.dumps(data2), **kwargs)\n\n def _attach_params(self, override=None):\n return override or {\n 'stdout': 1,\n 'stderr': 1,\n 'stream': 1\n }\n\n @check_resource\n def _attach_websocket(self, container, params=None):\n url = self._url(\"/containers/{0}/attach/ws\", container)\n req = requests.Request(\"POST\", url, params=self._attach_params(params))\n full_url = req.prepare().url\n full_url = full_url.replace(\"http://\", \"ws://\", 1)\n full_url = full_url.replace(\"https://\", \"wss://\", 1)\n return self._create_websocket_connection(full_url)\n\n def _create_websocket_connection(self, url):\n return websocket.create_connection(url)\n\n def _get_raw_response_socket(self, response):\n self._raise_for_status(response)\n if six.PY3:\n sock = response.raw._fp.fp.raw\n if self.base_url.startswith(\"https://\"):\n sock 
= sock._sock\n else:\n sock = response.raw._fp.fp._sock\n try:\n # Keep a reference to the response to stop it being garbage\n # collected. If the response is garbage collected, it will\n # close TLS sockets.\n sock._response = response\n except AttributeError:\n # UNIX sockets can't have attributes set on them, but that's\n # fine because we won't be doing TLS over them\n pass\n\n return sock\n\n def _stream_helper(self, response, decode=False):\n \"\"\"Generator for data coming from a chunked-encoded HTTP response.\"\"\"\n if response.raw._fp.chunked:\n reader = response.raw\n while not reader.closed:\n # this read call will block until we get a chunk\n data = reader.read(1)\n if not data:\n break\n if reader._fp.chunk_left:\n data += reader.read(reader._fp.chunk_left)\n if decode:\n if six.PY3:\n data = data.decode('utf-8')\n # remove the trailing newline\n data = data.strip()\n # split the data at any newlines\n data_list = data.split(\"\\r\\n\")\n # load and yield each line seperately\n for data in data_list:\n data = json.loads(data)\n yield data\n else:\n yield data\n else:\n # Response isn't chunked, meaning we probably\n # encountered an error immediately\n yield self._result(response)\n\n def _multiplexed_buffer_helper(self, response):\n \"\"\"A generator of multiplexed data blocks read from a buffered\n response.\"\"\"\n buf = self._result(response, binary=True)\n walker = 0\n while True:\n if len(buf[walker:]) < 8:\n break\n _, length = struct.unpack_from('>BxxxL', buf[walker:])\n start = walker + constants.STREAM_HEADER_SIZE_BYTES\n end = start + length\n walker = end\n yield buf[start:end]\n\n def _multiplexed_response_stream_helper(self, response):\n \"\"\"A generator of multiplexed data blocks coming from a response\n stream.\"\"\"\n\n # Disable timeout on the underlying socket to prevent\n # Read timed out(s) for long running processes\n socket = self._get_raw_response_socket(response)\n self._disable_socket_timeout(socket)\n\n while True:\n header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)\n if not header:\n break\n _, length = struct.unpack('>BxxxL', header)\n if not length:\n continue\n data = response.raw.read(length)\n if not data:\n break\n yield data\n\n def _stream_raw_result_old(self, response):\n ''' Stream raw output for API versions below 1.6 '''\n self._raise_for_status(response)\n for line in response.iter_lines(chunk_size=1,\n decode_unicode=True):\n # filter out keep-alive new lines\n if line:\n yield line\n\n def _stream_raw_result(self, response):\n ''' Stream result for TTY-enabled container above API 1.6 '''\n self._raise_for_status(response)\n for out in response.iter_content(chunk_size=1, decode_unicode=True):\n yield out\n\n def _read_from_socket(self, response, stream):\n socket = self._get_raw_response_socket(response)\n\n if stream:\n return frames_iter(socket)\n else:\n return six.binary_type().join(frames_iter(socket))\n\n def _disable_socket_timeout(self, socket):\n \"\"\" Depending on the combination of python version and whether we're\n connecting over http or https, we might need to access _sock, which\n may or may not exist; or we may need to just settimeout on socket\n itself, which also may or may not have settimeout on it. 
To avoid\n missing the correct one, we try both.\n\n We also do not want to set the timeout if it is already disabled, as\n you run the risk of changing a socket that was non-blocking to\n blocking, for example when using gevent.\n \"\"\"\n sockets = [socket, getattr(socket, '_sock', None)]\n\n for s in sockets:\n if not hasattr(s, 'settimeout'):\n continue\n\n timeout = -1\n\n if hasattr(s, 'gettimeout'):\n timeout = s.gettimeout()\n\n # Don't change the timeout if it is already disabled.\n if timeout is None or timeout == 0.0:\n continue\n\n s.settimeout(None)\n\n def _get_result(self, container, stream, res):\n cont = self.inspect_container(container)\n return self._get_result_tty(stream, res, cont['Config']['Tty'])\n\n def _get_result_tty(self, stream, res, is_tty):\n # Stream multi-plexing was only introduced in API v1.6. Anything\n # before that needs old-style streaming.\n if utils.compare_version('1.6', self._version) < 0:\n return self._stream_raw_result_old(res)\n\n # We should also use raw streaming (without keep-alives)\n # if we're dealing with a tty-enabled container.\n if is_tty:\n return self._stream_raw_result(res) if stream else \\\n self._result(res, binary=True)\n\n self._raise_for_status(res)\n sep = six.binary_type()\n if stream:\n return self._multiplexed_response_stream_helper(res)\n else:\n return sep.join(\n [x for x in self._multiplexed_buffer_helper(res)]\n )\n\n def get_adapter(self, url):\n try:\n return super(Client, self).get_adapter(url)\n except requests.exceptions.InvalidSchema as e:\n if self._custom_adapter:\n return self._custom_adapter\n else:\n raise e\n\n @property\n def api_version(self):\n return self._version\n\n\nclass AutoVersionClient(Client):\n def __init__(self, *args, **kwargs):\n if 'version' in kwargs and kwargs['version']:\n raise errors.DockerException(\n 'Can not specify version for AutoVersionClient'\n )\n kwargs['version'] = 'auto'\n super(AutoVersionClient, self).__init__(*args, **kwargs)\n", "path": "docker/client.py" } ]
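Both multiplexed-stream helpers in the client code above rely on Docker's 8-byte frame header: one stream-type byte, three padding bytes, and a big-endian payload length, unpacked with `'>BxxxL'`. A tiny standalone illustration of that framing (the sample bytes are made up, not taken from a real daemon response):

```python
import struct

STREAM_HEADER_SIZE_BYTES = 8  # matches constants.STREAM_HEADER_SIZE_BYTES above

# A made-up frame: stream type 1 (stdout), followed by a 5-byte payload "hello".
frame = struct.pack('>BxxxL', 1, 5) + b'hello'

header, payload = frame[:STREAM_HEADER_SIZE_BYTES], frame[STREAM_HEADER_SIZE_BYTES:]
stream_type, length = struct.unpack('>BxxxL', header)

assert (stream_type, length) == (1, 5)
assert payload[:length] == b'hello'
```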
diff --git a/docker/client.py b/docker/client.py index 758675369..dc28ac46c 100644 --- a/docker/client.py +++ b/docker/client.py @@ -114,7 +114,8 @@ def __init__(self, base_url=None, version=None, @classmethod def from_env(cls, **kwargs): - return cls(**kwargs_from_env(**kwargs)) + version = kwargs.pop('version', None) + return cls(version=version, **kwargs_from_env(**kwargs)) def _retrieve_server_version(self): try: diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py index b21f1d6ae..6ceb8cbbc 100644 --- a/tests/unit/client_test.py +++ b/tests/unit/client_test.py @@ -25,6 +25,14 @@ def test_from_env(self): client = Client.from_env() self.assertEqual(client.base_url, "https://192.168.59.103:2376") + def test_from_env_with_version(self): + os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376', + DOCKER_CERT_PATH=TEST_CERT_DIR, + DOCKER_TLS_VERIFY='1') + client = Client.from_env(version='2.32') + self.assertEqual(client.base_url, "https://192.168.59.103:2376") + self.assertEqual(client._version, '2.32') + class DisableSocketTest(base.BaseTestCase): class DummySocket(object):
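For this docker-py row, the patch lets `Client.from_env` forward an explicit `version` instead of rejecting it (see `test_from_env_with_version` in the diff). A minimal usage sketch, assuming docker-py 1.x where `docker.Client` and `kwargs_from_env` exist as shown above; the host address is hypothetical:

```python
import os

from docker import Client  # docker-py 1.x exposes the Client class shown above

# Hypothetical host; plain TCP, so no TLS certificates are involved.
os.environ['DOCKER_HOST'] = 'tcp://127.0.0.1:2375'

# The point of the patch: `version` is now popped and forwarded, so a pinned
# API version can be combined with environment-based configuration.
client = Client.from_env(version='1.21')

assert client.api_version == '1.21'
print(client.base_url)  # tcp:// is rewritten to http:// by parse_host()
```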
mathesar-foundation__mathesar-1707
Explorations should not auto-save
New Explorations are currently persistent: any change made immediately saves the exploration. This behaviour is not preferred, since we'd like the user to be able to run and discard queries. [Mail thread containing related discussion](https://groups.google.com/a/mathesar.org/g/mathesar-developers/c/RQJSiDQu1Tg/m/uLHj30yFAgAJ).

New behaviour proposed:

* New Exploration: Auto-save is not preferred
  - User opens Data Explorer
  - User joins tables, does any number of operations
  - This should not get saved automatically
  - It should get saved when the user manually clicks the Save button
* Editing existing Exploration: ~~Auto-save is preferred~~ Auto-save is not preferred (refer https://github.com/centerofci/mathesar/issues/1590#issuecomment-1238204655)
  - User edits an existing exploration in the Data Explorer
  - User makes changes to it
  - ~~The changes are auto-saved~~ User has to click the Save button or Ctrl+S to save the changes
  - We have undo-redo to improve the user's editing experience

Implement Exploration Page functionality
This is a placeholder issue to implement a page to view a single exploration.
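At the API level, the proposed behaviour means a draft exploration lives only in frontend state until the user explicitly saves, at which point a single request hits the queries endpoint used in `stores/queries.ts` further down. A rough sketch of that one request; the base URL, table id, and payload values are hypothetical, and the real frontend goes through `postAPI` rather than `requests`:

```python
import requests

BASE_URL = 'http://localhost:8000'  # hypothetical local Mathesar instance

# While the user experiments in the Data Explorer, the draft exploration
# exists only in client-side state (QueryManager plus its undo-redo history).
draft_exploration = {
    'name': 'Orders by customer',  # hypothetical
    'base_table': 42,              # hypothetical table id
    'initial_columns': [],
    'transformations': [],
}

# Persisting happens only when the user clicks Save (or presses Ctrl+S),
# which results in a single POST to the queries endpoint.
response = requests.post(f'{BASE_URL}/api/db/v0/queries/', json=draft_exploration)
response.raise_for_status()
saved = response.json()  # per QueryGetResponse, includes the new id and schema
```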
[ { "content": "from django.shortcuts import render, redirect, get_object_or_404\n\nfrom mathesar.models.base import Database, Schema, Table\nfrom mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.api.serializers.tables import TableSerializer\nfrom mathesar.api.serializers.queries import QuerySerializer\nfrom mathesar.database.types import UIType\nfrom mathesar.models.query import UIQuery\n\n\ndef get_schema_list(request, database):\n schema_serializer = SchemaSerializer(\n Schema.objects.filter(database=database),\n many=True,\n context={'request': request}\n )\n return schema_serializer.data\n\n\ndef get_database_list(request):\n database_serializer = DatabaseSerializer(\n Database.objects.all(),\n many=True,\n context={'request': request}\n )\n return database_serializer.data\n\n\ndef get_table_list(request, schema):\n if schema is None:\n return []\n table_serializer = TableSerializer(\n Table.objects.filter(schema=schema),\n many=True,\n context={'request': request}\n )\n return table_serializer.data\n\n\ndef get_queries_list(request, schema):\n if schema is None:\n return []\n query_serializer = QuerySerializer(\n UIQuery.objects.all(),\n many=True,\n context={'request': request}\n )\n return query_serializer.data\n\n\ndef get_ui_type_list(request, database):\n if database is None:\n return []\n type_serializer = TypeSerializer(\n UIType,\n many=True,\n context={'request': request}\n )\n return type_serializer.data\n\n\ndef get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n 'tables': get_table_list(request, schema),\n 'queries': get_queries_list(request, schema),\n 'abstract_types': get_ui_type_list(request, database)\n }\n\n\ndef get_current_database(request, db_name):\n # if there's a DB name passed in, try to retrieve the database, or return a 404 error.\n if db_name is not None:\n return get_object_or_404(Database, name=db_name)\n else:\n try:\n # Try to get the first database available\n return Database.objects.order_by('id').first()\n except Database.DoesNotExist:\n return None\n\n\ndef get_current_schema(request, schema_id, database):\n # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.\n if schema_id is not None:\n return get_object_or_404(Schema, id=schema_id)\n else:\n try:\n # Try to get the first schema in the DB\n return Schema.objects.filter(database=database).order_by('id').first()\n except Schema.DoesNotExist:\n return None\n\n\ndef render_schema(request, database, schema):\n # if there's no schema available, redirect to the schemas page.\n if not schema:\n return redirect('schemas', db_name=database.name)\n else:\n # We are redirecting so that the correct URL is passed to the frontend.\n return redirect('schema_home', db_name=database.name, schema_id=schema.id)\n\n\ndef home(request):\n database = get_current_database(request, None)\n return redirect('schemas', db_name=database.name)\n\n\ndef schema_home(request, db_name, schema_id, **kwargs):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, schema_id, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n\n\ndef schemas(request, db_name):\n database = 
get_current_database(request, db_name)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, None)\n })\n", "path": "mathesar/views.py" } ]
[ { "content": "from django.shortcuts import render, redirect, get_object_or_404\n\nfrom mathesar.models.base import Database, Schema, Table\nfrom mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.api.serializers.tables import TableSerializer\nfrom mathesar.api.serializers.queries import QuerySerializer\nfrom mathesar.database.types import UIType\nfrom mathesar.models.query import UIQuery\n\n\ndef get_schema_list(request, database):\n schema_serializer = SchemaSerializer(\n Schema.objects.filter(database=database),\n many=True,\n context={'request': request}\n )\n return schema_serializer.data\n\n\ndef get_database_list(request):\n database_serializer = DatabaseSerializer(\n Database.objects.all(),\n many=True,\n context={'request': request}\n )\n return database_serializer.data\n\n\ndef get_table_list(request, schema):\n if schema is None:\n return []\n table_serializer = TableSerializer(\n Table.objects.filter(schema=schema),\n many=True,\n context={'request': request}\n )\n return table_serializer.data\n\n\ndef get_queries_list(request, schema):\n if schema is None:\n return []\n query_serializer = QuerySerializer(\n UIQuery.objects.filter(base_table__schema=schema),\n many=True,\n context={'request': request}\n )\n return query_serializer.data\n\n\ndef get_ui_type_list(request, database):\n if database is None:\n return []\n type_serializer = TypeSerializer(\n UIType,\n many=True,\n context={'request': request}\n )\n return type_serializer.data\n\n\ndef get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n 'tables': get_table_list(request, schema),\n 'queries': get_queries_list(request, schema),\n 'abstract_types': get_ui_type_list(request, database)\n }\n\n\ndef get_current_database(request, db_name):\n # if there's a DB name passed in, try to retrieve the database, or return a 404 error.\n if db_name is not None:\n return get_object_or_404(Database, name=db_name)\n else:\n try:\n # Try to get the first database available\n return Database.objects.order_by('id').first()\n except Database.DoesNotExist:\n return None\n\n\ndef get_current_schema(request, schema_id, database):\n # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.\n if schema_id is not None:\n return get_object_or_404(Schema, id=schema_id)\n else:\n try:\n # Try to get the first schema in the DB\n return Schema.objects.filter(database=database).order_by('id').first()\n except Schema.DoesNotExist:\n return None\n\n\ndef render_schema(request, database, schema):\n # if there's no schema available, redirect to the schemas page.\n if not schema:\n return redirect('schemas', db_name=database.name)\n else:\n # We are redirecting so that the correct URL is passed to the frontend.\n return redirect('schema_home', db_name=database.name, schema_id=schema.id)\n\n\ndef home(request):\n database = get_current_database(request, None)\n return redirect('schemas', db_name=database.name)\n\n\ndef schema_home(request, db_name, schema_id, **kwargs):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, schema_id, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n\n\ndef schemas(request, 
db_name):\n database = get_current_database(request, db_name)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, None)\n })\n", "path": "mathesar/views.py" } ]
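The `mathesar/views.py` change shown here is the server-side piece: `get_queries_list` now scopes the exploration list to the requested schema instead of returning every `UIQuery`. A rough way to see the difference from a Django shell (`python manage.py shell`); the schema variable and the data behind it are hypothetical:

```python
from mathesar.models.base import Schema
from mathesar.models.query import UIQuery

# Hypothetical: pick any schema that already has explorations; in an empty
# database this would be None and the check below would not apply.
some_schema = Schema.objects.order_by('id').first()

everything = UIQuery.objects.all()                               # old behaviour
scoped = UIQuery.objects.filter(base_table__schema=some_schema)  # new behaviour

# Every exploration in the scoped set is based on a table from that schema.
assert all(q.base_table.schema_id == some_schema.id for q in scoped)
```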
diff --git a/mathesar/views.py b/mathesar/views.py index 753441223b..8b45f7f1a9 100644 --- a/mathesar/views.py +++ b/mathesar/views.py @@ -42,7 +42,7 @@ def get_queries_list(request, schema): if schema is None: return [] query_serializer = QuerySerializer( - UIQuery.objects.all(), + UIQuery.objects.filter(base_table__schema=schema), many=True, context={'request': request} ) diff --git a/mathesar_ui/src/api/queries/queryList.ts b/mathesar_ui/src/api/queries.ts similarity index 66% rename from mathesar_ui/src/api/queries/queryList.ts rename to mathesar_ui/src/api/queries.ts index 9ab20ac360..e3f8834157 100644 --- a/mathesar_ui/src/api/queries/queryList.ts +++ b/mathesar_ui/src/api/queries.ts @@ -1,13 +1,16 @@ import type { PaginatedResponse } from '@mathesar/utils/api'; import type { Column } from '@mathesar/api/tables/columns'; import type { JpPath } from '@mathesar/api/tables/joinable_tables'; +import type { SchemaEntry } from '@mathesar/AppTypes'; + +export type QueryColumnAlias = string; /** * endpoint: /api/db/v0/queries/<query_id>/ */ export interface QueryInstanceInitialColumn { - alias: string; + alias: QueryColumnAlias; id: Column['id']; jp_path?: JpPath; display_name: string; @@ -53,6 +56,10 @@ export interface QueryInstance { readonly transformations?: QueryInstanceTransformation[]; } +export interface QueryGetResponse extends QueryInstance { + readonly schema: number; +} + /** * endpoint: /api/db/v0/queries/ */ @@ -78,3 +85,35 @@ export interface QueryResultColumn { } export type QueryResultColumns = QueryResultColumn[]; + +/** + * endpoint: /api/db/v0/queries/<query_id>/run/ + */ + +export interface QueryRunRequest { + base_table: QueryInstance['base_table']; + initial_columns: QueryInstanceInitialColumn[]; + transformations?: QueryInstanceTransformation[]; + parameters: { + order_by?: { + field: QueryColumnAlias; + direction: 'asc' | 'desc'; + }[]; + limit: number; + offset: number; + }; +} + +export type QueryColumnMetaData = QueryResultColumn; + +export interface QueryRunResponse { + query: { + schema: SchemaEntry['id']; + base_table: QueryInstance['base_table']; + initial_columns: QueryInstanceInitialColumn[]; + transformations?: QueryInstanceTransformation[]; + }; + records: QueryResultRecords; + output_columns: QueryColumnAlias[]; + column_metadata: Record<string, QueryColumnMetaData>; +} diff --git a/mathesar_ui/src/pages/database/AddEditSchemaModalForm.svelte b/mathesar_ui/src/components/NameAndDescInputModalForm.svelte similarity index 89% rename from mathesar_ui/src/pages/database/AddEditSchemaModalForm.svelte rename to mathesar_ui/src/components/NameAndDescInputModalForm.svelte index 92fb1f441f..ee82b00482 100644 --- a/mathesar_ui/src/pages/database/AddEditSchemaModalForm.svelte +++ b/mathesar_ui/src/components/NameAndDescInputModalForm.svelte @@ -1,4 +1,13 @@ <script lang="ts"> + /** + * This component is currently used for objects where name and description + * need to be entered by the user. This is used in places such as: + * - Adding/Editing schema + * - Saving Exploration + * + * A more common modal-form component can be created by utilizing FormBuilder + * if such a need arises in the future. 
+ */ import { tick } from 'svelte'; import type { ModalController } from '@mathesar-component-library'; import { diff --git a/mathesar_ui/src/components/QueryName.svelte b/mathesar_ui/src/components/QueryName.svelte index 5a0636fc47..52516da34b 100644 --- a/mathesar_ui/src/components/QueryName.svelte +++ b/mathesar_ui/src/components/QueryName.svelte @@ -1,5 +1,5 @@ <script lang="ts"> - import type { QueryInstance } from '@mathesar/api/queries/queryList'; + import type { QueryInstance } from '@mathesar/api/queries'; import { iconExploration } from '@mathesar/icons'; import NameWithIcon from './NameWithIcon.svelte'; diff --git a/mathesar_ui/src/components/SaveStatusIndicator.svelte b/mathesar_ui/src/components/SaveStatusIndicator.svelte index a229b2ce95..dc5326b536 100644 --- a/mathesar_ui/src/components/SaveStatusIndicator.svelte +++ b/mathesar_ui/src/components/SaveStatusIndicator.svelte @@ -16,7 +16,7 @@ class:error={status === 'failure'} > {#if !status} - *Unsaved + *Has unsaved changes {:else if status === 'success'} All changes saved {:else if status === 'processing'} diff --git a/mathesar_ui/src/components/breadcrumb/EntitySelector.svelte b/mathesar_ui/src/components/breadcrumb/EntitySelector.svelte index 9f84c51a1c..ada1e65fab 100644 --- a/mathesar_ui/src/components/breadcrumb/EntitySelector.svelte +++ b/mathesar_ui/src/components/breadcrumb/EntitySelector.svelte @@ -7,12 +7,12 @@ import type { TableEntry } from '@mathesar/api/tables'; import { getTablePageUrl, - getDataExplorerPageUrl, + getExplorationPageUrl, } from '@mathesar/routes/urls'; import type { Database, SchemaEntry } from '@mathesar/AppTypes'; import { iconTable } from '@mathesar/icons'; import { queries as queriesStore } from '@mathesar/stores/queries'; - import type { QueryInstance } from '@mathesar/api/queries/queryList'; + import type { QueryInstance } from '@mathesar/api/queries'; import BreadcrumbSelector from './BreadcrumbSelector.svelte'; import type { BreadcrumbSelectorEntry, @@ -46,11 +46,11 @@ return { type: 'simple', label: queryInstance.name, - href: getDataExplorerPageUrl(database.name, schema.id, queryInstance.id), + href: getExplorationPageUrl(database.name, schema.id, queryInstance.id), icon: iconTable, isActive() { // TODO we don't have a store for what the current query is, so we fallback to comparing hrefs. 
- const entryhref = getDataExplorerPageUrl( + const entryhref = getExplorationPageUrl( database.name, schema.id, queryInstance.id, diff --git a/mathesar_ui/src/components/routing/EventfulRoute.svelte b/mathesar_ui/src/components/routing/EventfulRoute.svelte index 58ad63b67e..dfab775b33 100644 --- a/mathesar_ui/src/components/routing/EventfulRoute.svelte +++ b/mathesar_ui/src/components/routing/EventfulRoute.svelte @@ -7,6 +7,6 @@ </script> <Route {path} {firstmatch} let:meta> - <Observer {meta} on:routeUpdated on:routeLoaded /> + <Observer {meta} on:load on:unload /> <slot {meta} /> </Route> diff --git a/mathesar_ui/src/components/routing/MultiPathRoute.svelte b/mathesar_ui/src/components/routing/MultiPathRoute.svelte new file mode 100644 index 0000000000..1d56f0f1f4 --- /dev/null +++ b/mathesar_ui/src/components/routing/MultiPathRoute.svelte @@ -0,0 +1,47 @@ +<script lang="ts"> + import { tick } from 'svelte'; + import type { TinroRouteMeta } from 'tinro'; + import EventfulRoute from './EventfulRoute.svelte'; + import type { RoutePath } from './utils'; + + export let paths: RoutePath[]; + + let currentPath: + | { + routePath: RoutePath; + meta: TinroRouteMeta; + } + | undefined; + + function setPath(path: RoutePath, _meta: TinroRouteMeta) { + currentPath = { + routePath: path, + meta: _meta, + }; + } + + async function clearPath(path: RoutePath) { + /** + * This is important. + * This function body should only execute after a tick, + * once the next path without paths is loaded. + */ + await tick(); + if (currentPath?.routePath === path) { + currentPath = undefined; + } + } +</script> + +{#each paths as rp (rp.name)} + <EventfulRoute + path={rp.path} + on:load={(e) => setPath(rp, e.detail)} + on:unload={() => clearPath(rp)} + firstmatch + /> +{/each} + +{#if currentPath} + <slot meta={currentPath.meta} path={currentPath.routePath.name} /> +{/if} diff --git a/mathesar_ui/src/components/routing/Observer.svelte b/mathesar_ui/src/components/routing/Observer.svelte index 1a36043b04..c0c08b3541 100644 --- a/mathesar_ui/src/components/routing/Observer.svelte +++ b/mathesar_ui/src/components/routing/Observer.svelte @@ -1,19 +1,25 @@ <script lang="ts"> - import { onMount, createEventDispatcher } from 'svelte'; + import { createEventDispatcher, onMount } from 'svelte'; import type { TinroRouteMeta } from 'tinro'; const dispatch = createEventDispatcher<{ - routeUpdated: TinroRouteMeta; - routeLoaded: TinroRouteMeta; + load: TinroRouteMeta; + update: TinroRouteMeta; + unload: undefined; }>(); export let meta: TinroRouteMeta; - $: if ($meta) { - dispatch('routeUpdated', $meta); - } - onMount(() => { - dispatch('routeLoaded', $meta); + const unsubsriber = $meta.subscribe((metaInfo) => { + if (metaInfo) { + dispatch('load', metaInfo); + } + }); + + return () => { + unsubsriber(); + dispatch('unload'); + }; }); </script> diff --git a/mathesar_ui/src/components/routing/utils.ts b/mathesar_ui/src/components/routing/utils.ts new file mode 100644 index 0000000000..8b3218fbd3 --- /dev/null +++ b/mathesar_ui/src/components/routing/utils.ts @@ -0,0 +1 @@ +export type RoutePath = { name: string; path: string }; diff --git a/mathesar_ui/src/components/sheet/Sheet.svelte b/mathesar_ui/src/components/sheet/Sheet.svelte index 27f5542cf8..968d984ce7 100644 --- a/mathesar_ui/src/components/sheet/Sheet.svelte +++ b/mathesar_ui/src/components/sheet/Sheet.svelte @@ -117,6 +117,15 @@ z-index: 5; } + :global([data-sheet-element='cell'][data-cell-control='true']) { + font-size: var(--text-size-x-small); + padding: 0 1.5rem; 
+ color: var(--color-text-muted); + display: inline-flex; + align-items: center; + height: 100%; + } + :global([data-sheet-element='row']) { transition: all 0.2s cubic-bezier(0, 0, 0.2, 1); } diff --git a/mathesar_ui/src/components/sheet/SheetCell.svelte b/mathesar_ui/src/components/sheet/SheetCell.svelte index 0b4427f91e..fb70966943 100644 --- a/mathesar_ui/src/components/sheet/SheetCell.svelte +++ b/mathesar_ui/src/components/sheet/SheetCell.svelte @@ -11,10 +11,12 @@ $: styleMap = $columnStyleMap.get(columnIdentifierKey); export let isStatic = false; + export let isControlCell = false; $: htmlAttributes = { 'data-sheet-element': 'cell', 'data-cell-static': isStatic ? true : undefined, + 'data-cell-control': isControlCell ? true : undefined, }; </script> diff --git a/mathesar_ui/src/icons.ts b/mathesar_ui/src/icons.ts index 1f2c9e1953..9da34295e7 100644 --- a/mathesar_ui/src/icons.ts +++ b/mathesar_ui/src/icons.ts @@ -47,6 +47,7 @@ import { faUnlink, faUpload, faUser, + faSave, } from '@fortawesome/free-solid-svg-icons'; import type { IconProps } from '@mathesar-component-library/types'; @@ -84,6 +85,7 @@ export const iconSortAscending: IconProps = { data: faSortAmountDownAlt }; export const iconSortDescending: IconProps = { data: faSortAmountDown }; export const iconUndo: IconProps = { data: faUndo }; export const iconUnlink: IconProps = { data: faUnlink }; +export const iconSave: IconProps = { data: faSave }; // THINGS // diff --git a/mathesar_ui/src/pages/data-explorer/DataExplorerPage.svelte b/mathesar_ui/src/pages/data-explorer/DataExplorerPage.svelte index 9e1c416724..b68a2b6d9e 100644 --- a/mathesar_ui/src/pages/data-explorer/DataExplorerPage.svelte +++ b/mathesar_ui/src/pages/data-explorer/DataExplorerPage.svelte @@ -1,21 +1,13 @@ <script lang="ts"> - import { router } from 'tinro'; - import type { Database, SchemaEntry } from '@mathesar/AppTypes'; + import type { SchemaEntry } from '@mathesar/AppTypes'; import LayoutWithHeader from '@mathesar/layouts/LayoutWithHeader.svelte'; - import QueryBuilder from '@mathesar/systems/query-builder/QueryBuilder.svelte'; - import type QueryManager from '@mathesar/systems/query-builder/QueryManager'; - import { getSchemaPageUrl } from '@mathesar/routes/urls'; + import { DataExplorer } from '@mathesar/systems/data-explorer'; + import type { QueryManager } from '@mathesar/systems/data-explorer/types'; - export let database: Database; export let schema: SchemaEntry; export let queryManager: QueryManager; $: ({ query } = queryManager); - - function gotoSchema() { - const schemaURL = getSchemaPageUrl(database.name, schema.id); - router.goto(schemaURL); - } </script> <svelte:head> @@ -23,5 +15,5 @@ </svelte:head> <LayoutWithHeader fitViewport> - <QueryBuilder {queryManager} on:close={gotoSchema} /> + <DataExplorer {queryManager} /> </LayoutWithHeader> diff --git a/mathesar_ui/src/pages/database/AddEditSchemaModal.svelte b/mathesar_ui/src/pages/database/AddEditSchemaModal.svelte index cce21d7b2d..c6e3cea26e 100644 --- a/mathesar_ui/src/pages/database/AddEditSchemaModal.svelte +++ b/mathesar_ui/src/pages/database/AddEditSchemaModal.svelte @@ -6,7 +6,7 @@ createSchema, updateSchema, } from '@mathesar/stores/schemas'; - import AddEditSchemaModalForm from '@mathesar/pages/database/AddEditSchemaModalForm.svelte'; + import NameAndDescInputModalForm from '@mathesar/components/NameAndDescInputModalForm.svelte'; import Identifier from '@mathesar/components/Identifier.svelte'; import { toast } from '@mathesar/stores/toast'; @@ -48,7 +48,7 @@ } </script> 
-<AddEditSchemaModalForm +<NameAndDescInputModalForm {controller} {save} {getNameValidationErrors} @@ -62,4 +62,4 @@ Create Schema {/if} </span> -</AddEditSchemaModalForm> +</NameAndDescInputModalForm> diff --git a/mathesar_ui/src/pages/exploration/ActionsPane.svelte b/mathesar_ui/src/pages/exploration/ActionsPane.svelte new file mode 100644 index 0000000000..ac8bfd97f1 --- /dev/null +++ b/mathesar_ui/src/pages/exploration/ActionsPane.svelte @@ -0,0 +1,104 @@ +<script lang="ts"> + import { router } from 'tinro'; + import type { Database, SchemaEntry } from '@mathesar/AppTypes'; + import { Button, Icon, iconError } from '@mathesar-component-library'; + import QueryName from '@mathesar/components/QueryName.svelte'; + import EntityType from '@mathesar/components/EntityType.svelte'; + import { confirmDelete } from '@mathesar/stores/confirmation'; + import { iconDelete, iconEdit, iconRefresh } from '@mathesar/icons'; + import type { QueryRunner } from '@mathesar/systems/data-explorer/types'; + import type { QueryInstance } from '@mathesar/api/queries'; + import { + getSchemaPageUrl, + getExplorationEditorPageUrl, + } from '@mathesar/routes/urls'; + import { deleteQuery } from '@mathesar/stores/queries'; + + export let database: Database; + export let schema: SchemaEntry; + export let query: QueryInstance; + export let queryRunner: QueryRunner; + + $: ({ runState } = queryRunner); + $: isLoading = $runState?.state === 'processing'; + $: isError = $runState?.state === 'failure'; + + function handleDeleteTable() { + void confirmDelete({ + identifierType: 'Table', + onProceed: async () => { + await deleteQuery(query.id); + router.goto(getSchemaPageUrl(database.name, schema.id)); + }, + }); + } +</script> + +<div class="actions-pane"> + <div class="heading"> + <EntityType>Exploration</EntityType> + <h1><QueryName {query} /></h1> + </div> + <a href={getExplorationEditorPageUrl(database.name, schema.id, query.id)}> + <Button> + <Icon {...iconEdit} /> + <span>Edit</span> + </Button> + </a> + <Button disabled={isLoading} size="medium" on:click={handleDeleteTable}> + <Icon {...iconDelete} /> + <span>Delete</span> + </Button> + <div class="loading-info"> + <Button + size="medium" + disabled={isLoading} + on:click={() => queryRunner.run()} + > + <Icon + {...isError && !isLoading ? iconError : iconRefresh} + spin={isLoading} + /> + <span> + {#if isLoading} + Loading + {:else if isError} + Retry + {:else} + Refresh + {/if} + </span> + </Button> + </div> +</div> + +<!-- + This currently duplicates styles from table actions page. 
+ TODO: Make ActionsPage a common layout component +--> +<style lang="scss"> + .actions-pane { + border-bottom: 1px solid var(--color-gray-dark); + background-color: var(--color-white); + position: relative; + display: flex; + align-items: center; + gap: 0.5rem; + padding-right: 1rem; + } + .heading { + display: flex; + flex-direction: column; + border-right: 1px solid var(--color-gray-medium); + padding: 1rem; + margin-right: 0.5rem; + } + .heading h1 { + font-size: var(--text-size-x-large); + font-weight: 500; + margin-bottom: 0; + } + .loading-info { + margin-left: auto; + } +</style> diff --git a/mathesar_ui/src/pages/exploration/ExplorationPage.svelte b/mathesar_ui/src/pages/exploration/ExplorationPage.svelte new file mode 100644 index 0000000000..8b2ee18964 --- /dev/null +++ b/mathesar_ui/src/pages/exploration/ExplorationPage.svelte @@ -0,0 +1,50 @@ +<script lang="ts"> + import type { Database, SchemaEntry } from '@mathesar/AppTypes'; + import LayoutWithHeader from '@mathesar/layouts/LayoutWithHeader.svelte'; + import { + ExplorationResult, + QueryModel, + QueryRunner, + } from '@mathesar/systems/data-explorer'; + import type { QueryInstance } from '@mathesar/api/queries'; + import { currentDbAbstractTypes } from '@mathesar/stores/abstract-types'; + import type { AbstractTypesMap } from '@mathesar/stores/abstract-types/types'; + import ActionsPane from './ActionsPane.svelte'; + + export let database: Database; + export let schema: SchemaEntry; + export let query: QueryInstance; + + let queryRunner: QueryRunner | undefined; + + function createQueryRunner( + _query: QueryInstance, + abstractTypesMap: AbstractTypesMap, + ) { + queryRunner?.destroy(); + queryRunner = new QueryRunner(new QueryModel(_query), abstractTypesMap); + } + + $: createQueryRunner(query, $currentDbAbstractTypes.data); +</script> + +<svelte:head> + <title>{query.name} | {schema.name} | Mathesar</title> +</svelte:head> + +<LayoutWithHeader fitViewport> + {#if queryRunner} + <div class="exploration-page"> + <ActionsPane {query} {queryRunner} {database} {schema} /> + <ExplorationResult {queryRunner} /> + </div> + {/if} +</LayoutWithHeader> + +<style lang="scss"> + .exploration-page { + display: grid; + grid-template: auto 1fr / 1fr; + height: 100%; + } +</style> diff --git a/mathesar_ui/src/pages/schema/SchemaPage.svelte b/mathesar_ui/src/pages/schema/SchemaPage.svelte index f25dcfcbeb..39f8f3beca 100644 --- a/mathesar_ui/src/pages/schema/SchemaPage.svelte +++ b/mathesar_ui/src/pages/schema/SchemaPage.svelte @@ -8,6 +8,7 @@ import LayoutWithHeader from '@mathesar/layouts/LayoutWithHeader.svelte'; import { getTablePageUrl, + getExplorationPageUrl, getDataExplorerPageUrl, getImportPageUrl, getImportPreviewPageUrl, @@ -19,6 +20,12 @@ export let database: Database; export let schema: SchemaEntry; + /** + * This property will be used for the latest design changes + * Based on the subroute, the desired tab/section will be selected + */ + export let section = 'overview'; + $: tablesMap = $tablesStore.data; $: queriesMap = $queries.data; @@ -91,7 +98,7 @@ <ul class="entity-list"> {#each [...queriesMap.values()] as query (query.id)} <li class="entity-list-item"> - <a href={getDataExplorerPageUrl(database.name, schema.id, query.id)}> + <a href={getExplorationPageUrl(database.name, schema.id, query.id)}> <QueryName {query} /> </a> </li> diff --git a/mathesar_ui/src/routes/DataExplorerRoute.svelte b/mathesar_ui/src/routes/DataExplorerRoute.svelte index 8e6c8c81de..5a28d57a1f 100644 --- 
a/mathesar_ui/src/routes/DataExplorerRoute.svelte +++ b/mathesar_ui/src/routes/DataExplorerRoute.svelte @@ -1,35 +1,36 @@ <script lang="ts"> - import { readable } from 'svelte/store'; import { router } from 'tinro'; - import type { TinroRouteMeta } from 'tinro'; - import type { Database, SchemaEntry } from '@mathesar/AppTypes'; - import EventfulRoute from '@mathesar/components/routing/EventfulRoute.svelte'; - import QueryManager from '@mathesar/systems/query-builder/QueryManager'; - import QueryModel from '@mathesar/systems/query-builder/QueryModel'; - import { queries, getQuery } from '@mathesar/stores/queries'; + import { + QueryManager, + QueryModel, + constructQueryModelFromTerseSummarizationHash, + } from '@mathesar/systems/data-explorer'; + import { getQuery } from '@mathesar/stores/queries'; import { currentDbAbstractTypes } from '@mathesar/stores/abstract-types'; import type { CancellablePromise } from '@mathesar/component-library'; - import type { QueryInstance } from '@mathesar/api/queries/queryList'; + import type { QueryInstance } from '@mathesar/api/queries'; import type { UnsavedQueryInstance } from '@mathesar/stores/queries'; - import { getAvailableName } from '@mathesar/utils/db'; import DataExplorerPage from '@mathesar/pages/data-explorer/DataExplorerPage.svelte'; import ErrorPage from '@mathesar/pages/ErrorPage.svelte'; - import { getDataExplorerPageUrl } from '@mathesar/routes/urls'; - import { constructQueryModelFromTerseSummarizationHash } from '@mathesar/systems/query-builder/urlSerializationUtils'; + import { + getDataExplorerPageUrl, + getExplorationEditorPageUrl, + } from '@mathesar/routes/urls'; import AppendBreadcrumb from '@mathesar/components/breadcrumb/AppendBreadcrumb.svelte'; import { iconExploration } from '@mathesar/icons'; + import { readable } from 'svelte/store'; export let database: Database; export let schema: SchemaEntry; + export let queryId: number | undefined; let is404 = false; let queryManager: QueryManager | undefined; let queryLoadPromise: CancellablePromise<QueryInstance>; - $: queryStore = queryManager ? queryManager.query : readable(undefined); - $: query = $queryStore; + $: ({ query } = queryManager ?? 
{ query: readable(undefined) }); function createQueryManager(queryInstance: UnsavedQueryInstance) { queryManager?.destroy(); @@ -40,7 +41,7 @@ is404 = false; queryManager.on('save', async (instance) => { try { - const url = getDataExplorerPageUrl( + const url = getExplorationEditorPageUrl( database.name, schema.id, instance.id, @@ -66,45 +67,35 @@ // An unsaved query is already open return; } - let newQueryModel = { - name: getAvailableName( - 'New_Exploration', - new Set([...$queries.data.values()].map((e) => e.name)), - ), - }; const { hash } = $router; if (hash) { try { - newQueryModel = { - ...newQueryModel, - ...constructQueryModelFromTerseSummarizationHash(hash), - }; + const newQueryModel = + constructQueryModelFromTerseSummarizationHash(hash); router.location.hash.clear(); createQueryManager(newQueryModel); - queryManager?.save(); return; } catch { // fail silently console.error('Unable to create query model from hash', hash); } } - createQueryManager(newQueryModel); + createQueryManager({}); } - async function loadSavedQuery(meta: TinroRouteMeta) { - const queryId = parseInt(meta.params.queryId, 10); - if (Number.isNaN(queryId)) { + async function loadSavedQuery(_queryId: number) { + if (Number.isNaN(_queryId)) { removeQueryManager(); return; } - if (queryManager && queryManager.getQueryModel().id === queryId) { + if (queryManager && queryManager.getQueryModel().id === _queryId) { // The requested query is already open return; } queryLoadPromise?.cancel(); - queryLoadPromise = getQuery(queryId); + queryLoadPromise = getQuery(_queryId); try { const queryInstance = await queryLoadPromise; createQueryManager(queryInstance); @@ -113,32 +104,42 @@ removeQueryManager(); } } -</script> -<AppendBreadcrumb - item={{ - type: 'simple', - href: getDataExplorerPageUrl(database.name, schema.id, query?.id), - label: query?.name || 'Data Explorer', - icon: iconExploration, - }} -/> + function createOrLoadQuery(_queryId?: number) { + if (_queryId) { + void loadSavedQuery(_queryId); + } else { + createNewQuery(); + } + } + + $: createOrLoadQuery(queryId); +</script> -<EventfulRoute - path="/:queryId" - on:routeUpdated={(e) => loadSavedQuery(e.detail)} - on:routeLoaded={(e) => loadSavedQuery(e.detail)} -/> -<EventfulRoute - path="/" - on:routeUpdated={createNewQuery} - on:routeLoaded={createNewQuery} -/> +{#if $query?.id} + <AppendBreadcrumb + item={{ + type: 'simple', + href: getExplorationEditorPageUrl(database.name, schema.id, $query.id), + label: $query?.name ? 
`Edit: ${$query?.name}` : 'Data Explorer', + icon: iconExploration, + }} + /> +{:else} + <AppendBreadcrumb + item={{ + type: 'simple', + href: getDataExplorerPageUrl(database.name, schema.id), + label: 'Data Explorer', + icon: iconExploration, + }} + /> +{/if} <!--TODO: Add loading state--> {#if queryManager} - <DataExplorerPage {database} {schema} {queryManager} /> + <DataExplorerPage {schema} {queryManager} /> {:else if is404} <ErrorPage>Exploration not found.</ErrorPage> {/if} diff --git a/mathesar_ui/src/routes/ExplorationRoute.svelte b/mathesar_ui/src/routes/ExplorationRoute.svelte new file mode 100644 index 0000000000..ff83905eee --- /dev/null +++ b/mathesar_ui/src/routes/ExplorationRoute.svelte @@ -0,0 +1,32 @@ +<script lang="ts"> + import type { Database, SchemaEntry } from '@mathesar/AppTypes'; + import AppendBreadcrumb from '@mathesar/components/breadcrumb/AppendBreadcrumb.svelte'; + import { getExplorationPageUrl } from '@mathesar/routes/urls'; + import { iconExploration } from '@mathesar/icons'; + import ExplorationPage from '@mathesar/pages/exploration/ExplorationPage.svelte'; + import { queries } from '@mathesar/stores/queries'; + import ErrorPage from '@mathesar/pages/ErrorPage.svelte'; + + export let database: Database; + export let schema: SchemaEntry; + export let queryId: number; + + $: query = $queries.data.get(queryId); +</script> + +{#if query} + <AppendBreadcrumb + item={{ + type: 'simple', + href: getExplorationPageUrl(database.name, schema.id, queryId), + label: query?.name ?? 'Exploration', + icon: iconExploration, + }} + /> + + <ExplorationPage {database} {schema} {query} /> +{:else if Number.isNaN(queryId)} + <ErrorPage>The specified URL is not found.</ErrorPage> +{:else} + <ErrorPage>Table with id {queryId} not found.</ErrorPage> +{/if} diff --git a/mathesar_ui/src/routes/SchemaRoute.svelte b/mathesar_ui/src/routes/SchemaRoute.svelte index a9c223657e..09055a9776 100644 --- a/mathesar_ui/src/routes/SchemaRoute.svelte +++ b/mathesar_ui/src/routes/SchemaRoute.svelte @@ -7,9 +7,11 @@ import SchemaPage from '@mathesar/pages/schema/SchemaPage.svelte'; import { currentSchemaId, schemas } from '@mathesar/stores/schemas'; import AppendBreadcrumb from '@mathesar/components/breadcrumb/AppendBreadcrumb.svelte'; + import MultiPathRoute from '@mathesar/components/routing/MultiPathRoute.svelte'; import DataExplorerRoute from './DataExplorerRoute.svelte'; import TableRoute from './TableRoute.svelte'; import ImportRoute from './ImportRoute.svelte'; + import ExplorationRoute from './ExplorationRoute.svelte'; export let database: Database; export let schemaId: number; @@ -27,25 +29,53 @@ {#if schema} <AppendBreadcrumb item={{ type: 'schema', database, schema }} /> - <Route path="/"> - <SchemaPage {database} {schema} /> - </Route> - - <Route path="/import/*"> + <Route path="/import/*" firstmatch> <ImportRoute {database} {schema} /> </Route> - <Route path="/data-explorer/*"> - <DataExplorerRoute {database} {schema} /> - </Route> - - <Route path="/:tableId/*" let:meta firstmatch> + <Route path="/tables/:tableId/*" let:meta firstmatch> <TableRoute {database} {schema} tableId={parseInt(meta.params.tableId, 10)} /> </Route> + + <MultiPathRoute + paths={[ + { name: 'edit-exploration', path: '/explorations/edit/:queryId' }, + { name: 'new-exploration', path: '/data-explorer/' }, + ]} + let:path + let:meta + > + <DataExplorerRoute + {database} + {schema} + queryId={path === 'edit-exploration' + ? 
parseInt(meta.params.queryId, 10) + : undefined} + /> + </MultiPathRoute> + + <Route path="/explorations/:queryId" let:meta firstmatch> + <ExplorationRoute + {database} + {schema} + queryId={parseInt(meta.params.queryId, 10)} + /> + </Route> + + <MultiPathRoute + paths={[ + { name: 'tables', path: '/tables/' }, + { name: 'explorations', path: '/explorations/' }, + { name: 'overview', path: '/' }, + ]} + let:path + > + <SchemaPage {database} {schema} section={path} /> + </MultiPathRoute> {:else} <ErrorPage>Schema not found.</ErrorPage> {/if} diff --git a/mathesar_ui/src/routes/urls.ts b/mathesar_ui/src/routes/urls.ts index ef16259698..76b60e465d 100644 --- a/mathesar_ui/src/routes/urls.ts +++ b/mathesar_ui/src/routes/urls.ts @@ -27,20 +27,32 @@ export function getImportPreviewPageUrl( export function getDataExplorerPageUrl( databaseName: string, schemaId: number, - queryId?: number, ): string { - if (queryId !== undefined) { - return `/${databaseName}/${schemaId}/data-explorer/${queryId}/`; - } return `/${databaseName}/${schemaId}/data-explorer/`; } +export function getExplorationPageUrl( + databaseName: string, + schemaId: number, + queryId: number, +): string { + return `/${databaseName}/${schemaId}/explorations/${queryId}/`; +} + +export function getExplorationEditorPageUrl( + databaseName: string, + schemaId: number, + queryId: number, +): string { + return `/${databaseName}/${schemaId}/explorations/edit/${queryId}/`; +} + export function getTablePageUrl( databaseName: string, schemaId: number, tableId: number, ): string { - return `/${databaseName}/${schemaId}/${tableId}/`; + return `/${databaseName}/${schemaId}/tables/${tableId}/`; } export function getRecordPageUrl( @@ -49,5 +61,5 @@ export function getRecordPageUrl( tableId: number, recordId: unknown, ): string { - return `/${databaseName}/${schemaId}/${tableId}/${String(recordId)}`; + return `/${databaseName}/${schemaId}/tables/${tableId}/${String(recordId)}`; } diff --git a/mathesar_ui/src/stores/queries.ts b/mathesar_ui/src/stores/queries.ts index 6fb4fbf705..584a150ad8 100644 --- a/mathesar_ui/src/stores/queries.ts +++ b/mathesar_ui/src/stores/queries.ts @@ -1,11 +1,16 @@ import { derived, writable, get } from 'svelte/store'; import type { Readable, Writable, Unsubscriber } from 'svelte/store'; -import { getAPI, postAPI, putAPI } from '@mathesar/utils/api'; +import { deleteAPI, getAPI, postAPI, putAPI } from '@mathesar/utils/api'; import type { RequestStatus, PaginatedResponse } from '@mathesar/utils/api'; import { preloadCommonData } from '@mathesar/utils/preloadData'; import CacheManager from '@mathesar/utils/CacheManager'; import type { SchemaEntry } from '@mathesar/AppTypes'; -import type { QueryInstance } from '@mathesar/api/queries/queryList'; +import type { + QueryInstance, + QueryGetResponse, + QueryRunRequest, + QueryRunResponse, +} from '@mathesar/api/queries'; import { CancellablePromise } from '@mathesar-component-library'; import { currentSchemaId } from './schemas'; @@ -60,6 +65,12 @@ function setSchemaQueriesStore( return store; } +function findSchemaStoreForTable(id: QueryInstance['id']) { + return [...schemasCacheManager.cache.values()].find((entry) => + get(entry).data.has(id), + ); +} + export async function refetchQueriesForSchema( schemaId: SchemaEntry['id'], ): Promise<QueriesStoreSubstance | undefined> { @@ -151,14 +162,10 @@ export const queries: Readable<QueriesStoreSubstance> = derived( export function createQuery( newQuery: UnsavedQueryInstance, -): CancellablePromise<QueryInstance> { - const promise = 
postAPI<QueryInstance>('/api/db/v0/queries/', newQuery); - void promise.then(() => { - // TODO: Get schemaId as a query property - const schemaId = get(currentSchemaId); - if (schemaId) { - void refetchQueriesForSchema(schemaId); - } +): CancellablePromise<QueryGetResponse> { + const promise = postAPI<QueryGetResponse>('/api/db/v0/queries/', newQuery); + void promise.then((instance) => { + void refetchQueriesForSchema(instance.schema); return undefined; }); return promise; @@ -221,3 +228,22 @@ export function getQuery( } return new CancellablePromise((resolve) => resolve()); } + +export function runQuery( + request: QueryRunRequest, +): CancellablePromise<QueryRunResponse> { + return postAPI('/api/db/v0/queries/run/', request); +} + +export function deleteQuery(queryId: number): CancellablePromise<void> { + const promise = deleteAPI<void>(`/api/db/v0/queries/${queryId}/`); + + void promise.then(() => { + findSchemaStoreForTable(queryId)?.update((storeData) => { + storeData.data.delete(queryId); + return { ...storeData, data: new Map(storeData.data) }; + }); + return undefined; + }); + return promise; +} diff --git a/mathesar_ui/src/systems/query-builder/QueryBuilder.svelte b/mathesar_ui/src/systems/data-explorer/QueryBuilder.svelte similarity index 57% rename from mathesar_ui/src/systems/query-builder/QueryBuilder.svelte rename to mathesar_ui/src/systems/data-explorer/QueryBuilder.svelte index 664a63685d..777d9f2e1f 100644 --- a/mathesar_ui/src/systems/query-builder/QueryBuilder.svelte +++ b/mathesar_ui/src/systems/data-explorer/QueryBuilder.svelte @@ -1,26 +1,33 @@ <script lang="ts"> - import { createEventDispatcher } from 'svelte'; import { Icon, - LabeledInput, InputGroup, Button, + SpinnerButton, } from '@mathesar-component-library'; import EditableTitle from '@mathesar/components/EditableTitle.svelte'; import SelectTableWithinCurrentSchema from '@mathesar/components/SelectTableWithinCurrentSchema.svelte'; import SaveStatusIndicator from '@mathesar/components/SaveStatusIndicator.svelte'; + import NameAndDescInputModalForm from '@mathesar/components/NameAndDescInputModalForm.svelte'; import { tables as tablesDataStore } from '@mathesar/stores/tables'; import type { TableEntry } from '@mathesar/api/tables'; import { queries } from '@mathesar/stores/queries'; import { getAvailableName } from '@mathesar/utils/db'; - import { iconExploration, iconRedo, iconUndo } from '@mathesar/icons'; + import { + iconExploration, + iconRedo, + iconUndo, + iconSave, + } from '@mathesar/icons'; + import { modal } from '@mathesar/stores/modal'; + import { toast } from '@mathesar/stores/toast'; import type QueryManager from './QueryManager'; import type { ColumnWithLink } from './utils'; import ColumnSelectionPane from './column-selection-pane/ColumnSelectionPane.svelte'; import ResultPane from './result-pane/ResultPane.svelte'; import OutputConfigSidebar from './output-config-sidebar/OutputConfigSidebar.svelte'; - const dispatch = createEventDispatcher(); + const saveModalController = modal.spawnModalController(); export let queryManager: QueryManager; @@ -52,7 +59,7 @@ queryManager.selectColumn(alias); } - function handleQueryNameChange(e: Event) { + function handleNameChange(e: Event) { const target = e.target as HTMLInputElement; if (target.value.trim() === '') { target.value = getAvailableName( @@ -62,24 +69,87 @@ } void queryManager.update((q) => q.withName(target.value)); } + + function getNameValidationErrors(name: string) { + const trimmedName = name.trim(); + if (!trimmedName) { + return ['Name 
cannot be empty.']; + } + const isDuplicate = Array.from($queries.data ?? []).some( + ([, s]) => s.name.toLowerCase().trim() === trimmedName, + ); + if (isDuplicate) { + return ['An exploration with that name already exists.']; + } + return []; + } + + async function save() { + try { + await queryManager.save(); + } catch (err) { + toast.fromError(err); + } + } + + // TODO: Handle description + async function create(name: string) { + try { + await queryManager.update((q) => q.withName(name)); + await save(); + } catch (err) { + toast.fromError(err); + } + } + + async function saveExistingOrCreateNew() { + if ($query.isSaved()) { + await save(); + } else { + saveModalController.open(); + } + } </script> -<div class="query-builder"> +<div class="data-explorer"> <div class="header"> <div class="title-wrapper"> <div class="icon"> <Icon {...iconExploration} size="1.5em" /> </div> - <EditableTitle - value={$query.name} - size={1.266} - on:change={handleQueryNameChange} - /> + {#if $query.isSaved()} + <EditableTitle + value={$query.name} + size={1.266} + on:change={handleNameChange} + /> + <div class="base-table-holder"> + Based on {currentTable?.name} + </div> + {:else} + <div class="title">Exploring</div> + <div class="base-table-holder"> + <SelectTableWithinCurrentSchema + autoSelect="none" + table={currentTable} + on:change={(e) => onBaseTableChange(e.detail)} + /> + </div> + {/if} </div> - <SaveStatusIndicator status={$state.saveState?.state} /> <div class="actions"> + {#if $query.isSaved()} + <SaveStatusIndicator status={$state.saveState?.state} /> + {/if} + <!-- TODO: Change disabled condition to is_valid(query) --> + <SpinnerButton + label="Save" + icon={iconSave} + disabled={!$query.base_table} + onClick={saveExistingOrCreateNew} + /> <InputGroup> <Button appearance="default" @@ -98,34 +168,39 @@ <span>Redo</span> </Button> </InputGroup> - <Button appearance="default" on:click={() => dispatch('close')} - >Close</Button - > </div> </div> <div class="content-pane"> - <div class="input-sidebar"> - <div class="base-table-selector"> - <LabeledInput label="Select Base Table" layout="stacked"> - <SelectTableWithinCurrentSchema - autoSelect="clear" - table={currentTable} - on:change={(e) => onBaseTableChange(e.detail)} - /> - </LabeledInput> + {#if !$query.base_table} + <div class="help-text">Please select a table to start exploring</div> + {:else} + <div class="input-sidebar"> + <ColumnSelectionPane + {queryManager} + on:add={(e) => addColumn(e.detail)} + /> </div> - <ColumnSelectionPane {queryManager} on:add={(e) => addColumn(e.detail)} /> - </div> - <!-- Do not use inputColumnManager in ResultPane because - we'd also use ResultPane for query page where input column - details would not be available--> - <ResultPane {queryManager} /> - <OutputConfigSidebar {queryManager} /> + <!-- Do not use inputColumnManager in ResultPane because + we'd also use ResultPane for query page where input column + details would not be available--> + <ResultPane queryRunner={queryManager} /> + <OutputConfigSidebar {queryManager} /> + {/if} </div> </div> +<NameAndDescInputModalForm + controller={saveModalController} + save={create} + {getNameValidationErrors} + getInitialName={() => $query.name ?? 
''} + getInitialDescription={() => ''} +> + <span slot="title"> Save Exploration </span> +</NameAndDescInputModalForm> + <style lang="scss"> - .query-builder { + .data-explorer { position: absolute; left: 0; right: 0; @@ -136,17 +211,27 @@ display: flex; align-items: center; height: 4rem; - border-bottom: 1px solid var(--color-gray-dark); + border-bottom: 1px solid var(--color-gray-medium); position: relative; overflow: hidden; + background: var(--color-gray-lighter); .title-wrapper { display: flex; align-items: center; overflow: hidden; padding: 0.7rem 1rem; - margin-right: 1rem; - border-right: 1px solid var(--color-gray-medium); + + .title { + font-size: 1.266rem; + } + + .base-table-holder { + flex-grow: 0; + flex-shrink: 0; + margin-left: 0.6rem; + min-width: 14rem; + } } .icon { @@ -181,6 +266,10 @@ right: 0; overflow-x: auto; + .help-text { + padding: 1rem; + } + .input-sidebar { width: 20rem; border-right: 1px solid var(--color-gray-medium); @@ -189,17 +278,6 @@ flex-basis: 20rem; display: flex; flex-direction: column; - - .base-table-selector { - border-bottom: 1px solid var(--color-gray-medium); - padding: 1rem; - background: var(--color-gray-light); - flex-grow: 0; - flex-shrink: 0; - :global(label) { - font-weight: 500; - } - } } } } diff --git a/mathesar_ui/src/systems/query-builder/QueryFilterTransformationModel.ts b/mathesar_ui/src/systems/data-explorer/QueryFilterTransformationModel.ts similarity index 97% rename from mathesar_ui/src/systems/query-builder/QueryFilterTransformationModel.ts rename to mathesar_ui/src/systems/data-explorer/QueryFilterTransformationModel.ts index 4df9ad0dbd..57c017cc7c 100644 --- a/mathesar_ui/src/systems/query-builder/QueryFilterTransformationModel.ts +++ b/mathesar_ui/src/systems/data-explorer/QueryFilterTransformationModel.ts @@ -1,4 +1,4 @@ -import type { QueryInstanceFilterTransformation } from '@mathesar/api/queries/queryList'; +import type { QueryInstanceFilterTransformation } from '@mathesar/api/queries'; export interface QueryFilterTransformationEntry { columnIdentifier: string; diff --git a/mathesar_ui/src/systems/query-builder/QueryListEntry.ts b/mathesar_ui/src/systems/data-explorer/QueryListEntry.ts similarity index 100% rename from mathesar_ui/src/systems/query-builder/QueryListEntry.ts rename to mathesar_ui/src/systems/data-explorer/QueryListEntry.ts diff --git a/mathesar_ui/src/systems/query-builder/QueryManager.ts b/mathesar_ui/src/systems/data-explorer/QueryManager.ts similarity index 61% rename from mathesar_ui/src/systems/query-builder/QueryManager.ts rename to mathesar_ui/src/systems/data-explorer/QueryManager.ts index 88e8c8472e..7553026d90 100644 --- a/mathesar_ui/src/systems/query-builder/QueryManager.ts +++ b/mathesar_ui/src/systems/data-explorer/QueryManager.ts @@ -4,23 +4,16 @@ import { EventHandler, ImmutableMap, isDefinedNonNullable, + CancellablePromise, } from '@mathesar-component-library'; -import type { CancellablePromise } from '@mathesar-component-library/types'; import { getAPI } from '@mathesar/utils/api'; import type { RequestStatus } from '@mathesar/utils/api'; import CacheManager from '@mathesar/utils/CacheManager'; -import type { - QueryInstance, - QueryResultColumn, - QueryResultColumns, - QueryResultRecords, -} from '@mathesar/api/queries/queryList'; +import type { QueryInstance } from '@mathesar/api/queries'; import type { TableEntry } from '@mathesar/api/tables'; import type { JoinableTablesResult } from '@mathesar/api/tables/joinable_tables'; import { createQuery, putQuery } from 
'@mathesar/stores/queries'; import { getTable } from '@mathesar/stores/tables'; -import Pagination from '@mathesar/utils/Pagination'; -import { toast } from '@mathesar/stores/toast'; import type { AbstractTypesMap } from '@mathesar/stores/abstract-types/types'; import { validateFilterEntry } from '@mathesar/components/filter-entry'; import type QueryModel from './QueryModel'; @@ -40,15 +33,13 @@ import type { } from './utils'; import QueryFilterTransformationModel from './QueryFilterTransformationModel'; import QuerySummarizationTransformationModel from './QuerySummarizationTransformationModel'; +import QueryRunner from './QueryRunner'; function validateQuery( queryModel: QueryModel, columnMap: ProcessedQueryResultColumnMap, ): boolean { - const general = - isDefinedNonNullable(queryModel.base_table) && - isDefinedNonNullable(queryModel.name) && - queryModel.name.trim() !== ''; + const general = isDefinedNonNullable(queryModel.base_table); if (!general) { return false; } @@ -67,14 +58,10 @@ function validateQuery( }); } -export default class QueryManager extends EventHandler<{ - save: QueryInstance; -}> { - query: Writable<QueryModel>; - - undoRedoManager: QueryUndoRedoManager; +export default class QueryManager extends QueryRunner<{ save: QueryInstance }> { + private undoRedoManager: QueryUndoRedoManager; - cacheManagers: { + private cacheManagers: { inputColumns: CacheManager<number, InputColumnsStoreSubstance>; } = { inputColumns: new CacheManager(5), @@ -83,29 +70,22 @@ export default class QueryManager extends EventHandler<{ state: Writable<{ inputColumnsFetchState?: RequestStatus; saveState?: RequestStatus; - columnsFetchState?: RequestStatus; - recordsFetchState?: RequestStatus; isUndoPossible: boolean; isRedoPossible: boolean; - lastFetchType: 'columns' | 'records' | 'both'; }> = writable({ isUndoPossible: false, isRedoPossible: false, - lastFetchType: 'both', }); - pagination: Writable<Pagination> = writable(new Pagination({ size: 100 })); - - records: Writable<QueryResultRecords> = writable({ count: 0, results: [] }); - - abstractTypeMap: AbstractTypesMap; - inputColumns: Writable<InputColumnsStoreSubstance> = writable({ baseTableColumns: new Map(), tablesThatReferenceBaseTable: new Map(), columnInformationMap: new Map(), }); + private eventHandler: EventHandler<{ save: QueryInstance }> = + new EventHandler(); + // Processed columns processedInitialColumns: Writable<ProcessedQueryResultColumnMap> = writable( @@ -120,29 +100,19 @@ export default class QueryManager extends EventHandler<{ new ImmutableMap(), ); - // Display stores - - selectedColumnAlias: Writable<QueryResultColumn['alias'] | undefined> = - writable(undefined); - // Promises - baseTableFetchPromise: CancellablePromise<TableEntry> | undefined; + private baseTableFetchPromise: CancellablePromise<TableEntry> | undefined; - joinableColumnsfetchPromise: + private joinableColumnsfetchPromise: | CancellablePromise<JoinableTablesResult> | undefined; - querySavePromise: CancellablePromise<QueryInstance> | undefined; - - queryColumnsFetchPromise: CancellablePromise<QueryResultColumns> | undefined; - - queryRecordsFetchPromise: CancellablePromise<QueryResultRecords> | undefined; + private querySavePromise: CancellablePromise<QueryInstance> | undefined; + // NEW CHANGES constructor(query: QueryModel, abstractTypeMap: AbstractTypesMap) { - super(); - this.abstractTypeMap = abstractTypeMap; - this.query = writable(query); + super(query, abstractTypeMap); this.reprocessColumns('both'); this.undoRedoManager = new 
QueryUndoRedoManager(); const inputColumnTreePromise = this.calculateInputColumnTree(); @@ -156,7 +126,6 @@ export default class QueryManager extends EventHandler<{ this.undoRedoManager.pushState(query, isQueryValid); return query; }); - void this.fetchColumnsAndRecords(); } private async calculateInputColumnTree(): Promise<void> { @@ -241,16 +210,6 @@ export default class QueryManager extends EventHandler<{ } } - async fetchColumnsAndRecords(): Promise< - [QueryResultColumns | undefined, QueryResultRecords | undefined] - > { - this.state.update((state) => ({ - ...state, - lastFetchType: 'both', - })); - return Promise.all([this.fetchColumns(), this.fetchResults()]); - } - /** * We are not creating a derived store so that we need to control * the callback only for essential scenarios and not everytime @@ -338,95 +297,26 @@ export default class QueryManager extends EventHandler<{ } } - private resetProcessedColumns(): void { - this.processedResultColumns.set(new ImmutableMap()); - } - - private setProcessedColumnsFromResults( - resultColumns: QueryResultColumn[], - ): void { - const newColumns = new ImmutableMap( - resultColumns.map((column) => [ - column.alias, - processColumn(column, this.abstractTypeMap), - ]), - ); - this.processedResultColumns.set(newColumns); - } - private async updateQuery(queryModel: QueryModel): Promise<{ clientValidationState: RequestStatus; - query?: QueryInstance; }> { this.query.set(queryModel); - this.state.update((_state) => ({ - ..._state, - saveState: { state: 'processing' }, - })); - - try { - this.querySavePromise?.cancel(); - if (get(this.state).inputColumnsFetchState?.state !== 'success') { - await this.calculateInputColumnTree(); - } - const isQueryValid = validateQuery( - queryModel, - get(this.processedInitialColumns).withEntries( - get(this.processedVirtualColumns), - ), - ); - if (!isQueryValid) { - this.state.update((_state) => ({ - ..._state, - saveState: { - state: 'failure', - errors: ['Query validation failed'], - }, - })); - return { - clientValidationState: { - state: 'failure', - errors: ['TODO: Place validation errors here '], - }, - }; - } - - const queryJSON = queryModel.toJSON(); - if (typeof queryJSON.id !== 'undefined') { - // TODO: Figure out a better way to help TS identify this as a saved instance - this.querySavePromise = putQuery(queryJSON as QueryInstance); - } else { - this.querySavePromise = createQuery(queryJSON); - } - const result = await this.querySavePromise; - this.query.update((qr) => qr.withId(result.id).model); - this.state.update((_state) => ({ - ..._state, - saveState: { state: 'success' }, - })); - await this.dispatch('save', result); - return { - clientValidationState: { state: 'success' }, - query: result, - }; - } catch (err) { - const errors = - err instanceof Error - ? [err.message] - : ['An error occurred while trying to save the query']; - this.state.update((_state) => ({ - ..._state, - saveState: { - state: 'failure', - errors, - }, - })); - toast.error(`Unable to save query: ${errors.join(',')}`); + if (get(this.state).inputColumnsFetchState?.state !== 'success') { + await this.calculateInputColumnTree(); } - return { - clientValidationState: { state: 'success' }, - query: undefined, - }; + const isQueryValid = validateQuery( + queryModel, + get(this.processedInitialColumns).withEntries( + get(this.processedVirtualColumns), + ), + ); + const clientValidationState: RequestStatus = isQueryValid + ? 
{ state: 'success' } + : { + state: 'failure', + errors: ['TODO: Place validation errors here '], + }; + return { clientValidationState }; } private setUndoRedoStates(): void { @@ -437,130 +327,11 @@ export default class QueryManager extends EventHandler<{ })); } - private async fetchColumns(): Promise<QueryResultColumns | undefined> { - const q = this.getQueryModel(); - - if (typeof q.id === 'undefined') { - this.state.update((_state) => ({ - ..._state, - columnsFetchState: { state: 'success' }, - })); - this.resetProcessedColumns(); - return undefined; - } - - try { - this.state.update((_state) => ({ - ..._state, - columnsFetchState: { state: 'processing' }, - })); - this.queryColumnsFetchPromise?.cancel(); - this.queryColumnsFetchPromise = getAPI( - `/api/db/v0/queries/${q.id}/columns/`, - ); - const result = await this.queryColumnsFetchPromise; - this.setProcessedColumnsFromResults(result); - this.state.update((_state) => ({ - ..._state, - columnsFetchState: { state: 'success' }, - })); - return result; - } catch (err) { - this.state.update((_state) => ({ - ..._state, - columnsFetchState: { - state: 'failure', - errors: - err instanceof Error - ? [err.message] - : ['An error occurred while trying to fetch query columns'], - }, - })); - } - return undefined; - } - - private async fetchResults(): Promise<QueryResultRecords | undefined> { - const q = this.getQueryModel(); - - if (typeof q.id === 'undefined') { - this.state.update((_state) => ({ - ..._state, - recordsFetchState: { state: 'success' }, - })); - this.records.set({ count: 0, results: [] }); - return undefined; - } - - try { - this.state.update((_state) => ({ - ..._state, - recordsFetchState: { state: 'processing' }, - })); - this.queryRecordsFetchPromise?.cancel(); - const { limit, offset } = get(this.pagination).recordsRequestParams(); - this.queryRecordsFetchPromise = getAPI( - `/api/db/v0/queries/${q.id}/records/?limit=${limit}&offset=${offset}`, - ); - const result = await this.queryRecordsFetchPromise; - this.records.set({ - count: result.count, - results: result.results ?? [], - }); - this.state.update((_state) => ({ - ..._state, - recordsFetchState: { state: 'success' }, - })); - return result; - } catch (err) { - this.state.update((_state) => ({ - ..._state, - recordsFetchState: { - state: 'failure', - errors: - err instanceof Error - ? 
[err.message] - : ['An error occurred while trying to fetch query records'], - }, - })); - } - return undefined; - } - - async setPagination( - pagination: Pagination, - ): Promise<QueryResultRecords | undefined> { - this.pagination.set(pagination); - this.state.update((state) => ({ - ...state, - lastFetchType: 'records', - })); - const result = await this.fetchResults(); - return result; - } - - private resetPaginationPane(): void { - this.pagination.update( - (pagination) => - new Pagination({ - ...pagination, - page: 1, - }), - ); - } - - private resetResults(): void { - this.queryColumnsFetchPromise?.cancel(); - this.queryRecordsFetchPromise?.cancel(); - this.records.set({ count: 0, results: [] }); - this.resetProcessedColumns(); - this.selectedColumnAlias.set(undefined); + private resetState(): void { this.state.update((state) => ({ ...state, - columnsFetchState: undefined, - recordsFetchState: undefined, })); - this.resetPaginationPane(); + this.resetResults(); } async update( @@ -574,7 +345,9 @@ export default class QueryManager extends EventHandler<{ if (isValid) { switch (updateDiff.type) { case 'baseTable': - this.resetResults(); + this.resetState(); + this.undoRedoManager.clear(); + this.setUndoRedoStates(); await this.calculateInputColumnTree(); break; case 'initialColumnName': @@ -583,15 +356,14 @@ export default class QueryManager extends EventHandler<{ case 'initialColumnsArray': if (!updateDiff.diff.initial_columns?.length) { // All columns have been deleted - this.resetResults(); + this.resetState(); } else { this.reprocessColumns('initial'); - await this.fetchColumnsAndRecords(); + await this.run(); } break; case 'transformations': - this.resetPaginationPane(); - await this.fetchColumnsAndRecords(); + await this.resetPaginationAndRun(); break; default: break; @@ -599,13 +371,6 @@ export default class QueryManager extends EventHandler<{ } } - // Meant to be used directly outside query manager - async save(): Promise<void> { - await this.updateQuery(this.getQueryModel()); - this.resetPaginationPane(); - await this.fetchColumnsAndRecords(); - } - private async performUndoRedoSync(query?: QueryModel): Promise<void> { if (query) { const currentQueryModelData = this.getQueryModel(); @@ -617,7 +382,7 @@ export default class QueryManager extends EventHandler<{ this.reprocessColumns('both'); await this.updateQuery(queryToSet); this.setUndoRedoStates(); - await this.fetchColumnsAndRecords(); + await this.run(); } else { this.setUndoRedoStates(); } @@ -633,28 +398,49 @@ export default class QueryManager extends EventHandler<{ await this.performUndoRedoSync(query); } - getQueryModel(): QueryModel { - return get(this.query); - } - - selectColumn(alias: QueryResultColumn['alias']): void { - if ( - get(this.query).initial_columns.some((column) => column.alias === alias) - ) { - this.selectedColumnAlias.set(alias); - } else { - this.selectedColumnAlias.set(undefined); + /** + * @throws Error if unable to save + */ + async save(): Promise<QueryModel> { + const queryJSON = this.getQueryModel().toJSON(); + this.state.update((_state) => ({ + ..._state, + saveState: { state: 'processing' }, + })); + try { + this.querySavePromise?.cancel(); + // TODO: Check for latest validation status here + if (queryJSON.id !== undefined) { + // TODO: Figure out a better way to help TS identify this as a saved instance + this.querySavePromise = putQuery(queryJSON as QueryInstance); + } else { + this.querySavePromise = createQuery(queryJSON); + } + const result = await this.querySavePromise; + 
this.query.update((qr) => qr.withId(result.id).model); + await this.dispatch('save', result); + this.state.update((_state) => ({ + ..._state, + saveState: { state: 'success' }, + })); + return this.getQueryModel(); + } catch (err) { + const errors = + err instanceof Error + ? [err.message] + : ['An error occurred while trying to save the query']; + this.state.update((_state) => ({ + ..._state, + saveState: { + state: 'failure', + errors, + }, + })); + throw err; } } - clearSelectedColumn(): void { - this.selectedColumnAlias.set(undefined); - } - destroy(): void { super.destroy(); - this.queryColumnsFetchPromise?.cancel(); - this.queryColumnsFetchPromise?.cancel(); - this.queryRecordsFetchPromise?.cancel(); } } diff --git a/mathesar_ui/src/systems/query-builder/QueryModel.ts b/mathesar_ui/src/systems/data-explorer/QueryModel.ts similarity index 92% rename from mathesar_ui/src/systems/query-builder/QueryModel.ts rename to mathesar_ui/src/systems/data-explorer/QueryModel.ts index 3c9fa4380b..85dd355b75 100644 --- a/mathesar_ui/src/systems/query-builder/QueryModel.ts +++ b/mathesar_ui/src/systems/data-explorer/QueryModel.ts @@ -1,7 +1,7 @@ import type { QueryInstanceInitialColumn, QueryInstanceTransformation, -} from '@mathesar/api/queries/queryList'; +} from '@mathesar/api/queries'; import type { UnsavedQueryInstance } from '@mathesar/stores/queries'; import QueryFilterTransformationModel from './QueryFilterTransformationModel'; import QuerySummarizationTransformationModel from './QuerySummarizationTransformationModel'; @@ -156,22 +156,6 @@ export default class QueryModel { }; } - withTransformations( - transformations?: QueryInstanceTransformation[], - ): QueryModelUpdateDiff { - const model = new QueryModel({ - ...this, - transformations, - }); - return { - model, - type: 'transformations', - diff: { - transformations, - }, - }; - } - withTransformationModels( transformationModels?: QueryTransformationModel[], ): QueryModelUpdateDiff { @@ -203,4 +187,8 @@ export default class QueryModel { ), }; } + + isSaved(): boolean { + return !!this.id; + } } diff --git a/mathesar_ui/src/systems/data-explorer/QueryRunner.ts b/mathesar_ui/src/systems/data-explorer/QueryRunner.ts new file mode 100644 index 0000000000..06db737ae2 --- /dev/null +++ b/mathesar_ui/src/systems/data-explorer/QueryRunner.ts @@ -0,0 +1,152 @@ +import { get, writable } from 'svelte/store'; +import type { Writable } from 'svelte/store'; +import type { RequestStatus } from '@mathesar/utils/api'; +import { + ImmutableMap, + CancellablePromise, + EventHandler, +} from '@mathesar-component-library'; +import Pagination from '@mathesar/utils/Pagination'; +import type { + QueryResultRecords, + QueryRunResponse, + QueryResultColumn, +} from '@mathesar/api/queries'; +import { runQuery } from '@mathesar/stores/queries'; +import type { AbstractTypesMap } from '@mathesar/stores/abstract-types/types'; +import type QueryModel from './QueryModel'; +import { processColumns } from './utils'; +import type { ProcessedQueryResultColumnMap } from './utils'; + +// TODO: Find a better way to implement type safety here +type QueryRunEvent = { run: QueryRunResponse }; +type Events = Record<string, unknown> & Partial<QueryRunEvent>; + +export default class QueryRunner< + T extends Events = Events, +> extends EventHandler<T & QueryRunEvent> { + query: Writable<QueryModel>; + + abstractTypeMap: AbstractTypesMap; + + runState: Writable<RequestStatus | undefined> = writable(); + + pagination: Writable<Pagination> = writable(new Pagination({ size: 100 })); + + 
records: Writable<QueryResultRecords> = writable({ count: 0, results: [] }); + + processedColumns: Writable<ProcessedQueryResultColumnMap> = writable( + new ImmutableMap(), + ); + + // Display stores + + selectedColumnAlias: Writable<QueryResultColumn['alias'] | undefined> = + writable(undefined); + + private runPromise: CancellablePromise<QueryRunResponse> | undefined; + + constructor(query: QueryModel, abstractTypeMap: AbstractTypesMap) { + super(); + this.abstractTypeMap = abstractTypeMap; + this.query = writable(query); + void this.run(); + } + + async run(): Promise<QueryRunResponse | undefined> { + this.runPromise?.cancel(); + const queryModel = this.getQueryModel(); + + if (queryModel.base_table === undefined) { + const records = { count: 0, results: [] }; + this.processedColumns.set(new ImmutableMap()); + this.records.set(records); + this.runState.set({ state: 'success' }); + return undefined; + } + + try { + const paginationRequest = get(this.pagination).recordsRequestParams(); + this.runState.set({ state: 'processing' }); + this.runPromise = runQuery({ + base_table: queryModel.base_table, + initial_columns: queryModel.initial_columns, + transformations: queryModel.transformationModels.map((transformation) => + transformation.toJSON(), + ), + parameters: { + ...paginationRequest, + }, + }); + const response = await this.runPromise; + this.processedColumns.set(processColumns(response, this.abstractTypeMap)); + this.records.set(response.records); + await this.dispatch('run', response); + this.runState.set({ state: 'success' }); + return response; + } catch (err) { + const errorMessage = + err instanceof Error + ? err.message + : 'Unable to run query due to an unknown reason'; + this.runState.set({ state: 'failure', errors: [errorMessage] }); + } + return undefined; + } + + async setPagination( + pagination: Pagination, + ): Promise<QueryResultRecords | undefined> { + this.pagination.set(pagination); + const result = await this.run(); + return result?.records; + } + + protected resetPagination(): void { + this.pagination.update( + (pagination) => + new Pagination({ + ...pagination, + page: 1, + }), + ); + } + + protected resetResults(): void { + this.selectedColumnAlias.set(undefined); + this.runPromise?.cancel(); + this.resetPagination(); + this.records.set({ count: 0, results: [] }); + this.processedColumns.set(new ImmutableMap()); + this.runState.set(undefined); + } + + protected async resetPaginationAndRun(): Promise< + QueryRunResponse | undefined + > { + this.resetPagination(); + return this.run(); + } + + selectColumn(alias: QueryResultColumn['alias']): void { + if ( + get(this.query).initial_columns.some((column) => column.alias === alias) + ) { + this.selectedColumnAlias.set(alias); + } else { + this.selectedColumnAlias.set(undefined); + } + } + + clearSelectedColumn(): void { + this.selectedColumnAlias.set(undefined); + } + + getQueryModel(): QueryModel { + return get(this.query); + } + + destroy(): void { + this.runPromise?.cancel(); + } +} diff --git a/mathesar_ui/src/systems/query-builder/QuerySummarizationTransformationModel.ts b/mathesar_ui/src/systems/data-explorer/QuerySummarizationTransformationModel.ts similarity index 96% rename from mathesar_ui/src/systems/query-builder/QuerySummarizationTransformationModel.ts rename to mathesar_ui/src/systems/data-explorer/QuerySummarizationTransformationModel.ts index 7b6d23e67e..5fc75c9693 100644 --- a/mathesar_ui/src/systems/query-builder/QuerySummarizationTransformationModel.ts +++ 
b/mathesar_ui/src/systems/data-explorer/QuerySummarizationTransformationModel.ts @@ -1,5 +1,5 @@ -import type { QueryInstanceSummarizationTransformation } from '@mathesar/api/queries/queryList'; -import { ImmutableMap } from '@mathesar/component-library'; +import type { QueryInstanceSummarizationTransformation } from '@mathesar/api/queries'; +import { ImmutableMap } from '@mathesar-component-library'; export interface QuerySummarizationAggregationEntry { inputAlias: string; diff --git a/mathesar_ui/src/systems/query-builder/QueryUndoRedoManager.ts b/mathesar_ui/src/systems/data-explorer/QueryUndoRedoManager.ts similarity index 96% rename from mathesar_ui/src/systems/query-builder/QueryUndoRedoManager.ts rename to mathesar_ui/src/systems/data-explorer/QueryUndoRedoManager.ts index 447139cb9f..ffa958b9e4 100644 --- a/mathesar_ui/src/systems/query-builder/QueryUndoRedoManager.ts +++ b/mathesar_ui/src/systems/data-explorer/QueryUndoRedoManager.ts @@ -58,4 +58,8 @@ export default class QueryUndoRedoManager { } return undefined; } + + clear(): void { + this.current = undefined; + } } diff --git a/mathesar_ui/src/systems/query-builder/column-selection-pane/ColumnSelectionPane.svelte b/mathesar_ui/src/systems/data-explorer/column-selection-pane/ColumnSelectionPane.svelte similarity index 100% rename from mathesar_ui/src/systems/query-builder/column-selection-pane/ColumnSelectionPane.svelte rename to mathesar_ui/src/systems/data-explorer/column-selection-pane/ColumnSelectionPane.svelte diff --git a/mathesar_ui/src/systems/query-builder/column-selection-pane/SelectableColumn.svelte b/mathesar_ui/src/systems/data-explorer/column-selection-pane/SelectableColumn.svelte similarity index 100% rename from mathesar_ui/src/systems/query-builder/column-selection-pane/SelectableColumn.svelte rename to mathesar_ui/src/systems/data-explorer/column-selection-pane/SelectableColumn.svelte diff --git a/mathesar_ui/src/systems/query-builder/column-selection-pane/SelectableColumnTree.svelte b/mathesar_ui/src/systems/data-explorer/column-selection-pane/SelectableColumnTree.svelte similarity index 100% rename from mathesar_ui/src/systems/query-builder/column-selection-pane/SelectableColumnTree.svelte rename to mathesar_ui/src/systems/data-explorer/column-selection-pane/SelectableColumnTree.svelte diff --git a/mathesar_ui/src/systems/query-builder/column-selection-pane/TableGroupCollapsible.svelte b/mathesar_ui/src/systems/data-explorer/column-selection-pane/TableGroupCollapsible.svelte similarity index 100% rename from mathesar_ui/src/systems/query-builder/column-selection-pane/TableGroupCollapsible.svelte rename to mathesar_ui/src/systems/data-explorer/column-selection-pane/TableGroupCollapsible.svelte diff --git a/mathesar_ui/src/systems/data-explorer/index.ts b/mathesar_ui/src/systems/data-explorer/index.ts new file mode 100644 index 0000000000..06babd50dc --- /dev/null +++ b/mathesar_ui/src/systems/data-explorer/index.ts @@ -0,0 +1,6 @@ +export { default as DataExplorer } from './QueryBuilder.svelte'; +export { default as QueryManager } from './QueryManager'; +export { default as QueryRunner } from './QueryRunner'; +export { default as QueryModel } from './QueryModel'; +export { default as ExplorationResult } from './result-pane/Results.svelte'; +export * from './urlSerializationUtils'; diff --git a/mathesar_ui/src/systems/query-builder/output-config-sidebar/FilterTransformation.svelte b/mathesar_ui/src/systems/data-explorer/output-config-sidebar/FilterTransformation.svelte similarity index 100% rename from 
mathesar_ui/src/systems/query-builder/output-config-sidebar/FilterTransformation.svelte rename to mathesar_ui/src/systems/data-explorer/output-config-sidebar/FilterTransformation.svelte diff --git a/mathesar_ui/src/systems/query-builder/output-config-sidebar/OutputConfigSidebar.svelte b/mathesar_ui/src/systems/data-explorer/output-config-sidebar/OutputConfigSidebar.svelte similarity index 100% rename from mathesar_ui/src/systems/query-builder/output-config-sidebar/OutputConfigSidebar.svelte rename to mathesar_ui/src/systems/data-explorer/output-config-sidebar/OutputConfigSidebar.svelte diff --git a/mathesar_ui/src/systems/query-builder/output-config-sidebar/TransformationsPane.svelte b/mathesar_ui/src/systems/data-explorer/output-config-sidebar/TransformationsPane.svelte similarity index 100% rename from mathesar_ui/src/systems/query-builder/output-config-sidebar/TransformationsPane.svelte rename to mathesar_ui/src/systems/data-explorer/output-config-sidebar/TransformationsPane.svelte diff --git a/mathesar_ui/src/systems/query-builder/output-config-sidebar/summarization/Aggregation.svelte b/mathesar_ui/src/systems/data-explorer/output-config-sidebar/summarization/Aggregation.svelte similarity index 100% rename from mathesar_ui/src/systems/query-builder/output-config-sidebar/summarization/Aggregation.svelte rename to mathesar_ui/src/systems/data-explorer/output-config-sidebar/summarization/Aggregation.svelte diff --git a/mathesar_ui/src/systems/query-builder/output-config-sidebar/summarization/SummarizationTransformation.svelte b/mathesar_ui/src/systems/data-explorer/output-config-sidebar/summarization/SummarizationTransformation.svelte similarity index 100% rename from mathesar_ui/src/systems/query-builder/output-config-sidebar/summarization/SummarizationTransformation.svelte rename to mathesar_ui/src/systems/data-explorer/output-config-sidebar/summarization/SummarizationTransformation.svelte diff --git a/mathesar_ui/src/systems/query-builder/output-config-sidebar/transformationUtils.ts b/mathesar_ui/src/systems/data-explorer/output-config-sidebar/transformationUtils.ts similarity index 100% rename from mathesar_ui/src/systems/query-builder/output-config-sidebar/transformationUtils.ts rename to mathesar_ui/src/systems/data-explorer/output-config-sidebar/transformationUtils.ts diff --git a/mathesar_ui/src/systems/data-explorer/result-pane/ResultPane.svelte b/mathesar_ui/src/systems/data-explorer/result-pane/ResultPane.svelte new file mode 100644 index 0000000000..483988fc06 --- /dev/null +++ b/mathesar_ui/src/systems/data-explorer/result-pane/ResultPane.svelte @@ -0,0 +1,83 @@ +<script lang="ts"> + import { Button, Spinner, Icon } from '@mathesar-component-library'; + import { iconRefresh } from '@mathesar/icons'; + import type QueryRunner from '../QueryRunner'; + import Results from './Results.svelte'; + + export let queryRunner: QueryRunner; + + $: ({ query, runState } = queryRunner); + $: ({ base_table, initial_columns } = $query); + + $: columnRunState = $runState?.state; + $: recordRunState = $runState?.state; +</script> + +<section data-identifier="result"> + <header> + <span class="title">Result</span> + {#if base_table && initial_columns.length} + <span class="info"> + {#if columnRunState === 'processing' || recordRunState === 'processing'} + Running query + <Spinner /> + {:else if columnRunState === 'failure' || recordRunState === 'failure'} + Query failed to run + <Button + appearance="plain" + size="small" + class="padding-zero" + on:click={() => queryRunner.run()} + > + <Icon 
{...iconRefresh} size="0.6rem" /> + <span>Retry</span> + </Button> + {/if} + </span> + {/if} + </header> + {#if !initial_columns.length} + <div class="empty-state"> + Please add a column from the column selection pane to get started. + </div> + {:else} + <Results {queryRunner} /> + {/if} +</section> + +<style lang="scss"> + section { + position: relative; + flex-grow: 1; + overflow: hidden; + flex-shrink: 0; + margin: 10px; + display: flex; + flex-direction: column; + border: 1px solid #e5e5e5; + border-radius: 4px; + + header { + padding: 8px 10px; + border-bottom: 1px solid #e5e5e5; + display: flex; + align-items: center; + + .title { + font-weight: 600; + } + .info { + margin-left: 8px; + color: #71717a; + font-size: 0.875rem; + display: inline-flex; + align-items: center; + gap: 4px; + } + } + + .empty-state { + padding: 1rem; + } + } +</style> diff --git a/mathesar_ui/src/systems/data-explorer/result-pane/Results.svelte b/mathesar_ui/src/systems/data-explorer/result-pane/Results.svelte new file mode 100644 index 0000000000..4e9122d89a --- /dev/null +++ b/mathesar_ui/src/systems/data-explorer/result-pane/Results.svelte @@ -0,0 +1,273 @@ +<script lang="ts"> + import { Button, ImmutableMap } from '@mathesar-component-library'; + import { + Sheet, + SheetHeader, + SheetVirtualRows, + SheetRow, + SheetCell, + SheetCellResizer, + } from '@mathesar/components/sheet'; + import PaginationGroup from '@mathesar/components/PaginationGroup.svelte'; + import CellFabric from '@mathesar/components/cell-fabric/CellFabric.svelte'; + import ColumnName from '@mathesar/components/column/ColumnName.svelte'; + import type QueryRunner from '../QueryRunner'; + + export let queryRunner: QueryRunner; + + const ID_ROW_CONTROL_COLUMN = 'row-control'; + + $: ({ + query, + processedColumns, + records, + selectedColumnAlias, + pagination, + runState, + } = queryRunner); + $: ({ initial_columns } = $query); + + $: columnRunState = $runState?.state; + $: recordRunState = $runState?.state; + + $: errors = $runState?.state === 'failure' ? $runState.errors : []; + $: columnList = [...$processedColumns.values()]; + $: sheetColumns = columnList.length + ? [{ id: ID_ROW_CONTROL_COLUMN }, ...columnList] + : []; + // Show a dummy ghost row when there are no records + $: showDummyGhostRow = + recordRunState === 'success' && !$records.results.length; + $: sheetItemCount = showDummyGhostRow ? 1 : $records.results.length; + + const columnWidths = new ImmutableMap([[ID_ROW_CONTROL_COLUMN, 70]]); + + function checkAndUnselectColumn(e: MouseEvent) { + const target = e.target as HTMLElement; + if ( + target.closest( + '[data-sheet-element="header"] [data-sheet-element="cell"]', + ) + ) { + return; + } + if ($selectedColumnAlias) { + const closestCell = target.closest( + '[data-sheet-element="row"] [data-sheet-element="cell"]', + ); + if ( + closestCell && + closestCell.querySelector( + `[data-column-identifier="${$selectedColumnAlias}"]`, + ) + ) { + return; + } + } + queryRunner.clearSelectedColumn(); + } +</script> + +<div data-identifier="query-run-result"> + {#if !initial_columns.length} + <div class="empty-state"> + This exploration does not contain any columns. Edit the exploration to add + columns to it. 
+ </div> + {:else if errors.length} + <div class="empty-state errors"> + {#each errors as error} + <p>{error}</p> + {/each} + </div> + {:else} + <Sheet + columns={sheetColumns} + getColumnIdentifier={(c) => c.id} + {columnWidths} + on:click={checkAndUnselectColumn} + usesVirtualList + > + <SheetHeader> + <SheetCell + columnIdentifierKey={ID_ROW_CONTROL_COLUMN} + isStatic + isControlCell + let:htmlAttributes + let:style + > + <div {...htmlAttributes} {style} /> + </SheetCell> + + {#each columnList as processedQueryColumn (processedQueryColumn.id)} + <SheetCell + columnIdentifierKey={processedQueryColumn.id} + let:htmlAttributes + let:style + > + <div {...htmlAttributes} {style}> + <Button + appearance="plain" + class="column-name-wrapper {$selectedColumnAlias === + processedQueryColumn.column.alias + ? 'selected' + : ''}" + on:click={() => { + queryRunner.selectColumn(processedQueryColumn.column.alias); + }} + > + <!--TODO: Use a separate prop to identify column that isn't fetched yet + instead of type:unknown--> + <ColumnName + isLoading={columnRunState === 'processing' && + processedQueryColumn.column.type === 'unknown'} + column={{ + ...processedQueryColumn.column, + name: + processedQueryColumn.column.display_name ?? + processedQueryColumn.column.alias, + }} + /> + </Button> + <SheetCellResizer columnIdentifierKey={processedQueryColumn.id} /> + </div> + </SheetCell> + {/each} + </SheetHeader> + + <SheetVirtualRows + itemCount={sheetItemCount} + paddingBottom={30} + itemSize={() => 30} + let:items + > + {#each items as item (item.key)} + {#if $records.results[item.index] || showDummyGhostRow} + <SheetRow style={item.style} let:htmlAttributes let:styleString> + <div {...htmlAttributes} style={styleString}> + <SheetCell + columnIdentifierKey={ID_ROW_CONTROL_COLUMN} + isStatic + isControlCell + let:htmlAttributes + let:style + > + <div {...htmlAttributes} {style}> + {$pagination.offset + item.index + 1} + </div> + </SheetCell> + + {#each columnList as processedQueryColumn (processedQueryColumn.id)} + <SheetCell + columnIdentifierKey={processedQueryColumn.id} + let:htmlAttributes + let:style + > + <div + {...htmlAttributes} + {style} + class={$selectedColumnAlias === + processedQueryColumn.column.alias + ? 
'selected' + : ''} + > + {#if $records.results[item.index]} + <CellFabric + columnFabric={processedQueryColumn} + value={$records.results[item.index][ + processedQueryColumn.id + ]} + showAsSkeleton={recordRunState === 'processing'} + disabled={true} + /> + {/if} + </div> + </SheetCell> + {/each} + </div> + </SheetRow> + {/if} + {/each} + </SheetVirtualRows> + </Sheet> + <div data-identifier="status-bar"> + {#if $records.count} + <div> + Showing {$pagination.leftBound}-{Math.min( + $records.count, + $pagination.rightBound, + )} of {$records.count} + </div> + {:else if recordRunState === 'success'} + No results found + {/if} + <PaginationGroup + pagination={$pagination} + totalCount={$records.count} + on:change={(e) => { + void queryRunner.setPagination(e.detail); + }} + /> + </div> + {/if} +</div> + +<style lang="scss"> + [data-identifier='query-run-result'] { + position: relative; + flex-grow: 1; + overflow: hidden; + flex-shrink: 0; + display: flex; + flex-direction: column; + + .empty-state { + padding: 1rem; + + &.errors { + color: var(--danger-color); + } + + p { + margin: 0; + } + } + + :global(.sheet) { + bottom: 2.7rem; + } + + [data-identifier='status-bar'] { + flex-grow: 0; + flex-shrink: 0; + border-top: 1px solid #dfdfdf; + padding: 0.2rem 0.6rem; + background: #fafafa; + display: flex; + align-items: center; + margin-top: auto; + height: 2.7rem; + + :global(.pagination-group) { + margin-left: auto; + } + } + + :global(button.column-name-wrapper) { + flex: 1; + padding: 6px 8px; + overflow: hidden; + height: 100%; + display: block; + overflow: hidden; + text-align: left; + } + + :global(.column-name-wrapper.selected) { + background: #dedede !important; + } + :global([data-sheet-element='cell'].selected) { + background: #fafafa; + } + } +</style> diff --git a/mathesar_ui/src/systems/data-explorer/types.ts b/mathesar_ui/src/systems/data-explorer/types.ts new file mode 100644 index 0000000000..fdb47f4958 --- /dev/null +++ b/mathesar_ui/src/systems/data-explorer/types.ts @@ -0,0 +1,2 @@ +export type { default as QueryManager } from './QueryManager'; +export type { default as QueryRunner } from './QueryRunner'; diff --git a/mathesar_ui/src/systems/query-builder/urlSerializationUtils.ts b/mathesar_ui/src/systems/data-explorer/urlSerializationUtils.ts similarity index 91% rename from mathesar_ui/src/systems/query-builder/urlSerializationUtils.ts rename to mathesar_ui/src/systems/data-explorer/urlSerializationUtils.ts index 20726ad2c4..ea12564ba5 100644 --- a/mathesar_ui/src/systems/query-builder/urlSerializationUtils.ts +++ b/mathesar_ui/src/systems/data-explorer/urlSerializationUtils.ts @@ -47,9 +47,10 @@ export function constructQueryModelFromTerseSummarizationHash( if (!groupedColumn) { return {}; } - const aggregatedColumns = terseSummarization.columns.filter( + const firstNonGroupColumn = terseSummarization.columns.find( (entry) => entry.id !== groupedColumnId, ); + const aggregatedColumns = firstNonGroupColumn ? 
[firstNonGroupColumn] : []; return { base_table: terseSummarization.baseTableId, @@ -72,13 +73,13 @@ export function constructQueryModelFromTerseSummarizationHash( aggregation_expressions: aggregatedColumns.map((entry) => ({ input_alias: entry.name, output_alias: `${entry.name} (aggregated)`, - function: 'aggregate_to_array', + function: 'count', })), }, display_names: aggregatedColumns.reduce( (displayNames, entry) => ({ ...displayNames, - [`${entry.name} (aggregated)`]: `${entry.name} (aggregated)`, + [`${entry.name} (aggregated)`]: `Count(${entry.name})`, }), {} as Record<string, string>, ), diff --git a/mathesar_ui/src/systems/query-builder/utils.ts b/mathesar_ui/src/systems/data-explorer/utils.ts similarity index 89% rename from mathesar_ui/src/systems/query-builder/utils.ts rename to mathesar_ui/src/systems/data-explorer/utils.ts index 5bb671abc2..26c3055acc 100644 --- a/mathesar_ui/src/systems/query-builder/utils.ts +++ b/mathesar_ui/src/systems/data-explorer/utils.ts @@ -1,6 +1,9 @@ import { ImmutableMap } from '@mathesar-component-library'; import type { ComponentAndProps } from '@mathesar-component-library/types'; -import type { QueryResultColumn } from '@mathesar/api/queries/queryList'; +import type { + QueryResultColumn, + QueryRunResponse, +} from '@mathesar/api/queries'; import { getAbstractTypeForDbType, getFiltersForAbstractType, @@ -24,6 +27,14 @@ import type { import type { Column } from '@mathesar/api/tables/columns'; import type QueryModel from './QueryModel'; +export type ColumnOperationalState = + | { + state: 'processing'; + processType?: 'creation' | 'deletion' | 'modification'; + } + | { state: 'success' } + | { state: 'failure'; errors: string[] }; + export interface ProcessedQueryResultColumn extends CellColumnFabric { id: QueryResultColumn['alias']; column: QueryResultColumn; @@ -31,6 +42,8 @@ export interface ProcessedQueryResultColumn extends CellColumnFabric { inputComponentAndProps: ComponentAndProps; allowedFiltersMap: ReturnType<typeof getFiltersForAbstractType>; preprocFunctions: AbstractTypePreprocFunctionDefinition[]; + // Make this mandatory later + operationalState?: ColumnOperationalState; } export type ProcessedQueryResultColumnMap = ImmutableMap< @@ -317,3 +330,32 @@ export function getTablesThatReferenceBaseTable( return references; } + +/** ======== */ + +export function processColumns( + columnInformation: Pick< + QueryRunResponse, + 'output_columns' | 'column_metadata' + >, + abstractTypeMap: AbstractTypesMap, +): ProcessedQueryResultColumnMap { + return new ImmutableMap( + columnInformation.output_columns.map((alias) => { + const columnMetaData = columnInformation.column_metadata[alias]; + return [ + alias, + processColumn( + { + alias, + display_name: columnMetaData.display_name ?? alias, + type: columnMetaData.type ?? 
'unknown', + type_options: columnMetaData.type_options, + display_options: columnMetaData.display_options, + }, + abstractTypeMap, + ), + ]; + }), + ); +} diff --git a/mathesar_ui/src/systems/query-builder/result-pane/ResultPane.svelte b/mathesar_ui/src/systems/query-builder/result-pane/ResultPane.svelte deleted file mode 100644 index 09004f4389..0000000000 --- a/mathesar_ui/src/systems/query-builder/result-pane/ResultPane.svelte +++ /dev/null @@ -1,320 +0,0 @@ -<script lang="ts"> - import { Button, Spinner, Icon } from '@mathesar-component-library'; - import { - Sheet, - SheetHeader, - SheetVirtualRows, - SheetRow, - SheetCell, - SheetCellResizer, - } from '@mathesar/components/sheet'; - import PaginationGroup from '@mathesar/components/PaginationGroup.svelte'; - import CellFabric from '@mathesar/components/cell-fabric/CellFabric.svelte'; - import ColumnName from '@mathesar/components/column/ColumnName.svelte'; - import { iconRefresh } from '@mathesar/icons'; - import type QueryManager from '../QueryManager'; - - export let queryManager: QueryManager; - - $: ({ - query, - processedResultColumns, - records, - state, - selectedColumnAlias, - pagination, - } = queryManager); - $: ({ base_table, initial_columns } = $query); - - $: columnRunState = $state.columnsFetchState?.state; - $: recordRunState = $state.recordsFetchState?.state; - $: lastFetchTypeEqualsRecords = $state.lastFetchType === 'records'; - - $: columnRunErrors = - $state.columnsFetchState?.state === 'failure' - ? $state.columnsFetchState.errors - : []; - $: recordRunErrors = - $state.recordsFetchState?.state === 'failure' - ? $state.recordsFetchState.errors - : []; - // Prioritize showing column errors over record fetch errors - $: errors = columnRunErrors.length > 0 ? columnRunErrors : recordRunErrors; - $: columnList = [...$processedResultColumns.values()]; - // Show a dummy ghost row when there are no records - $: showDummyGhostRow = - recordRunState === 'success' && !$records.results.length; - $: sheetItemCount = showDummyGhostRow ? 1 : $records.results.length; - - function checkAndUnselectColumn(e: MouseEvent) { - const target = e.target as HTMLElement; - if ( - target.closest( - '[data-sheet-element="header"] [data-sheet-element="cell"]', - ) - ) { - return; - } - if ($selectedColumnAlias) { - const closestCell = target.closest( - '[data-sheet-element="row"] [data-sheet-element="cell"]', - ); - if ( - closestCell && - closestCell.querySelector( - `[data-column-identifier="${$selectedColumnAlias}"]`, - ) - ) { - return; - } - } - queryManager.clearSelectedColumn(); - } -</script> - -<section data-identifier="result"> - <header> - <span class="title">Result</span> - {#if base_table && initial_columns.length} - <span class="info"> - {#if columnRunState === 'processing' || recordRunState === 'processing'} - Running query - <Spinner /> - {:else if columnRunState === 'failure' || recordRunState === 'failure'} - Query failed to run - <Button - appearance="plain" - size="small" - class="padding-zero" - on:click={() => queryManager.fetchColumnsAndRecords()} - > - <Icon {...iconRefresh} size="0.6rem" /> - <span>Retry</span> - </Button> - {/if} - </span> - {/if} - </header> - <div data-identifier="result-content"> - {#if !base_table} - <div class="empty-state"> - Please select the base table to get started. - </div> - {:else if !initial_columns.length} - <div class="empty-state"> - Please add a column from the column selection pane to get started. 
- </div> - {:else if errors.length} - <div class="empty-state errors"> - {#each errors as error} - <p>{error}</p> - {/each} - </div> - {:else} - <Sheet - columns={columnList} - getColumnIdentifier={(c) => c.id} - on:click={checkAndUnselectColumn} - usesVirtualList - > - <SheetHeader> - {#each columnList as processedQueryColumn (processedQueryColumn.id)} - <SheetCell - columnIdentifierKey={processedQueryColumn.id} - let:htmlAttributes - let:style - > - <div {...htmlAttributes} {style}> - <Button - appearance="plain" - class="column-name-wrapper {$selectedColumnAlias === - processedQueryColumn.column.alias - ? 'selected' - : ''}" - on:click={() => { - queryManager.selectColumn( - processedQueryColumn.column.alias, - ); - }} - > - <!--TODO: Use a separate prop to identify column that isn't fetched yet - instead of type:unknown--> - <ColumnName - isLoading={columnRunState === 'processing' && - processedQueryColumn.column.type === 'unknown'} - column={{ - ...processedQueryColumn.column, - name: - processedQueryColumn.column.display_name ?? - processedQueryColumn.column.alias, - }} - /> - </Button> - <SheetCellResizer - columnIdentifierKey={processedQueryColumn.id} - /> - </div> - </SheetCell> - {/each} - </SheetHeader> - - <SheetVirtualRows - itemCount={sheetItemCount} - paddingBottom={30} - itemSize={() => 30} - let:items - > - {#each items as item (item.key)} - {#if $records.results[item.index] || showDummyGhostRow} - <SheetRow style={item.style} let:htmlAttributes let:styleString> - <div {...htmlAttributes} style={styleString}> - {#each columnList as processedQueryColumn (processedQueryColumn.id)} - <SheetCell - columnIdentifierKey={processedQueryColumn.id} - let:htmlAttributes - let:style - > - <div - {...htmlAttributes} - {style} - class={$selectedColumnAlias === - processedQueryColumn.column.alias - ? 
'selected' - : ''} - > - {#if $records.results[item.index]} - <CellFabric - columnFabric={processedQueryColumn} - value={$records.results[item.index][ - processedQueryColumn.id - ]} - showAsSkeleton={recordRunState === 'processing' && - (lastFetchTypeEqualsRecords || - $records.results[item.index][ - processedQueryColumn.id - ] === undefined)} - disabled={true} - /> - {/if} - </div> - </SheetCell> - {/each} - </div> - </SheetRow> - {/if} - {/each} - </SheetVirtualRows> - </Sheet> - <div data-identifier="status-bar"> - {#if $records.count} - <div> - Showing {$pagination.leftBound}-{Math.min( - $records.count, - $pagination.rightBound, - )} of {$records.count} - </div> - {:else if recordRunState === 'success'} - No results found - {/if} - <PaginationGroup - pagination={$pagination} - totalCount={$records.count} - on:change={(e) => { - void queryManager.setPagination(e.detail); - }} - /> - </div> - {/if} - </div> -</section> - -<style lang="scss"> - section { - position: relative; - flex-grow: 1; - overflow: hidden; - flex-shrink: 0; - margin: 10px; - display: flex; - flex-direction: column; - border: 1px solid #e5e5e5; - border-radius: 4px; - - header { - padding: 8px 10px; - border-bottom: 1px solid #e5e5e5; - display: flex; - align-items: center; - - .title { - font-weight: 600; - } - .info { - margin-left: 8px; - color: #71717a; - font-size: 0.875rem; - display: inline-flex; - align-items: center; - gap: 4px; - } - } - - [data-identifier='result-content'] { - position: relative; - flex-grow: 1; - overflow: hidden; - flex-shrink: 0; - display: flex; - flex-direction: column; - - .empty-state { - padding: 1rem; - - &.errors { - color: var(--danger-color); - } - - p { - margin: 0; - } - } - - :global(.sheet) { - bottom: 2.7rem; - } - - [data-identifier='status-bar'] { - flex-grow: 0; - flex-shrink: 0; - border-top: 1px solid #dfdfdf; - padding: 0.2rem 0.6rem; - background: #fafafa; - display: flex; - align-items: center; - margin-top: auto; - height: 2.7rem; - - :global(.pagination-group) { - margin-left: auto; - } - } - } - - :global(button.column-name-wrapper) { - flex: 1; - padding: 6px 8px; - overflow: hidden; - height: 100%; - display: block; - overflow: hidden; - text-align: left; - } - - :global(.column-name-wrapper.selected) { - background: #dedede !important; - } - :global([data-sheet-element='cell'].selected) { - background: #fafafa; - } - } -</style> diff --git a/mathesar_ui/src/systems/table-view/actions-pane/ActionsPane.svelte b/mathesar_ui/src/systems/table-view/actions-pane/ActionsPane.svelte index 4fb000356c..94cc7432bd 100644 --- a/mathesar_ui/src/systems/table-view/actions-pane/ActionsPane.svelte +++ b/mathesar_ui/src/systems/table-view/actions-pane/ActionsPane.svelte @@ -20,7 +20,7 @@ } from '@mathesar/icons'; import { getTabularDataStoreFromContext } from '@mathesar/stores/table-data'; import { States } from '@mathesar/utils/api'; - import { constructDataExplorerUrlToSummarizeFromGroup } from '@mathesar/systems/query-builder/urlSerializationUtils'; + import { constructDataExplorerUrlToSummarizeFromGroup } from '@mathesar/systems/data-explorer'; import Filter from './record-operations/Filter.svelte'; import Sort from './record-operations/Sort.svelte'; import Group from './record-operations/Group.svelte'; diff --git a/mathesar_ui/src/systems/table-view/header/Header.svelte b/mathesar_ui/src/systems/table-view/header/Header.svelte index 12e9e10e04..6c8383050a 100644 --- a/mathesar_ui/src/systems/table-view/header/Header.svelte +++ 
b/mathesar_ui/src/systems/table-view/header/Header.svelte @@ -30,6 +30,7 @@ <SheetCell columnIdentifierKey={ID_ROW_CONTROL_COLUMN} isStatic + isControlCell let:htmlAttributes let:style > diff --git a/mathesar_ui/src/systems/table-view/row/Row.svelte b/mathesar_ui/src/systems/table-view/row/Row.svelte index e96115d82f..83b96db7ca 100644 --- a/mathesar_ui/src/systems/table-view/row/Row.svelte +++ b/mathesar_ui/src/systems/table-view/row/Row.svelte @@ -84,15 +84,11 @@ <SheetCell columnIdentifierKey={ID_ROW_CONTROL_COLUMN} isStatic + isControlCell let:htmlAttributes let:style > - <div - class="row-control" - {...htmlAttributes} - {style} - on:click={handleRowClick} - > + <div {...htmlAttributes} {style} on:click={handleRowClick}> {#if row.record} <RowControl {primaryKeyColumnId} @@ -147,15 +143,6 @@ display: none; } - .row-control { - font-size: var(--text-size-x-small); - padding: 0 1.5rem; - color: var(--color-text-muted); - display: inline-flex; - align-items: center; - height: 100%; - } - &.is-add-placeholder { cursor: pointer; diff --git a/mathesar_ui/src/utils/preloadData.ts b/mathesar_ui/src/utils/preloadData.ts index 63bd1d8518..9e7aff8dff 100644 --- a/mathesar_ui/src/utils/preloadData.ts +++ b/mathesar_ui/src/utils/preloadData.ts @@ -4,7 +4,7 @@ import type { AbstractTypeResponse, } from '@mathesar/AppTypes'; import type { TableEntry } from '@mathesar/api/tables'; -import type { QueryInstance } from '@mathesar/api/queries/queryList'; +import type { QueryInstance } from '@mathesar/api/queries'; interface CommonData { databases: Database[];
aws-powertools__powertools-lambda-python-984
Logger: log_event does not serialize classes

**What were you trying to accomplish?**

I was trying to log the received S3 event. Note that I am using the data classes provided by this library to read the event:

```python
@event_source(data_class=S3Event)
@log.inject_lambda_context(
    log_event=True
)
def lambda_handler(event: S3Event, context: LambdaContext):
    ...
```

## Expected Behavior

The logged event should contain all the information from the S3 event.

## Current Behavior

This is the (trimmed) output I get in the log:

```json
{
    "level": "INFO",
    "message": "<aws_lambda_powertools.utilities.data_classes.s3_event.S3Event object at 0x7f0be7efb2b0>",
    "timestamp": "2022-01-11 06:36:20,111+0000"
}
```

It looks like the logger was unable to represent the `S3Event` object as a string, so only the default object representation was logged.

## Possible Solution

Implement `__repr__` and `__str__` methods in the `S3Event` class, or in its parent `DictWrapper` class (a sketch of this idea follows after this issue).

## Steps to Reproduce (for bugs)

1. Implement a Lambda function which receives an S3 event, like the following:

   ```python
   @event_source(data_class=S3Event)
   @log.inject_lambda_context(
       log_event=True
   )
   def lambda_handler(event: S3Event, context: LambdaContext):
       pass
   ```

2. Set up the Lambda trigger for S3 object-creation events.
3. Upload a file to the S3 bucket where the trigger is configured.
4. Check the logs in CloudWatch.

## Environment

* **Powertools version used**: 1.24.0
* **Packaging format (Layers, PyPi)**: PyPi
* **AWS Lambda function runtime**: Python 3.9
* **Debugging logs**: not provided ([how to enable debug mode](https://awslabs.github.io/aws-lambda-powertools-python/#debug-mode))
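To make the suggested fix concrete, here is a minimal, hedged sketch of the `__str__`/`__repr__` idea. It is not the library's actual implementation: `DictWrapper` and `S3Event` below are simplified stand-ins, and the `_data` attribute and constructor signature are assumptions made for illustration. The point is only to show how a string representation based on the wrapped dictionary would let `log_event=True` emit the event payload instead of the default `<... object at 0x...>` text.

```python
import json
from typing import Any, Dict


class DictWrapper:
    """Simplified stand-in for the data classes' base wrapper (internals assumed)."""

    def __init__(self, data: Dict[str, Any]):
        self._data = data  # assumed: the wrapper keeps the original event dict

    def __getitem__(self, key: str) -> Any:
        return self._data[key]

    def __str__(self) -> str:
        # Serialize the wrapped event so str(event) -- and therefore a logger
        # that logs the event object -- shows the payload, not the object id.
        return json.dumps(self._data, default=str)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self._data!r})"


class S3Event(DictWrapper):
    """Placeholder subclass standing in for the real S3Event data class."""


if __name__ == "__main__":
    event = S3Event({"Records": [{"s3": {"bucket": {"name": "example-bucket"}}}]})
    print(event)        # {"Records": [{"s3": {"bucket": {"name": "example-bucket"}}}]}
    print(repr(event))  # S3Event({'Records': [{'s3': {'bucket': {'name': 'example-bucket'}}}]})
```

An alternative that leaves the data classes untouched would be for the logger itself to unwrap the event (for example, log the underlying dictionary rather than the wrapper object) before serializing it; either way, the goal is that the logged `message` carries the raw event data.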
[ { "content": "import functools\nimport inspect\nimport logging\nimport os\nimport random\nimport sys\nfrom typing import IO, Any, Callable, Dict, Iterable, Optional, TypeVar, Union\n\nimport jmespath\n\nfrom ..shared import constants\nfrom ..shared.functions import resolve_env_var_choice, resolve_truthy_env_var_choice\nfrom .exceptions import InvalidLoggerSamplingRateError\nfrom .filters import SuppressFilter\nfrom .formatter import BasePowertoolsFormatter, LambdaPowertoolsFormatter\nfrom .lambda_context import build_lambda_context_model\n\nlogger = logging.getLogger(__name__)\n\nis_cold_start = True\n\nPowertoolsFormatter = TypeVar(\"PowertoolsFormatter\", bound=BasePowertoolsFormatter)\n\n\ndef _is_cold_start() -> bool:\n \"\"\"Verifies whether is cold start\n\n Returns\n -------\n bool\n cold start bool value\n \"\"\"\n cold_start = False\n\n global is_cold_start\n if is_cold_start:\n cold_start = is_cold_start\n is_cold_start = False\n\n return cold_start\n\n\n# PyCharm does not support autocomplete via getattr\n# so we need to return to subclassing removed in #97\n# All methods/properties continue to be proxied to inner logger\n# https://github.com/awslabs/aws-lambda-powertools-python/issues/107\n# noinspection PyRedeclaration\nclass Logger(logging.Logger): # lgtm [py/missing-call-to-init]\n \"\"\"Creates and setups a logger to format statements in JSON.\n\n Includes service name and any additional key=value into logs\n It also accepts both service name or level explicitly via env vars\n\n Environment variables\n ---------------------\n POWERTOOLS_SERVICE_NAME : str\n service name\n LOG_LEVEL: str\n logging level (e.g. INFO, DEBUG)\n POWERTOOLS_LOGGER_SAMPLE_RATE: float\n sampling rate ranging from 0 to 1, 1 being 100% sampling\n\n Parameters\n ----------\n service : str, optional\n service name to be appended in logs, by default \"service_undefined\"\n level : str, int optional\n logging.level, by default \"INFO\"\n child: bool, optional\n create a child Logger named <service>.<caller_file_name>, False by default\n sample_rate: float, optional\n sample rate for debug calls within execution context defaults to 0.0\n stream: sys.stdout, optional\n valid output for a logging stream, by default sys.stdout\n logger_formatter: PowertoolsFormatter, optional\n custom logging formatter that implements PowertoolsFormatter\n logger_handler: logging.Handler, optional\n custom logging handler e.g. logging.FileHandler(\"file.log\")\n\n Parameters propagated to LambdaPowertoolsFormatter\n --------------------------------------------------\n datefmt: str, optional\n String directives (strftime) to format log timestamp using `time`, by default it uses RFC\n 3339.\n use_datetime_directive: str, optional\n Interpret `datefmt` as a format string for `datetime.datetime.strftime`, rather than\n `time.strftime`.\n\n See https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior . 
This\n also supports a custom %F directive for milliseconds.\n json_serializer : Callable, optional\n function to serialize `obj` to a JSON formatted `str`, by default json.dumps\n json_deserializer : Callable, optional\n function to deserialize `str`, `bytes`, bytearray` containing a JSON document to a Python `obj`,\n by default json.loads\n json_default : Callable, optional\n function to coerce unserializable values, by default `str()`\n\n Only used when no custom formatter is set\n utc : bool, optional\n set logging timestamp to UTC, by default False to continue to use local time as per stdlib\n log_record_order : list, optional\n set order of log keys when logging, by default [\"level\", \"location\", \"message\", \"timestamp\"]\n\n Example\n -------\n **Setups structured logging in JSON for Lambda functions with explicit service name**\n\n >>> from aws_lambda_powertools import Logger\n >>> logger = Logger(service=\"payment\")\n >>>\n >>> def handler(event, context):\n logger.info(\"Hello\")\n\n **Setups structured logging in JSON for Lambda functions using env vars**\n\n $ export POWERTOOLS_SERVICE_NAME=\"payment\"\n $ export POWERTOOLS_LOGGER_SAMPLE_RATE=0.01 # 1% debug sampling\n >>> from aws_lambda_powertools import Logger\n >>> logger = Logger()\n >>>\n >>> def handler(event, context):\n logger.info(\"Hello\")\n\n **Append payment_id to previously setup logger**\n\n >>> from aws_lambda_powertools import Logger\n >>> logger = Logger(service=\"payment\")\n >>>\n >>> def handler(event, context):\n logger.append_keys(payment_id=event[\"payment_id\"])\n logger.info(\"Hello\")\n\n **Create child Logger using logging inheritance via child param**\n\n >>> # app.py\n >>> import another_file\n >>> from aws_lambda_powertools import Logger\n >>> logger = Logger(service=\"payment\")\n >>>\n >>> # another_file.py\n >>> from aws_lambda_powertools import Logger\n >>> logger = Logger(service=\"payment\", child=True)\n\n **Logging in UTC timezone**\n\n >>> # app.py\n >>> import logging\n >>> from aws_lambda_powertools import Logger\n >>>\n >>> logger = Logger(service=\"payment\", utc=True)\n\n **Brings message as the first key in log statements**\n\n >>> # app.py\n >>> import logging\n >>> from aws_lambda_powertools import Logger\n >>>\n >>> logger = Logger(service=\"payment\", log_record_order=[\"message\"])\n\n **Logging to a file instead of standard output for testing**\n\n >>> # app.py\n >>> import logging\n >>> from aws_lambda_powertools import Logger\n >>>\n >>> logger = Logger(service=\"payment\", logger_handler=logging.FileHandler(\"log.json\"))\n\n Raises\n ------\n InvalidLoggerSamplingRateError\n When sampling rate provided is not a float\n \"\"\"\n\n def __init__(\n self,\n service: Optional[str] = None,\n level: Union[str, int, None] = None,\n child: bool = False,\n sampling_rate: Optional[float] = None,\n stream: Optional[IO[str]] = None,\n logger_formatter: Optional[PowertoolsFormatter] = None,\n logger_handler: Optional[logging.Handler] = None,\n **kwargs,\n ):\n self.service = resolve_env_var_choice(\n choice=service, env=os.getenv(constants.SERVICE_NAME_ENV, \"service_undefined\")\n )\n self.sampling_rate = resolve_env_var_choice(\n choice=sampling_rate, env=os.getenv(constants.LOGGER_LOG_SAMPLING_RATE)\n )\n self.child = child\n self.logger_formatter = logger_formatter\n self.logger_handler = logger_handler or logging.StreamHandler(stream)\n self.log_level = self._get_log_level(level)\n self._is_deduplication_disabled = resolve_truthy_env_var_choice(\n 
env=os.getenv(constants.LOGGER_LOG_DEDUPLICATION_ENV, \"false\")\n )\n self._default_log_keys = {\"service\": self.service, \"sampling_rate\": self.sampling_rate}\n self._logger = self._get_logger()\n\n self._init_logger(**kwargs)\n\n def __getattr__(self, name):\n # Proxy attributes not found to actual logger to support backward compatibility\n # https://github.com/awslabs/aws-lambda-powertools-python/issues/97\n return getattr(self._logger, name)\n\n def _get_logger(self):\n \"\"\"Returns a Logger named {self.service}, or {self.service.filename} for child loggers\"\"\"\n logger_name = self.service\n if self.child:\n logger_name = f\"{self.service}.{self._get_caller_filename()}\"\n\n return logging.getLogger(logger_name)\n\n def _init_logger(self, **kwargs):\n \"\"\"Configures new logger\"\"\"\n\n # Skip configuration if it's a child logger or a pre-configured logger\n # to prevent the following:\n # a) multiple handlers being attached\n # b) different sampling mechanisms\n # c) multiple messages from being logged as handlers can be duplicated\n is_logger_preconfigured = getattr(self._logger, \"init\", False)\n if self.child or is_logger_preconfigured:\n return\n\n self._configure_sampling()\n self._logger.setLevel(self.log_level)\n self._logger.addHandler(self.logger_handler)\n self.structure_logs(**kwargs)\n\n # Pytest Live Log feature duplicates log records for colored output\n # but we explicitly add a filter for log deduplication.\n # This flag disables this protection when you explicit want logs to be duplicated (#262)\n if not self._is_deduplication_disabled:\n logger.debug(\"Adding filter in root logger to suppress child logger records to bubble up\")\n for handler in logging.root.handlers:\n # It'll add a filter to suppress any child logger from self.service\n # Example: `Logger(service=\"order\")`, where service is Order\n # It'll reject all loggers starting with `order` e.g. 
order.checkout, order.shared\n handler.addFilter(SuppressFilter(self.service))\n\n # as per bug in #249, we should not be pre-configuring an existing logger\n # therefore we set a custom attribute in the Logger that will be returned\n # std logging will return the same Logger with our attribute if name is reused\n logger.debug(f\"Marking logger {self.service} as preconfigured\")\n self._logger.init = True\n\n def _configure_sampling(self):\n \"\"\"Dynamically set log level based on sampling rate\n\n Raises\n ------\n InvalidLoggerSamplingRateError\n When sampling rate provided is not a float\n \"\"\"\n try:\n if self.sampling_rate and random.random() <= float(self.sampling_rate):\n logger.debug(\"Setting log level to Debug due to sampling rate\")\n self.log_level = logging.DEBUG\n except ValueError:\n raise InvalidLoggerSamplingRateError(\n f\"Expected a float value ranging 0 to 1, but received {self.sampling_rate} instead.\"\n f\"Please review POWERTOOLS_LOGGER_SAMPLE_RATE environment variable.\"\n )\n\n def inject_lambda_context(\n self,\n lambda_handler: Optional[Callable[[Dict, Any], Any]] = None,\n log_event: Optional[bool] = None,\n correlation_id_path: Optional[str] = None,\n clear_state: Optional[bool] = False,\n ):\n \"\"\"Decorator to capture Lambda contextual info and inject into logger\n\n Parameters\n ----------\n clear_state : bool, optional\n Instructs logger to remove any custom keys previously added\n lambda_handler : Callable\n Method to inject the lambda context\n log_event : bool, optional\n Instructs logger to log Lambda Event, by default False\n correlation_id_path: str, optional\n Optional JMESPath for the correlation_id\n\n Environment variables\n ---------------------\n POWERTOOLS_LOGGER_LOG_EVENT : str\n instruct logger to log Lambda Event (e.g. 
`\"true\", \"True\", \"TRUE\"`)\n\n Example\n -------\n **Captures Lambda contextual runtime info (e.g memory, arn, req_id)**\n\n from aws_lambda_powertools import Logger\n\n logger = Logger(service=\"payment\")\n\n @logger.inject_lambda_context\n def handler(event, context):\n logger.info(\"Hello\")\n\n **Captures Lambda contextual runtime info and logs incoming request**\n\n from aws_lambda_powertools import Logger\n\n logger = Logger(service=\"payment\")\n\n @logger.inject_lambda_context(log_event=True)\n def handler(event, context):\n logger.info(\"Hello\")\n\n Returns\n -------\n decorate : Callable\n Decorated lambda handler\n \"\"\"\n\n # If handler is None we've been called with parameters\n # Return a partial function with args filled\n if lambda_handler is None:\n logger.debug(\"Decorator called with parameters\")\n return functools.partial(\n self.inject_lambda_context,\n log_event=log_event,\n correlation_id_path=correlation_id_path,\n clear_state=clear_state,\n )\n\n log_event = resolve_truthy_env_var_choice(\n env=os.getenv(constants.LOGGER_LOG_EVENT_ENV, \"false\"), choice=log_event\n )\n\n @functools.wraps(lambda_handler)\n def decorate(event, context, **kwargs):\n lambda_context = build_lambda_context_model(context)\n cold_start = _is_cold_start()\n\n if clear_state:\n self.structure_logs(cold_start=cold_start, **lambda_context.__dict__)\n else:\n self.append_keys(cold_start=cold_start, **lambda_context.__dict__)\n\n if correlation_id_path:\n self.set_correlation_id(jmespath.search(correlation_id_path, event))\n\n if log_event:\n logger.debug(\"Event received\")\n self.info(event)\n\n return lambda_handler(event, context)\n\n return decorate\n\n def append_keys(self, **additional_keys):\n self.registered_formatter.append_keys(**additional_keys)\n\n def remove_keys(self, keys: Iterable[str]):\n self.registered_formatter.remove_keys(keys)\n\n @property\n def registered_handler(self) -> logging.Handler:\n \"\"\"Convenience property to access logger handler\"\"\"\n handlers = self._logger.parent.handlers if self.child else self._logger.handlers\n return handlers[0]\n\n @property\n def registered_formatter(self) -> PowertoolsFormatter:\n \"\"\"Convenience property to access logger formatter\"\"\"\n return self.registered_handler.formatter # type: ignore\n\n def structure_logs(self, append: bool = False, **keys):\n \"\"\"Sets logging formatting to JSON.\n\n Optionally, it can append keyword arguments\n to an existing logger so it is available across future log statements.\n\n Last keyword argument and value wins if duplicated.\n\n Parameters\n ----------\n append : bool, optional\n append keys provided to logger formatter, by default False\n \"\"\"\n\n if append:\n # Maintenance: Add deprecation warning for major version. Refer to append_keys() when docs are updated\n self.append_keys(**keys)\n else:\n log_keys = {**self._default_log_keys, **keys}\n formatter = self.logger_formatter or LambdaPowertoolsFormatter(**log_keys) # type: ignore\n self.registered_handler.setFormatter(formatter)\n\n def set_correlation_id(self, value: Optional[str]):\n \"\"\"Sets the correlation_id in the logging json\n\n Parameters\n ----------\n value : str, optional\n Value for the correlation id. 
None will remove the correlation_id\n \"\"\"\n self.append_keys(correlation_id=value)\n\n def get_correlation_id(self) -> Optional[str]:\n \"\"\"Gets the correlation_id in the logging json\n\n Returns\n -------\n str, optional\n Value for the correlation id\n \"\"\"\n if isinstance(self.registered_formatter, LambdaPowertoolsFormatter):\n return self.registered_formatter.log_format.get(\"correlation_id\")\n return None\n\n @staticmethod\n def _get_log_level(level: Union[str, int, None]) -> Union[str, int]:\n \"\"\"Returns preferred log level set by the customer in upper case\"\"\"\n if isinstance(level, int):\n return level\n\n log_level: Optional[str] = level or os.getenv(\"LOG_LEVEL\")\n if log_level is None:\n return logging.INFO\n\n return log_level.upper()\n\n @staticmethod\n def _get_caller_filename():\n \"\"\"Return caller filename by finding the caller frame\"\"\"\n # Current frame => _get_logger()\n # Previous frame => logger.py\n # Before previous frame => Caller\n frame = inspect.currentframe()\n caller_frame = frame.f_back.f_back.f_back\n return caller_frame.f_globals[\"__name__\"]\n\n\ndef set_package_logger(\n level: Union[str, int] = logging.DEBUG,\n stream: Optional[IO[str]] = None,\n formatter: Optional[logging.Formatter] = None,\n):\n \"\"\"Set an additional stream handler, formatter, and log level for aws_lambda_powertools package logger.\n\n **Package log by default is suppressed (NullHandler), this should only used for debugging.\n This is separate from application Logger class utility**\n\n Example\n -------\n **Enables debug logging for AWS Lambda Powertools package**\n\n >>> aws_lambda_powertools.logging.logger import set_package_logger\n >>> set_package_logger()\n\n Parameters\n ----------\n level: str, int\n log level, DEBUG by default\n stream: sys.stdout\n log stream, stdout by default\n formatter: logging.Formatter\n log formatter, \"%(asctime)s %(name)s [%(levelname)s] %(message)s\" by default\n \"\"\"\n if formatter is None:\n formatter = logging.Formatter(\"%(asctime)s %(name)s [%(levelname)s] %(message)s\")\n\n if stream is None:\n stream = sys.stdout\n\n logger = logging.getLogger(\"aws_lambda_powertools\")\n logger.setLevel(level)\n handler = logging.StreamHandler(stream)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n", "path": "aws_lambda_powertools/logging/logger.py" } ]
[ { "content": "import functools\nimport inspect\nimport logging\nimport os\nimport random\nimport sys\nfrom typing import IO, Any, Callable, Dict, Iterable, Optional, TypeVar, Union\n\nimport jmespath\n\nfrom ..shared import constants\nfrom ..shared.functions import resolve_env_var_choice, resolve_truthy_env_var_choice\nfrom .exceptions import InvalidLoggerSamplingRateError\nfrom .filters import SuppressFilter\nfrom .formatter import BasePowertoolsFormatter, LambdaPowertoolsFormatter\nfrom .lambda_context import build_lambda_context_model\n\nlogger = logging.getLogger(__name__)\n\nis_cold_start = True\n\nPowertoolsFormatter = TypeVar(\"PowertoolsFormatter\", bound=BasePowertoolsFormatter)\n\n\ndef _is_cold_start() -> bool:\n \"\"\"Verifies whether is cold start\n\n Returns\n -------\n bool\n cold start bool value\n \"\"\"\n cold_start = False\n\n global is_cold_start\n if is_cold_start:\n cold_start = is_cold_start\n is_cold_start = False\n\n return cold_start\n\n\n# PyCharm does not support autocomplete via getattr\n# so we need to return to subclassing removed in #97\n# All methods/properties continue to be proxied to inner logger\n# https://github.com/awslabs/aws-lambda-powertools-python/issues/107\n# noinspection PyRedeclaration\nclass Logger(logging.Logger): # lgtm [py/missing-call-to-init]\n \"\"\"Creates and setups a logger to format statements in JSON.\n\n Includes service name and any additional key=value into logs\n It also accepts both service name or level explicitly via env vars\n\n Environment variables\n ---------------------\n POWERTOOLS_SERVICE_NAME : str\n service name\n LOG_LEVEL: str\n logging level (e.g. INFO, DEBUG)\n POWERTOOLS_LOGGER_SAMPLE_RATE: float\n sampling rate ranging from 0 to 1, 1 being 100% sampling\n\n Parameters\n ----------\n service : str, optional\n service name to be appended in logs, by default \"service_undefined\"\n level : str, int optional\n logging.level, by default \"INFO\"\n child: bool, optional\n create a child Logger named <service>.<caller_file_name>, False by default\n sample_rate: float, optional\n sample rate for debug calls within execution context defaults to 0.0\n stream: sys.stdout, optional\n valid output for a logging stream, by default sys.stdout\n logger_formatter: PowertoolsFormatter, optional\n custom logging formatter that implements PowertoolsFormatter\n logger_handler: logging.Handler, optional\n custom logging handler e.g. logging.FileHandler(\"file.log\")\n\n Parameters propagated to LambdaPowertoolsFormatter\n --------------------------------------------------\n datefmt: str, optional\n String directives (strftime) to format log timestamp using `time`, by default it uses RFC\n 3339.\n use_datetime_directive: str, optional\n Interpret `datefmt` as a format string for `datetime.datetime.strftime`, rather than\n `time.strftime`.\n\n See https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior . 
This\n also supports a custom %F directive for milliseconds.\n json_serializer : Callable, optional\n function to serialize `obj` to a JSON formatted `str`, by default json.dumps\n json_deserializer : Callable, optional\n function to deserialize `str`, `bytes`, bytearray` containing a JSON document to a Python `obj`,\n by default json.loads\n json_default : Callable, optional\n function to coerce unserializable values, by default `str()`\n\n Only used when no custom formatter is set\n utc : bool, optional\n set logging timestamp to UTC, by default False to continue to use local time as per stdlib\n log_record_order : list, optional\n set order of log keys when logging, by default [\"level\", \"location\", \"message\", \"timestamp\"]\n\n Example\n -------\n **Setups structured logging in JSON for Lambda functions with explicit service name**\n\n >>> from aws_lambda_powertools import Logger\n >>> logger = Logger(service=\"payment\")\n >>>\n >>> def handler(event, context):\n logger.info(\"Hello\")\n\n **Setups structured logging in JSON for Lambda functions using env vars**\n\n $ export POWERTOOLS_SERVICE_NAME=\"payment\"\n $ export POWERTOOLS_LOGGER_SAMPLE_RATE=0.01 # 1% debug sampling\n >>> from aws_lambda_powertools import Logger\n >>> logger = Logger()\n >>>\n >>> def handler(event, context):\n logger.info(\"Hello\")\n\n **Append payment_id to previously setup logger**\n\n >>> from aws_lambda_powertools import Logger\n >>> logger = Logger(service=\"payment\")\n >>>\n >>> def handler(event, context):\n logger.append_keys(payment_id=event[\"payment_id\"])\n logger.info(\"Hello\")\n\n **Create child Logger using logging inheritance via child param**\n\n >>> # app.py\n >>> import another_file\n >>> from aws_lambda_powertools import Logger\n >>> logger = Logger(service=\"payment\")\n >>>\n >>> # another_file.py\n >>> from aws_lambda_powertools import Logger\n >>> logger = Logger(service=\"payment\", child=True)\n\n **Logging in UTC timezone**\n\n >>> # app.py\n >>> import logging\n >>> from aws_lambda_powertools import Logger\n >>>\n >>> logger = Logger(service=\"payment\", utc=True)\n\n **Brings message as the first key in log statements**\n\n >>> # app.py\n >>> import logging\n >>> from aws_lambda_powertools import Logger\n >>>\n >>> logger = Logger(service=\"payment\", log_record_order=[\"message\"])\n\n **Logging to a file instead of standard output for testing**\n\n >>> # app.py\n >>> import logging\n >>> from aws_lambda_powertools import Logger\n >>>\n >>> logger = Logger(service=\"payment\", logger_handler=logging.FileHandler(\"log.json\"))\n\n Raises\n ------\n InvalidLoggerSamplingRateError\n When sampling rate provided is not a float\n \"\"\"\n\n def __init__(\n self,\n service: Optional[str] = None,\n level: Union[str, int, None] = None,\n child: bool = False,\n sampling_rate: Optional[float] = None,\n stream: Optional[IO[str]] = None,\n logger_formatter: Optional[PowertoolsFormatter] = None,\n logger_handler: Optional[logging.Handler] = None,\n **kwargs,\n ):\n self.service = resolve_env_var_choice(\n choice=service, env=os.getenv(constants.SERVICE_NAME_ENV, \"service_undefined\")\n )\n self.sampling_rate = resolve_env_var_choice(\n choice=sampling_rate, env=os.getenv(constants.LOGGER_LOG_SAMPLING_RATE)\n )\n self.child = child\n self.logger_formatter = logger_formatter\n self.logger_handler = logger_handler or logging.StreamHandler(stream)\n self.log_level = self._get_log_level(level)\n self._is_deduplication_disabled = resolve_truthy_env_var_choice(\n 
env=os.getenv(constants.LOGGER_LOG_DEDUPLICATION_ENV, \"false\")\n )\n self._default_log_keys = {\"service\": self.service, \"sampling_rate\": self.sampling_rate}\n self._logger = self._get_logger()\n\n self._init_logger(**kwargs)\n\n def __getattr__(self, name):\n # Proxy attributes not found to actual logger to support backward compatibility\n # https://github.com/awslabs/aws-lambda-powertools-python/issues/97\n return getattr(self._logger, name)\n\n def _get_logger(self):\n \"\"\"Returns a Logger named {self.service}, or {self.service.filename} for child loggers\"\"\"\n logger_name = self.service\n if self.child:\n logger_name = f\"{self.service}.{self._get_caller_filename()}\"\n\n return logging.getLogger(logger_name)\n\n def _init_logger(self, **kwargs):\n \"\"\"Configures new logger\"\"\"\n\n # Skip configuration if it's a child logger or a pre-configured logger\n # to prevent the following:\n # a) multiple handlers being attached\n # b) different sampling mechanisms\n # c) multiple messages from being logged as handlers can be duplicated\n is_logger_preconfigured = getattr(self._logger, \"init\", False)\n if self.child or is_logger_preconfigured:\n return\n\n self._configure_sampling()\n self._logger.setLevel(self.log_level)\n self._logger.addHandler(self.logger_handler)\n self.structure_logs(**kwargs)\n\n # Pytest Live Log feature duplicates log records for colored output\n # but we explicitly add a filter for log deduplication.\n # This flag disables this protection when you explicit want logs to be duplicated (#262)\n if not self._is_deduplication_disabled:\n logger.debug(\"Adding filter in root logger to suppress child logger records to bubble up\")\n for handler in logging.root.handlers:\n # It'll add a filter to suppress any child logger from self.service\n # Example: `Logger(service=\"order\")`, where service is Order\n # It'll reject all loggers starting with `order` e.g. 
order.checkout, order.shared\n handler.addFilter(SuppressFilter(self.service))\n\n # as per bug in #249, we should not be pre-configuring an existing logger\n # therefore we set a custom attribute in the Logger that will be returned\n # std logging will return the same Logger with our attribute if name is reused\n logger.debug(f\"Marking logger {self.service} as preconfigured\")\n self._logger.init = True\n\n def _configure_sampling(self):\n \"\"\"Dynamically set log level based on sampling rate\n\n Raises\n ------\n InvalidLoggerSamplingRateError\n When sampling rate provided is not a float\n \"\"\"\n try:\n if self.sampling_rate and random.random() <= float(self.sampling_rate):\n logger.debug(\"Setting log level to Debug due to sampling rate\")\n self.log_level = logging.DEBUG\n except ValueError:\n raise InvalidLoggerSamplingRateError(\n f\"Expected a float value ranging 0 to 1, but received {self.sampling_rate} instead.\"\n f\"Please review POWERTOOLS_LOGGER_SAMPLE_RATE environment variable.\"\n )\n\n def inject_lambda_context(\n self,\n lambda_handler: Optional[Callable[[Dict, Any], Any]] = None,\n log_event: Optional[bool] = None,\n correlation_id_path: Optional[str] = None,\n clear_state: Optional[bool] = False,\n ):\n \"\"\"Decorator to capture Lambda contextual info and inject into logger\n\n Parameters\n ----------\n clear_state : bool, optional\n Instructs logger to remove any custom keys previously added\n lambda_handler : Callable\n Method to inject the lambda context\n log_event : bool, optional\n Instructs logger to log Lambda Event, by default False\n correlation_id_path: str, optional\n Optional JMESPath for the correlation_id\n\n Environment variables\n ---------------------\n POWERTOOLS_LOGGER_LOG_EVENT : str\n instruct logger to log Lambda Event (e.g. 
`\"true\", \"True\", \"TRUE\"`)\n\n Example\n -------\n **Captures Lambda contextual runtime info (e.g memory, arn, req_id)**\n\n from aws_lambda_powertools import Logger\n\n logger = Logger(service=\"payment\")\n\n @logger.inject_lambda_context\n def handler(event, context):\n logger.info(\"Hello\")\n\n **Captures Lambda contextual runtime info and logs incoming request**\n\n from aws_lambda_powertools import Logger\n\n logger = Logger(service=\"payment\")\n\n @logger.inject_lambda_context(log_event=True)\n def handler(event, context):\n logger.info(\"Hello\")\n\n Returns\n -------\n decorate : Callable\n Decorated lambda handler\n \"\"\"\n\n # If handler is None we've been called with parameters\n # Return a partial function with args filled\n if lambda_handler is None:\n logger.debug(\"Decorator called with parameters\")\n return functools.partial(\n self.inject_lambda_context,\n log_event=log_event,\n correlation_id_path=correlation_id_path,\n clear_state=clear_state,\n )\n\n log_event = resolve_truthy_env_var_choice(\n env=os.getenv(constants.LOGGER_LOG_EVENT_ENV, \"false\"), choice=log_event\n )\n\n @functools.wraps(lambda_handler)\n def decorate(event, context, **kwargs):\n lambda_context = build_lambda_context_model(context)\n cold_start = _is_cold_start()\n\n if clear_state:\n self.structure_logs(cold_start=cold_start, **lambda_context.__dict__)\n else:\n self.append_keys(cold_start=cold_start, **lambda_context.__dict__)\n\n if correlation_id_path:\n self.set_correlation_id(jmespath.search(correlation_id_path, event))\n\n if log_event:\n logger.debug(\"Event received\")\n self.info(getattr(event, \"raw_event\", event))\n\n return lambda_handler(event, context)\n\n return decorate\n\n def append_keys(self, **additional_keys):\n self.registered_formatter.append_keys(**additional_keys)\n\n def remove_keys(self, keys: Iterable[str]):\n self.registered_formatter.remove_keys(keys)\n\n @property\n def registered_handler(self) -> logging.Handler:\n \"\"\"Convenience property to access logger handler\"\"\"\n handlers = self._logger.parent.handlers if self.child else self._logger.handlers\n return handlers[0]\n\n @property\n def registered_formatter(self) -> PowertoolsFormatter:\n \"\"\"Convenience property to access logger formatter\"\"\"\n return self.registered_handler.formatter # type: ignore\n\n def structure_logs(self, append: bool = False, **keys):\n \"\"\"Sets logging formatting to JSON.\n\n Optionally, it can append keyword arguments\n to an existing logger so it is available across future log statements.\n\n Last keyword argument and value wins if duplicated.\n\n Parameters\n ----------\n append : bool, optional\n append keys provided to logger formatter, by default False\n \"\"\"\n\n if append:\n # Maintenance: Add deprecation warning for major version. Refer to append_keys() when docs are updated\n self.append_keys(**keys)\n else:\n log_keys = {**self._default_log_keys, **keys}\n formatter = self.logger_formatter or LambdaPowertoolsFormatter(**log_keys) # type: ignore\n self.registered_handler.setFormatter(formatter)\n\n def set_correlation_id(self, value: Optional[str]):\n \"\"\"Sets the correlation_id in the logging json\n\n Parameters\n ----------\n value : str, optional\n Value for the correlation id. 
None will remove the correlation_id\n \"\"\"\n self.append_keys(correlation_id=value)\n\n def get_correlation_id(self) -> Optional[str]:\n \"\"\"Gets the correlation_id in the logging json\n\n Returns\n -------\n str, optional\n Value for the correlation id\n \"\"\"\n if isinstance(self.registered_formatter, LambdaPowertoolsFormatter):\n return self.registered_formatter.log_format.get(\"correlation_id\")\n return None\n\n @staticmethod\n def _get_log_level(level: Union[str, int, None]) -> Union[str, int]:\n \"\"\"Returns preferred log level set by the customer in upper case\"\"\"\n if isinstance(level, int):\n return level\n\n log_level: Optional[str] = level or os.getenv(\"LOG_LEVEL\")\n if log_level is None:\n return logging.INFO\n\n return log_level.upper()\n\n @staticmethod\n def _get_caller_filename():\n \"\"\"Return caller filename by finding the caller frame\"\"\"\n # Current frame => _get_logger()\n # Previous frame => logger.py\n # Before previous frame => Caller\n frame = inspect.currentframe()\n caller_frame = frame.f_back.f_back.f_back\n return caller_frame.f_globals[\"__name__\"]\n\n\ndef set_package_logger(\n level: Union[str, int] = logging.DEBUG,\n stream: Optional[IO[str]] = None,\n formatter: Optional[logging.Formatter] = None,\n):\n \"\"\"Set an additional stream handler, formatter, and log level for aws_lambda_powertools package logger.\n\n **Package log by default is suppressed (NullHandler), this should only used for debugging.\n This is separate from application Logger class utility**\n\n Example\n -------\n **Enables debug logging for AWS Lambda Powertools package**\n\n >>> aws_lambda_powertools.logging.logger import set_package_logger\n >>> set_package_logger()\n\n Parameters\n ----------\n level: str, int\n log level, DEBUG by default\n stream: sys.stdout\n log stream, stdout by default\n formatter: logging.Formatter\n log formatter, \"%(asctime)s %(name)s [%(levelname)s] %(message)s\" by default\n \"\"\"\n if formatter is None:\n formatter = logging.Formatter(\"%(asctime)s %(name)s [%(levelname)s] %(message)s\")\n\n if stream is None:\n stream = sys.stdout\n\n logger = logging.getLogger(\"aws_lambda_powertools\")\n logger.setLevel(level)\n handler = logging.StreamHandler(stream)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n", "path": "aws_lambda_powertools/logging/logger.py" } ]
diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py index 938742fb0a3..49321181b48 100644 --- a/aws_lambda_powertools/logging/logger.py +++ b/aws_lambda_powertools/logging/logger.py @@ -349,7 +349,7 @@ def decorate(event, context, **kwargs): if log_event: logger.debug("Event received") - self.info(event) + self.info(getattr(event, "raw_event", event)) return lambda_handler(event, context) diff --git a/tests/functional/test_logger.py b/tests/functional/test_logger.py index 6b05119b88b..20b0a74fc64 100644 --- a/tests/functional/test_logger.py +++ b/tests/functional/test_logger.py @@ -17,6 +17,7 @@ from aws_lambda_powertools.logging.formatter import BasePowertoolsFormatter from aws_lambda_powertools.logging.logger import set_package_logger from aws_lambda_powertools.shared import constants +from aws_lambda_powertools.utilities.data_classes import S3Event, event_source @pytest.fixture @@ -635,3 +636,21 @@ def test_use_datetime(stdout, service_name, utc): assert re.fullmatch( f"custom timestamp: milliseconds=[0-9]+ microseconds=[0-9]+ timezone={re.escape(expected_tz)}", log["timestamp"] ) + + +def test_inject_lambda_context_log_event_request_data_classes(lambda_context, stdout, lambda_event, service_name): + # GIVEN Logger is initialized + logger = Logger(service=service_name, stream=stdout) + + # WHEN a lambda function is decorated with logger instructed to log event + # AND the event is an event source data class + @event_source(data_class=S3Event) + @logger.inject_lambda_context(log_event=True) + def handler(event, context): + logger.info("Hello") + + handler(lambda_event, lambda_context) + + # THEN logger should log event received from Lambda + logged_event, _ = capture_multiple_logging_statements_output(stdout) + assert logged_event["message"] == lambda_event
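A note on the one-line fix in the diff above: the event-source data classes wrap the original Lambda payload and expose it through a `raw_event` attribute, so `getattr(event, "raw_event", event)` resolves to the underlying dict whether or not the handler is decorated with `@event_source`. The following is a minimal stand-in sketch — `WrappedEvent` is a placeholder, not the library's actual `DictWrapper` — that illustrates the fallback.

```python
# Minimal stand-in for an event-source data class (not the library's real code):
# it wraps the original event dict and exposes it via a `raw_event` property.
class WrappedEvent:
    def __init__(self, data: dict):
        self._data = data

    @property
    def raw_event(self) -> dict:
        return self._data


plain_event = {"Records": [{"eventSource": "aws:s3"}]}
wrapped_event = WrappedEvent(plain_event)

# The patched logger call falls back to the object itself when there is no
# `raw_event` attribute, so both cases log the underlying, serializable dict.
assert getattr(plain_event, "raw_event", plain_event) is plain_event
assert getattr(wrapped_event, "raw_event", wrapped_event) is plain_event
```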
googleapis__google-auth-library-python-671
Use extra for asyncio dependencies Hello! The latest release for this library pulls in aiohttp and its dependencies unconditionally, which adds non-trivial burden to projects that don’t need it. Would you consider using a packaging extra so that people can opt-in? TODO: undo pin of 'aiohttp' once 'aioresponses' releases a fix Environment details - OS: $ sw_vers ProductName: Mac OS X ProductVersion: 10.14.6 BuildVersion: 18G6020 - Python version: 3.6, 3.7, 3.8 - pip version: pip 20.2.4 - `google-auth` version: 5906c8583ca351b5385a079a30521a9a8a0c7c59 #### Steps to reproduce 1. nox -s unit There are 9 tests that fail, all with the same error: `TypeError: __init__() missing 1 required positional argument: 'limit'` ``` ====================================================== short test summary info ======================================================= FAILED tests_async/transport/test_aiohttp_requests.py::TestCombinedResponse::test_content_compressed - TypeError: __init__() missin... FAILED tests_async/transport/test_aiohttp_requests.py::TestResponse::test_headers_prop - TypeError: __init__() missing 1 required p... FAILED tests_async/transport/test_aiohttp_requests.py::TestResponse::test_status_prop - TypeError: __init__() missing 1 required po... FAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_request - TypeError: __init__() missing 1 requir... FAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_ctx - TypeError: __init__() missing 1 required p... FAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_http_headers - TypeError: __init__() missing 1 r... FAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_regexp_example - TypeError: __init__() missing 1... FAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_request_no_refresh - TypeError: __init__() missi... FAILED tests_async/transport/test_aiohttp_requests.py::TestAuthorizedSession::test_request_refresh - TypeError: __init__() missing ... 
============================================ 9 failed, 609 passed, 12 warnings in 33.41s ============================================= ``` Here is the traceback for one of the failing tests: ``` ____________________________________________ TestCombinedResponse.test_content_compressed ____________________________________________ self = <tests_async.transport.test_aiohttp_requests.TestCombinedResponse object at 0x108803160> urllib3_mock = <function decompress at 0x10880a820> @mock.patch( "google.auth.transport._aiohttp_requests.urllib3.response.MultiDecoder.decompress", return_value="decompressed", autospec=True, ) @pytest.mark.asyncio async def test_content_compressed(self, urllib3_mock): rm = core.RequestMatch( "url", headers={"Content-Encoding": "gzip"}, payload="compressed" ) > response = await rm.build_response(core.URL("url")) tests_async/transport/test_aiohttp_requests.py:72: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ../../../.virtualenv/google-auth-library-python/lib/python3.8/site-packages/aioresponses/core.py:192: in build_response resp = self._build_response( ../../../.virtualenv/google-auth-library-python/lib/python3.8/site-packages/aioresponses/core.py:173: in _build_response resp.content = stream_reader_factory(loop) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ loop = <Mock id='4437587472'> def stream_reader_factory( # noqa loop: 'Optional[asyncio.AbstractEventLoop]' = None ): protocol = ResponseHandler(loop=loop) > return StreamReader(protocol, loop=loop) E TypeError: __init__() missing 1 required positional argument: 'limit' ../../../.virtualenv/google-auth-library-python/lib/python3.8/site-packages/aioresponses/compat.py:48: TypeError ========================================================== warnings summary ========================================================== ``` The root cause is a change in aiohttp version 3.7.0 which was released a few hours ago. The signature for StreamReader has changed, making the optional argument `limit` a required argument. https://github.com/aio-libs/aiohttp/blob/56e78836aa7c67292ace9e256711699d51d57285/aiohttp/streams.py#L106 This change breaks aioresponses: https://github.com/pnuckowski/aioresponses/blob/e61977f42a0164e0c572031dfb18ae95ba198df0/aioresponses/compat.py#L44 Add support for Python 3.9
[ { "content": "import synthtool as s\nfrom synthtool import gcp\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(unit_cov_level=100, cov_level=100)\ns.move(\n templated_files / \".kokoro\",\n excludes=[\n \".kokoro/continuous/common.cfg\",\n \".kokoro/presubmit/common.cfg\",\n \".kokoro/build.sh\",\n ],\n) # just move kokoro configs\n", "path": "synth.py" } ]
[ { "content": "import synthtool as s\nfrom synthtool import gcp\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(unit_cov_level=100, cov_level=100)\ns.move(\n templated_files / \".kokoro\",\n excludes=[\n \"continuous/common.cfg\",\n \"presubmit/common.cfg\",\n \"build.sh\",\n ],\n) # just move kokoro configs\n", "path": "synth.py" } ]
diff --git a/.gitignore b/.gitignore index f01e60ec0..1f0b7e3c7 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ scripts/local_test_setup tests/data/key.json tests/data/key.p12 tests/data/user-key.json +system_tests/data/ # PyCharm configuration: .idea diff --git a/.kokoro/build.sh b/.kokoro/build.sh index 3a63e98c6..1f96e21d7 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -15,7 +15,11 @@ set -eo pipefail -cd github/google-auth-library-python +if [[ -z "${PROJECT_ROOT:-}" ]]; then + PROJECT_ROOT="github/google-auth-library-python" +fi + +cd "${PROJECT_ROOT}" # Disable buffering, so that the logs stream through. export PYTHONUNBUFFERED=1 @@ -27,19 +31,33 @@ env | grep KOKORO export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json # Setup project id. -export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") +export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.txt") + +# Activate gcloud with service account credentials +gcloud auth activate-service-account --key-file=$GOOGLE_APPLICATION_CREDENTIALS +gcloud config set project ${PROJECT_ID} + +# Decrypt system test secrets +./scripts/decrypt-secrets.sh # Remove old nox -python3.6 -m pip uninstall --yes --quiet nox-automation +python3 -m pip uninstall --yes --quiet nox-automation # Install nox -python3.6 -m pip install --upgrade --quiet nox -python3.6 -m nox --version +python3 -m pip install --upgrade --quiet nox +python3 -m nox --version # If NOX_SESSION is set, it only runs the specified session, # otherwise run all the sessions. if [[ -n "${NOX_SESSION:-}" ]]; then - python3.6 -m nox -s "${NOX_SESSION:-}" + python3 -m nox -s ${NOX_SESSION:-} else - python3.6 -m nox + python3 -m nox fi + + +# Decrypt system test secrets +./scripts/decrypt-secrets.sh + +# Run system tests which use a different noxfile +python3 -m nox -f system_tests/noxfile.py \ No newline at end of file diff --git a/.kokoro/continuous/common.cfg b/.kokoro/continuous/common.cfg index c587b4104..10910e357 100644 --- a/.kokoro/continuous/common.cfg +++ b/.kokoro/continuous/common.cfg @@ -11,7 +11,7 @@ action { gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Download resources for system tests (service account key, etc.) -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-python" +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-auth-library-python" # Use the trampoline script to run in docker. build_file: "google-auth-library-python/.kokoro/trampoline.sh" diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg index 111810782..d0f5783d5 100644 --- a/.kokoro/docs/docs-presubmit.cfg +++ b/.kokoro/docs/docs-presubmit.cfg @@ -15,3 +15,14 @@ env_vars: { key: "TRAMPOLINE_IMAGE_UPLOAD" value: "false" } + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/google-auth-library-python/.kokoro/build.sh" +} + +# Only run this nox session. +env_vars: { + key: "NOX_SESSION" + value: "docs docfx" +} diff --git a/.kokoro/presubmit/common.cfg b/.kokoro/presubmit/common.cfg index c587b4104..10910e357 100644 --- a/.kokoro/presubmit/common.cfg +++ b/.kokoro/presubmit/common.cfg @@ -11,7 +11,7 @@ action { gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Download resources for system tests (service account key, etc.) -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-python" +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-auth-library-python" # Use the trampoline script to run in docker. 
build_file: "google-auth-library-python/.kokoro/trampoline.sh" diff --git a/synth.metadata b/synth.metadata index 5e1ef9a55..0de642bf7 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,14 +4,14 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/google-auth-library-python.git", - "sha": "9c4200dff31986b7ff300126e9aa35d14aa84dba" + "sha": "f062da8392c32fb3306cdc6e4dbae78212aa0dc7" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "da5c6050d13b4950c82666a81d8acd25157664ae" + "sha": "16ec872dd898d7de6e1822badfac32484b5d9031" } } ], diff --git a/synth.py b/synth.py index 49bf2dda6..f692f7010 100644 --- a/synth.py +++ b/synth.py @@ -10,8 +10,8 @@ s.move( templated_files / ".kokoro", excludes=[ - ".kokoro/continuous/common.cfg", - ".kokoro/presubmit/common.cfg", - ".kokoro/build.sh", + "continuous/common.cfg", + "presubmit/common.cfg", + "build.sh", ], ) # just move kokoro configs diff --git a/system_tests/__init__.py b/system_tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/system_tests/noxfile.py b/system_tests/noxfile.py index dcfe8ee81..5d0014bc8 100644 --- a/system_tests/noxfile.py +++ b/system_tests/noxfile.py @@ -30,7 +30,7 @@ import py.path HERE = os.path.abspath(os.path.dirname(__file__)) -LIBRARY_DIR = os.path.join(HERE, "..") +LIBRARY_DIR = os.path.abspath(os.path.dirname(HERE)) DATA_DIR = os.path.join(HERE, "data") SERVICE_ACCOUNT_FILE = os.path.join(DATA_DIR, "service_account.json") AUTHORIZED_USER_FILE = os.path.join(DATA_DIR, "authorized_user.json") @@ -169,7 +169,7 @@ def configure_cloud_sdk(session, application_default_credentials, project=False) # Test sesssions TEST_DEPENDENCIES_ASYNC = ["aiohttp", "pytest-asyncio", "nest-asyncio"] -TEST_DEPENDENCIES_SYNC = ["pytest", "requests"] +TEST_DEPENDENCIES_SYNC = ["pytest", "requests", "mock"] PYTHON_VERSIONS_ASYNC = ["3.7"] PYTHON_VERSIONS_SYNC = ["2.7", "3.7"] @@ -249,6 +249,7 @@ def app_engine(session): session.log("Skipping App Engine tests.") return + session.install(LIBRARY_DIR) # Unlike the default tests above, the App Engine system test require a # 'real' gcloud sdk installation that is configured to deploy to an # app engine project. 
@@ -269,9 +270,8 @@ def app_engine(session): application_url = GAE_APP_URL_TMPL.format(GAE_TEST_APP_SERVICE, project_id) # Vendor in the test application's dependencies - session.chdir(os.path.join(HERE, "../app_engine_test_app")) + session.chdir(os.path.join(HERE, "system_tests_sync/app_engine_test_app")) session.install(*TEST_DEPENDENCIES_SYNC) - session.install(LIBRARY_DIR) session.run( "pip", "install", "--target", "lib", "-r", "requirements.txt", silent=True ) @@ -288,7 +288,7 @@ def app_engine(session): @nox.session(python=PYTHON_VERSIONS_SYNC) def grpc(session): session.install(LIBRARY_DIR) - session.install(*TEST_DEPENDENCIES_SYNC, "google-cloud-pubsub==1.0.0") + session.install(*TEST_DEPENDENCIES_SYNC, "google-cloud-pubsub==1.7.0") session.env[EXPLICIT_CREDENTIALS_ENV] = SERVICE_ACCOUNT_FILE session.run("pytest", "system_tests_sync/test_grpc.py") diff --git a/system_tests/secrets.tar.enc b/system_tests/secrets.tar.enc new file mode 100644 index 000000000..29e06923f Binary files /dev/null and b/system_tests/secrets.tar.enc differ diff --git a/system_tests/system_tests_async/__init__.py b/system_tests/system_tests_async/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/system_tests/system_tests_async/conftest.py b/system_tests/system_tests_async/conftest.py index ecff74c96..47a473e7f 100644 --- a/system_tests/system_tests_async/conftest.py +++ b/system_tests/system_tests_async/conftest.py @@ -71,7 +71,8 @@ async def _token_info(access_token=None, id_token=None): url = _helpers.update_query(sync_conftest.TOKEN_INFO_URL, query_params) response = await http_request(url=url, method="GET") - data = await response.data.read() + + data = await response.content() return json.loads(data.decode("utf-8")) diff --git a/system_tests/system_tests_async/test_default.py b/system_tests/system_tests_async/test_default.py index 383cbff01..32299c059 100644 --- a/system_tests/system_tests_async/test_default.py +++ b/system_tests/system_tests_async/test_default.py @@ -15,14 +15,13 @@ import os import pytest -import google.auth +from google.auth import _default_async EXPECT_PROJECT_ID = os.environ.get("EXPECT_PROJECT_ID") @pytest.mark.asyncio async def test_application_default_credentials(verify_refresh): - credentials, project_id = google.auth.default_async() - #breakpoint() + credentials, project_id = _default_async.default_async() if EXPECT_PROJECT_ID is not None: assert project_id is not None diff --git a/system_tests/system_tests_sync/app_engine_test_app/requirements.txt b/system_tests/system_tests_sync/app_engine_test_app/requirements.txt index e390e141f..bd5c476ab 100644 --- a/system_tests/system_tests_sync/app_engine_test_app/requirements.txt +++ b/system_tests/system_tests_sync/app_engine_test_app/requirements.txt @@ -1,3 +1,3 @@ urllib3 # Relative path to google-auth-python's source. -../.. +../../.. 
diff --git a/system_tests/system_tests_sync/secrets.tar.enc b/system_tests/system_tests_sync/secrets.tar.enc index af10c7134..29e06923f 100644 Binary files a/system_tests/system_tests_sync/secrets.tar.enc and b/system_tests/system_tests_sync/secrets.tar.enc differ diff --git a/system_tests/system_tests_sync/test_grpc.py b/system_tests/system_tests_sync/test_grpc.py index 650fa96a4..7dcbd4c43 100644 --- a/system_tests/system_tests_sync/test_grpc.py +++ b/system_tests/system_tests_sync/test_grpc.py @@ -17,8 +17,6 @@ import google.auth.jwt import google.auth.transport.grpc from google.cloud import pubsub_v1 -from google.cloud.pubsub_v1.gapic import publisher_client -from google.cloud.pubsub_v1.gapic.transports import publisher_grpc_transport def test_grpc_request_with_regular_credentials(http_request): @@ -27,13 +25,8 @@ def test_grpc_request_with_regular_credentials(http_request): credentials, ["https://www.googleapis.com/auth/pubsub"] ) - transport = publisher_grpc_transport.PublisherGrpcTransport( - address=publisher_client.PublisherClient.SERVICE_ADDRESS, - credentials=credentials, - ) - # Create a pub/sub client. - client = pubsub_v1.PublisherClient(transport=transport) + client = pubsub_v1.PublisherClient(credentials=credentials) # list the topics and drain the iterator to test that an authorized API # call works. @@ -48,13 +41,8 @@ def test_grpc_request_with_jwt_credentials(): credentials, audience=audience ) - transport = publisher_grpc_transport.PublisherGrpcTransport( - address=publisher_client.PublisherClient.SERVICE_ADDRESS, - credentials=credentials, - ) - # Create a pub/sub client. - client = pubsub_v1.PublisherClient(transport=transport) + client = pubsub_v1.PublisherClient(credentials=credentials) # list the topics and drain the iterator to test that an authorized API # call works. @@ -68,13 +56,8 @@ def test_grpc_request_with_on_demand_jwt_credentials(): credentials ) - transport = publisher_grpc_transport.PublisherGrpcTransport( - address=publisher_client.PublisherClient.SERVICE_ADDRESS, - credentials=credentials, - ) - # Create a pub/sub client. - client = pubsub_v1.PublisherClient(transport=transport) + client = pubsub_v1.PublisherClient(credentials=credentials) # list the topics and drain the iterator to test that an authorized API # call works. diff --git a/system_tests/system_tests_sync/test_mtls_http.py b/system_tests/system_tests_sync/test_mtls_http.py index 7c5649685..bcf2a59da 100644 --- a/system_tests/system_tests_sync/test_mtls_http.py +++ b/system_tests/system_tests_sync/test_mtls_http.py @@ -13,8 +13,11 @@ # limitations under the License. import json -from os import path +import mock +import os import time +from os import path + import google.auth import google.auth.credentials
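For context on the feature request in this record: making the asyncio dependencies opt-in is usually done with a setuptools extra. The snippet below is only an illustrative sketch — the package name, extra name, and version pins are assumptions, not this project's actual packaging metadata — showing how `aiohttp` can be kept out of default installs and pulled in with `pip install <package>[aiohttp]`.

```python
# Illustrative setup.py excerpt; every name and pin here is a placeholder,
# not the library's real metadata. The async transport's dependency is
# declared as an extra, so a plain install never pulls in aiohttp.
from setuptools import setup

setup(
    name="example-auth-lib",
    version="0.0.1",
    packages=["example_auth_lib"],
    install_requires=["requests >= 2.20.0"],  # always installed
    extras_require={
        # opt in with: pip install example-auth-lib[aiohttp]
        "aiohttp": ["aiohttp >= 3.6.2, < 4.0.0"],
    },
)
```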
pypa__cibuildwheel-977
On Windows, setup_py_python_requires attempts to open a UTF-8 setup.py as Windows-1252 and fails ### Description This [setup.py file](https://github.com/fgregg/fastcluster/blob/master/setup.py) is valid UTF-8 and has a few non-ASCII characters. In a Windows build, `setup_py_python_requires` appears to open this file as if it were encoded as Windows-1252 and therefore fails on some non-ASCII characters. ### Build log https://github.com/fgregg/fastcluster/runs/4660766954?check_suite_focus=true#step:5:40 ### CI config https://github.com/fgregg/fastcluster/blob/master/.github/workflows/pythonpackage.yml#L41-L47
[ { "content": "import ast\nimport sys\nfrom configparser import ConfigParser\nfrom pathlib import Path\nfrom typing import Any, Optional\n\nimport tomli\n\nif sys.version_info < (3, 8):\n Constant = ast.Str\n\n def get_constant(x: ast.Str) -> str:\n return x.s\n\nelse:\n Constant = ast.Constant\n\n def get_constant(x: ast.Constant) -> Any:\n return x.value\n\n\nclass Analyzer(ast.NodeVisitor):\n def __init__(self) -> None:\n self.requires_python: Optional[str] = None\n\n def visit(self, content: ast.AST) -> None:\n for node in ast.walk(content):\n for child in ast.iter_child_nodes(node):\n child.parent = node # type: ignore[attr-defined]\n super().visit(content)\n\n def visit_keyword(self, node: ast.keyword) -> None:\n self.generic_visit(node)\n if node.arg == \"python_requires\":\n # Must not be nested in an if or other structure\n # This will be Module -> Expr -> Call -> keyword\n if not hasattr(node.parent.parent.parent, \"parent\") and isinstance( # type: ignore[attr-defined]\n node.value, Constant\n ):\n self.requires_python = get_constant(node.value)\n\n\ndef setup_py_python_requires(content: str) -> Optional[str]:\n try:\n tree = ast.parse(content)\n analyzer = Analyzer()\n analyzer.visit(tree)\n return analyzer.requires_python or None\n except Exception:\n return None\n\n\ndef get_requires_python_str(package_dir: Path) -> Optional[str]:\n \"\"\"Return the python requires string from the most canonical source available, or None\"\"\"\n\n # Read in from pyproject.toml:project.requires-python\n try:\n with (package_dir / \"pyproject.toml\").open(\"rb\") as f1:\n info = tomli.load(f1)\n return str(info[\"project\"][\"requires-python\"])\n except (FileNotFoundError, KeyError, IndexError, TypeError):\n pass\n\n # Read in from setup.cfg:options.python_requires\n try:\n config = ConfigParser()\n config.read(package_dir / \"setup.cfg\")\n return str(config[\"options\"][\"python_requires\"])\n except (FileNotFoundError, KeyError, IndexError, TypeError):\n pass\n\n try:\n with (package_dir / \"setup.py\").open() as f2:\n return setup_py_python_requires(f2.read())\n except FileNotFoundError:\n pass\n\n return None\n", "path": "cibuildwheel/projectfiles.py" } ]
[ { "content": "import ast\nimport sys\nfrom configparser import ConfigParser\nfrom pathlib import Path\nfrom typing import Any, Optional\n\nimport tomli\n\nif sys.version_info < (3, 8):\n Constant = ast.Str\n\n def get_constant(x: ast.Str) -> str:\n return x.s\n\nelse:\n Constant = ast.Constant\n\n def get_constant(x: ast.Constant) -> Any:\n return x.value\n\n\nclass Analyzer(ast.NodeVisitor):\n def __init__(self) -> None:\n self.requires_python: Optional[str] = None\n\n def visit(self, content: ast.AST) -> None:\n for node in ast.walk(content):\n for child in ast.iter_child_nodes(node):\n child.parent = node # type: ignore[attr-defined]\n super().visit(content)\n\n def visit_keyword(self, node: ast.keyword) -> None:\n self.generic_visit(node)\n if node.arg == \"python_requires\":\n # Must not be nested in an if or other structure\n # This will be Module -> Expr -> Call -> keyword\n if not hasattr(node.parent.parent.parent, \"parent\") and isinstance( # type: ignore[attr-defined]\n node.value, Constant\n ):\n self.requires_python = get_constant(node.value)\n\n\ndef setup_py_python_requires(content: str) -> Optional[str]:\n try:\n tree = ast.parse(content)\n analyzer = Analyzer()\n analyzer.visit(tree)\n return analyzer.requires_python or None\n except Exception:\n return None\n\n\ndef get_requires_python_str(package_dir: Path) -> Optional[str]:\n \"\"\"Return the python requires string from the most canonical source available, or None\"\"\"\n\n # Read in from pyproject.toml:project.requires-python\n try:\n with (package_dir / \"pyproject.toml\").open(\"rb\") as f1:\n info = tomli.load(f1)\n return str(info[\"project\"][\"requires-python\"])\n except (FileNotFoundError, KeyError, IndexError, TypeError):\n pass\n\n # Read in from setup.cfg:options.python_requires\n try:\n config = ConfigParser()\n config.read(package_dir / \"setup.cfg\")\n return str(config[\"options\"][\"python_requires\"])\n except (FileNotFoundError, KeyError, IndexError, TypeError):\n pass\n\n try:\n with (package_dir / \"setup.py\").open(encoding=\"utf8\") as f2:\n return setup_py_python_requires(f2.read())\n except FileNotFoundError:\n pass\n\n return None\n", "path": "cibuildwheel/projectfiles.py" } ]
diff --git a/cibuildwheel/projectfiles.py b/cibuildwheel/projectfiles.py index c4f63c176..fece392f8 100644 --- a/cibuildwheel/projectfiles.py +++ b/cibuildwheel/projectfiles.py @@ -70,7 +70,7 @@ def get_requires_python_str(package_dir: Path) -> Optional[str]: pass try: - with (package_dir / "setup.py").open() as f2: + with (package_dir / "setup.py").open(encoding="utf8") as f2: return setup_py_python_requires(f2.read()) except FileNotFoundError: pass diff --git a/unit_test/projectfiles_test.py b/unit_test/projectfiles_test.py index c62df6a9c..6c55d46a1 100644 --- a/unit_test/projectfiles_test.py +++ b/unit_test/projectfiles_test.py @@ -25,7 +25,7 @@ def test_read_setup_py_simple(tmp_path): def test_read_setup_py_full(tmp_path): - with open(tmp_path / "setup.py", "w") as f: + with open(tmp_path / "setup.py", "w", encoding="utf8") as f: f.write( dedent( """ @@ -35,6 +35,7 @@ def test_read_setup_py_full(tmp_path): setuptools.setup( name = "hello", + description = "≥“”ü", other = 23, example = ["item", "other"], python_requires = "1.24", @@ -43,7 +44,9 @@ def test_read_setup_py_full(tmp_path): ) ) - assert setup_py_python_requires(tmp_path.joinpath("setup.py").read_text()) == "1.24" + assert ( + setup_py_python_requires(tmp_path.joinpath("setup.py").read_text(encoding="utf8")) == "1.24" + ) assert get_requires_python_str(tmp_path) == "1.24"
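The fix in this record is the explicit `encoding="utf8"` argument. As a quick illustration of the failure mode (a sketch using the same non-ASCII characters the new test adds, not cibuildwheel's code): the right double quote `”` encodes to the UTF-8 bytes `E2 80 9D`, and `0x9D` has no mapping in Windows-1252, so decoding with the implicit Windows locale codec raises `UnicodeDecodeError`.

```python
# Sketch of why the explicit encoding matters: the byte 0x9D (from the UTF-8
# encoding of '”') is undefined in cp1252, so the implicit Windows locale
# codec cannot decode it, while decoding as UTF-8 works.
data = 'description = "≥“”ü"  # non-ASCII on purpose\n'.encode("utf8")

try:
    data.decode("cp1252")  # what an open() without encoding= can amount to on Windows
except UnicodeDecodeError as exc:
    print("cp1252 decode fails:", exc)

print(data.decode("utf8"))  # the encoding the fix passes explicitly
```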
Lightning-AI__pytorch-lightning-799
Optional dependencies are required for deprecated logging module 🐛 Bug There is a backwards compatibility issue coming from PR #767. Notably, if a user doesn't have any of the extra logging dependencies, there will be an import error. ### To Reproduce 1. Remove all logging dependencies from your environment (e.g. comet) 2. Depend on the deprecated pytorch_lightning.logging package and run ### Expected behavior We expect to maintain backwards compatibility here, so optional dependencies shouldn't be required.
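The merged fix (see the after_files and diff below) simply drops the eager `from pytorch_lightning.loggers import (base, comet, ...)` line, so importing the shim no longer pulls in optional back-ends. As a general illustration of the failure mode, the sketch below guards optional imports explicitly; the package and module names are placeholders, not the real pytorch-lightning layout:

```python
# Illustrative only: guard optional back-end imports so a compatibility shim
# never hard-fails when an extra dependency is missing.
import importlib
import warnings

_OPTIONAL_BACKENDS = ["comet", "mlflow", "neptune", "wandb"]  # placeholder names

for _name in _OPTIONAL_BACKENDS:
    try:
        importlib.import_module(f"example_loggers.{_name}")  # hypothetical package
    except ImportError:
        warnings.warn(f"Optional logging back-end {_name!r} is not installed; skipping.")
```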
[ { "content": "\"\"\"\n.. warning:: `logging` package has been renamed to `loggers` since v0.6.1 and will be removed in v0.8.0\n\"\"\"\n\nimport warnings\n\nwarnings.warn(\"`logging` package has been renamed to `loggers` since v0.6.1\"\n \" and will be removed in v0.8.0\", DeprecationWarning)\n\nfrom pytorch_lightning.loggers import * # noqa: F403\nfrom pytorch_lightning.loggers import ( # noqa: E402\n base, comet, mlflow, neptune, tensorboard, test_tube, wandb\n)\n", "path": "pytorch_lightning/logging/__init__.py" } ]
[ { "content": "\"\"\"\n.. warning:: `logging` package has been renamed to `loggers` since v0.6.1 and will be removed in v0.8.0\n\"\"\"\n\nimport warnings\n\nwarnings.warn(\"`logging` package has been renamed to `loggers` since v0.6.1\"\n \" and will be removed in v0.8.0\", DeprecationWarning)\n\nfrom pytorch_lightning.loggers import * # noqa: F403\n", "path": "pytorch_lightning/logging/__init__.py" } ]
diff --git a/pytorch_lightning/logging/__init__.py b/pytorch_lightning/logging/__init__.py index 93515eb1eff31..ecd4c3b01e9f4 100644 --- a/pytorch_lightning/logging/__init__.py +++ b/pytorch_lightning/logging/__init__.py @@ -8,6 +8,3 @@ " and will be removed in v0.8.0", DeprecationWarning) from pytorch_lightning.loggers import * # noqa: F403 -from pytorch_lightning.loggers import ( # noqa: E402 - base, comet, mlflow, neptune, tensorboard, test_tube, wandb -)
pymeasure__pymeasure-852
How to include Channels in API documentation In #819 I encountered the question of how best to include the Channel in the documentation. It does not seem right to put the channel at the same level as the instruments of that manufacturer. Any ideas?
[ { "content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2023 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\nimport logging\nfrom pymeasure.instruments.instrument import Instrument\nfrom pymeasure.instruments.channel import Channel\nfrom pymeasure.instruments.validators import (strict_discrete_range,\n strict_discrete_set,\n truncated_range\n )\nfrom enum import IntFlag\n\nlog = logging.getLogger(__name__) # https://docs.python.org/3/howto/logging.html#library-config\nlog.addHandler(logging.NullHandler())\n\n\nclass SystemStatusCode(IntFlag):\n \"\"\"System status enums based on ``IntFlag``\n\n Used in conjunction with :attr:`~.system_status_code`.\n\n ====== ======\n Value Enum\n ====== ======\n 256 WAVEFORM_DISPLAY\n 64 TIMER_ENABLED\n 32 FOUR_WIRE\n 16 OUTPUT_ENABLED\n 1 CONSTANT_CURRENT\n 0 CONSTANT_VOLTAGE\n ====== ======\n\n \"\"\"\n\n WAVEFORM_DISPLAY = 256 # bit 8 -- waveform display enabled\n TIMER_ENABLED = 64 # bit 6 -- timer enabled\n FOUR_WIRE = 32 # bit 5 -- four-wire mode enabled\n OUTPUT_ENABLED = 16 # bit 4 -- output enabled\n CONSTANT_CURRENT = 1 # bit 0 -- constant current mode\n CONSTANT_VOLTAGE = 0 # bit 0 -- constant voltage mode\n\n\nclass SPDChannel(Channel):\n \"\"\" The channel class for Siglent SPDxxxxX instruments.\n \"\"\"\n\n def __init__(self, parent, id,\n voltage_range: list = [0, 16],\n current_range: list = [0, 8]):\n super().__init__(parent, id)\n self.voltage_range = voltage_range\n self.current_range = current_range\n\n voltage = Instrument.measurement(\n \"MEAS:VOLT? CH{ch}\",\n \"\"\"Measure the channel output voltage.\n\n :type: float\n \"\"\"\n )\n\n current = Instrument.measurement(\n \"MEAS:CURR? CH{ch}\",\n \"\"\"Measure the channel output current.\n\n :type: float\n \"\"\"\n )\n\n power = Instrument.measurement(\n \"MEAS:POWE? 
CH{ch}\",\n \"\"\"Measure the channel output power.\n\n :type: float\n \"\"\"\n )\n\n current_limit = Instrument.control(\n \"CH{ch}:CURR?\", \"CH{ch}:CURR %g\",\n \"\"\"Control the output current configuration of the channel.\n\n :type : float\n \"\"\",\n validator=truncated_range,\n values=[0, 8],\n dynamic=True\n )\n\n voltage_setpoint = Instrument.control(\n \"CH{ch}:VOLT?\", \"CH{ch}:VOLT %g\",\n \"\"\"Control the output voltage configuration of the channel.\n\n :type : float\n \"\"\",\n validator=truncated_range,\n values=[0, 16],\n dynamic=True\n )\n\n def enable_output(self, enable: bool = True):\n \"\"\"Enable the channel output.\n\n :type: bool\n ``True``: enables the output\n ``False``: disables it\n \"\"\"\n self.parent.selected_channel = self.id\n self.write('OUTP CH{ch},' + (\"ON\" if enable else \"OFF\"))\n\n def enable_timer(self, enable: bool = True):\n \"\"\"Enable the channel timer.\n\n :type: bool\n ``True``: enables the timer\n ``False``: disables it\n \"\"\"\n self.write('TIME CH{ch},' + (\"ON\" if enable else \"OFF\"))\n\n def configure_timer(self, step, voltage, current, duration):\n \"\"\"Configure the timer step.\n\n :param step:\n int: index of the step to save the configuration\n :param voltage:\n float: voltage setpoint of the step\n :param current:\n float: current limit of the step\n :param duration:\n int: duration of the step in seconds\n \"\"\"\n step = strict_discrete_range(step, [1, 5], 1)\n voltage = truncated_range(voltage, self.voltage_range)\n current = truncated_range(current, self.current_range)\n duration = truncated_range(duration, [0, 10000])\n self.write(f'TIME:SET CH{{ch}},{step:d},{voltage:1.3f},{current:1.3f},{duration:d}')\n\n\nclass SPDBase(Instrument):\n \"\"\" The base class for Siglent SPDxxxxX instruments.\n \"\"\"\n\n def __init__(self, adapter, **kwargs):\n kwargs.setdefault('name', 'Siglent SPDxxxxX instrument Base Class')\n super().__init__(\n adapter,\n usb=dict(write_termination='\\n',\n read_termination='\\n'),\n tcpip=dict(write_termination='\\n',\n read_termination='\\n'),\n **kwargs\n )\n\n error = Instrument.measurement(\n \"SYST:ERR?\",\n \"\"\"Read the error code and information of the instrument.\n\n :type: string\n \"\"\"\n )\n\n fw_version = Instrument.measurement(\n \"SYST:VERS?\",\n \"\"\"Read the software version of the instrument.\n\n :type: string\n \"\"\"\n )\n\n system_status_code = Instrument.measurement(\n \"SYST:STAT?\",\n \"\"\"Read the system status register.\n\n :type: :class:`.SystemStatusCode`\n \"\"\",\n get_process=lambda v: SystemStatusCode(int(v, base=16)),\n )\n\n selected_channel = Instrument.control(\n \"INST?\", \"INST %s\",\n \"\"\"Control the selected channel of the instrument.\n\n :type : int\n \"\"\",\n validator=strict_discrete_set,\n values={1: \"CH1\"}, # This dynamic property should be updated for multi-channel instruments\n map_values=True,\n dynamic=True\n )\n\n def save_config(self, index):\n \"\"\"Save the current config to memory.\n\n :param index:\n int: index of the location to save the configuration\n \"\"\"\n index = strict_discrete_range(index, [1, 5], 1)\n self.write(f\"*SAV {index:d}\")\n\n def recall_config(self, index):\n \"\"\"Recall a config from memory.\n\n :param index:\n int: index of the location from which to recall the configuration\n \"\"\"\n index = strict_discrete_range(index, [1, 5], 1)\n self.write(f\"*RCL {index:d}\")\n\n def enable_local_interface(self, enable: bool = True):\n \"\"\"Configure the availability of the local interface.\n\n :type: bool\n ``True``: 
enables the local interface\n ``False``: disables it.\n \"\"\"\n self.write(\"*UNLOCK\" if enable else \"*LOCK\")\n\n def shutdown(self):\n \"\"\" Ensure that the voltage is turned to zero\n and disable the output. \"\"\"\n for ch in self.channels.values():\n ch.voltage_setpoint = 0\n ch.enable_output(False)\n super().shutdown()\n\n\nclass SPDSingleChannelBase(SPDBase):\n def enable_4W_mode(self, enable: bool = True):\n \"\"\"Enable 4-wire mode.\n\n :type: bool\n ``True``: enables 4-wire mode\n ``False``: disables it.\n \"\"\"\n self.write(f'MODE:SET {\"4W\" if enable else \"2W\"}')\n", "path": "pymeasure/instruments/siglenttechnologies/siglent_spdbase.py" } ]
[ { "content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2023 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\nimport logging\nfrom pymeasure.instruments.instrument import Instrument\nfrom pymeasure.instruments.channel import Channel\nfrom pymeasure.instruments.validators import (strict_discrete_range,\n strict_discrete_set,\n truncated_range\n )\nfrom enum import IntFlag\n\nlog = logging.getLogger(__name__) # https://docs.python.org/3/howto/logging.html#library-config\nlog.addHandler(logging.NullHandler())\n\n\nclass SystemStatusCode(IntFlag):\n \"\"\"System status enums based on ``IntFlag``\n\n Used in conjunction with :attr:`~.system_status_code`.\n\n ====== ======\n Value Enum\n ====== ======\n 256 WAVEFORM_DISPLAY\n 64 TIMER_ENABLED\n 32 FOUR_WIRE\n 16 OUTPUT_ENABLED\n 1 CONSTANT_CURRENT\n 0 CONSTANT_VOLTAGE\n ====== ======\n\n \"\"\"\n\n WAVEFORM_DISPLAY = 256 # bit 8 -- waveform display enabled\n TIMER_ENABLED = 64 # bit 6 -- timer enabled\n FOUR_WIRE = 32 # bit 5 -- four-wire mode enabled\n OUTPUT_ENABLED = 16 # bit 4 -- output enabled\n CONSTANT_CURRENT = 1 # bit 0 -- constant current mode\n CONSTANT_VOLTAGE = 0 # bit 0 -- constant voltage mode\n\n\nclass SPDChannel(Channel):\n \"\"\" The channel class for Siglent SPDxxxxX instruments.\n \"\"\"\n\n def __init__(self, parent, id,\n voltage_range: list = [0, 16],\n current_range: list = [0, 8]):\n super().__init__(parent, id)\n self.voltage_range = voltage_range\n self.current_range = current_range\n\n voltage = Instrument.measurement(\n \"MEAS:VOLT? CH{ch}\",\n \"\"\"Measure the channel output voltage.\n\n :type: float\n \"\"\"\n )\n\n current = Instrument.measurement(\n \"MEAS:CURR? CH{ch}\",\n \"\"\"Measure the channel output current.\n\n :type: float\n \"\"\"\n )\n\n power = Instrument.measurement(\n \"MEAS:POWE? 
CH{ch}\",\n \"\"\"Measure the channel output power.\n\n :type: float\n \"\"\"\n )\n\n current_limit = Instrument.control(\n \"CH{ch}:CURR?\", \"CH{ch}:CURR %g\",\n \"\"\"Control the output current configuration of the channel.\n\n :type : float\n \"\"\",\n validator=truncated_range,\n values=[0, 8],\n dynamic=True\n )\n\n voltage_setpoint = Instrument.control(\n \"CH{ch}:VOLT?\", \"CH{ch}:VOLT %g\",\n \"\"\"Control the output voltage configuration of the channel.\n\n :type : float\n \"\"\",\n validator=truncated_range,\n values=[0, 16],\n dynamic=True\n )\n\n def enable_output(self, enable: bool = True):\n \"\"\"Enable the channel output.\n\n :type: bool\n ``True``: enables the output\n ``False``: disables it\n \"\"\"\n self.parent.selected_channel = self.id\n self.write('OUTP CH{ch},' + (\"ON\" if enable else \"OFF\"))\n\n def enable_timer(self, enable: bool = True):\n \"\"\"Enable the channel timer.\n\n :type: bool\n ``True``: enables the timer\n ``False``: disables it\n \"\"\"\n self.write('TIME CH{ch},' + (\"ON\" if enable else \"OFF\"))\n\n def configure_timer(self, step, voltage, current, duration):\n \"\"\"Configure the timer step.\n\n :param step:\n int: index of the step to save the configuration\n :param voltage:\n float: voltage setpoint of the step\n :param current:\n float: current limit of the step\n :param duration:\n int: duration of the step in seconds\n \"\"\"\n step = strict_discrete_range(step, [1, 5], 1)\n voltage = truncated_range(voltage, self.voltage_range)\n current = truncated_range(current, self.current_range)\n duration = truncated_range(duration, [0, 10000])\n self.write(f'TIME:SET CH{{ch}},{step:d},{voltage:1.3f},{current:1.3f},{duration:d}')\n\n\nclass SPDBase(Instrument):\n \"\"\" The base class for Siglent SPDxxxxX instruments.\n\n Uses :class:`SPDChannel` for measurement channels.\n \"\"\"\n\n def __init__(self, adapter, **kwargs):\n kwargs.setdefault('name', 'Siglent SPDxxxxX instrument Base Class')\n super().__init__(\n adapter,\n usb=dict(write_termination='\\n',\n read_termination='\\n'),\n tcpip=dict(write_termination='\\n',\n read_termination='\\n'),\n **kwargs\n )\n\n error = Instrument.measurement(\n \"SYST:ERR?\",\n \"\"\"Read the error code and information of the instrument.\n\n :type: string\n \"\"\"\n )\n\n fw_version = Instrument.measurement(\n \"SYST:VERS?\",\n \"\"\"Read the software version of the instrument.\n\n :type: string\n \"\"\"\n )\n\n system_status_code = Instrument.measurement(\n \"SYST:STAT?\",\n \"\"\"Read the system status register.\n\n :type: :class:`.SystemStatusCode`\n \"\"\",\n get_process=lambda v: SystemStatusCode(int(v, base=16)),\n )\n\n selected_channel = Instrument.control(\n \"INST?\", \"INST %s\",\n \"\"\"Control the selected channel of the instrument.\n\n :type : int\n \"\"\",\n validator=strict_discrete_set,\n values={1: \"CH1\"}, # This dynamic property should be updated for multi-channel instruments\n map_values=True,\n dynamic=True\n )\n\n def save_config(self, index):\n \"\"\"Save the current config to memory.\n\n :param index:\n int: index of the location to save the configuration\n \"\"\"\n index = strict_discrete_range(index, [1, 5], 1)\n self.write(f\"*SAV {index:d}\")\n\n def recall_config(self, index):\n \"\"\"Recall a config from memory.\n\n :param index:\n int: index of the location from which to recall the configuration\n \"\"\"\n index = strict_discrete_range(index, [1, 5], 1)\n self.write(f\"*RCL {index:d}\")\n\n def enable_local_interface(self, enable: bool = True):\n \"\"\"Configure the 
availability of the local interface.\n\n :type: bool\n ``True``: enables the local interface\n ``False``: disables it.\n \"\"\"\n self.write(\"*UNLOCK\" if enable else \"*LOCK\")\n\n def shutdown(self):\n \"\"\" Ensure that the voltage is turned to zero\n and disable the output. \"\"\"\n for ch in self.channels.values():\n ch.voltage_setpoint = 0\n ch.enable_output(False)\n super().shutdown()\n\n\nclass SPDSingleChannelBase(SPDBase):\n def enable_4W_mode(self, enable: bool = True):\n \"\"\"Enable 4-wire mode.\n\n :type: bool\n ``True``: enables 4-wire mode\n ``False``: disables it.\n \"\"\"\n self.write(f'MODE:SET {\"4W\" if enable else \"2W\"}')\n", "path": "pymeasure/instruments/siglenttechnologies/siglent_spdbase.py" } ]
diff --git a/docs/dev/adding_instruments/channels.rst b/docs/dev/adding_instruments/channels.rst index dda8dea9d8..55873e202c 100644 --- a/docs/dev/adding_instruments/channels.rst +++ b/docs/dev/adding_instruments/channels.rst @@ -25,6 +25,17 @@ All the channel communication is routed through the instrument's methods (`write However, :meth:`Channel.insert_id <pymeasure.instruments.Channel.insert_id>` uses `str.format` to insert the channel's id at any occurence of the class attribute :attr:`Channel.placeholder`, which defaults to :code:`"ch"`, in the written commands. For example :code:`"Ch{ch}:VOLT?"` will be sent as :code:`"Ch3:VOLT?"` to the device, if the channel's id is "3". +Please add the channel to the documentation. In the instrument's documentation file, you may add + +.. code:: + + .. autoclass:: pymeasure.instruments.MANUFACTURER.INSTRUMENT.CHANNEL + :members: + :show-inheritance: + +`MANUFACTURER` is the folder name of the manufacturer and `INSTRUMENT` the file name of the instrument definition, which contains the `CHANNEL` class. +You may link in the instrument's docstring to the channel with :code:`:class:\`CHANNEL\`` + In order to add a channel to an instrument or to another channel (nesting channels is possible), create the channels with the class :class:`~pymeasure.instruments.common_base.CommonBase.ChannelCreator` as class attributes. Its constructor accepts a single channel class or list of classes and a list of corresponding ids. Instead of lists, you may also use tuples. diff --git a/pymeasure/instruments/siglenttechnologies/siglent_spdbase.py b/pymeasure/instruments/siglenttechnologies/siglent_spdbase.py index a596ecd0c1..9e7808d061 100644 --- a/pymeasure/instruments/siglenttechnologies/siglent_spdbase.py +++ b/pymeasure/instruments/siglenttechnologies/siglent_spdbase.py @@ -157,6 +157,8 @@ def configure_timer(self, step, voltage, current, duration): class SPDBase(Instrument): """ The base class for Siglent SPDxxxxX instruments. + + Uses :class:`SPDChannel` for measurement channels. """ def __init__(self, adapter, **kwargs):
voxel51__fiftyone-1179
[BUG] Cannot open desktop App with fiftyone==0.12.0 source install On a source install of `fiftyone==0.12.0`, the desktop App cannot be opened: ```shell fiftyone app launch --desktop ``` ``` Usage Error: Couldn't find a script named "start-app" ``` The `start-app` script in the last release was defined at https://github.com/voxel51/fiftyone/blob/d383bfb0fd88a04a3352f06ddaa48599d0e8ce2a/app/package.json#L11. I tried updating https://github.com/voxel51/fiftyone/blob/da5d83a3e5b1578ba162f2453f26a1139991d370/fiftyone/core/service.py#L456 to both `yarn dev` and `yarn start` per the now-available scripts at https://github.com/voxel51/fiftyone/blob/da5d83a3e5b1578ba162f2453f26a1139991d370/app/package.json#L9-L10, but neither caused the App window to appear for me (no error either; it just hung).
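The eventual fix (see the diff below) renames the dev script to `start-desktop` in both `package.json` and `AppService.find_app()`. As a defensive illustration only, not the code that was merged, the dev-install branch could fail fast when the named yarn script is missing instead of hanging:

```python
import json
from pathlib import Path

def find_dev_app_command(app_dir: str, script: str = "start-desktop") -> list:
    # Fail early with a clear error if the yarn script named in the service
    # command does not exist in package.json, rather than launching and hanging.
    package_json = Path(app_dir) / "package.json"
    scripts = json.loads(package_json.read_text(encoding="utf8")).get("scripts", {})
    if script not in scripts:
        raise RuntimeError(f"yarn script {script!r} not found in {package_json}")
    return ["yarn", script]
```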
[ { "content": "\"\"\"\nFiftyOne Services.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport logging\nimport multiprocessing\nimport os\nimport re\nimport subprocess\nimport sys\n\nfrom packaging.version import Version\nimport psutil\nimport requests\nfrom retrying import retry\n\nimport eta.core.utils as etau\n\nimport fiftyone.constants as foc\nimport fiftyone.core.config as focn\nimport fiftyone.core.context as focx\nimport fiftyone.service.util as fosu\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ServiceException(Exception):\n \"\"\"Base class for service-related exceptions.\"\"\"\n\n pass\n\n\nclass ServiceListenTimeout(ServiceException):\n \"\"\"Exception raised when a network-bound service fails to bind to a port.\n \"\"\"\n\n def __init__(self, name, port=None):\n self.name = name\n self.port = port\n\n def __str__(self):\n message = \"%s failed to bind to port\" % self.name\n if self.port is not None:\n message += \" \" + str(self.port)\n\n return message\n\n\nclass Service(object):\n \"\"\"Interface for FiftyOne services.\n\n All services must define a ``command`` property.\n\n Services are run in an isolated Python subprocess (see ``service/main.py``)\n to ensure that they are shut down when the main Python process exits. The\n ``command`` and ``working_dir`` properties control the execution of the\n service in the subprocess.\n \"\"\"\n\n service_name = None\n working_dir = \".\"\n allow_headless = False\n\n def __init__(self):\n self._system = os.system\n self._disabled = (\n os.environ.get(\"FIFTYONE_SERVER\", False)\n or os.environ.get(\"FIFTYONE_DISABLE_SERVICES\", False)\n or multiprocessing.current_process().name != \"MainProcess\"\n or (\n os.environ.get(\"FIFTYONE_HEADLESS\", False)\n and not self.allow_headless\n )\n )\n self.child = None\n if not self._disabled:\n self.start()\n\n def __del__(self):\n \"\"\"Stops the service.\"\"\"\n if not self._disabled:\n self.stop()\n\n @property\n def command(self):\n raise NotImplementedError(\"%r must define `command`\" % type(self))\n\n @property\n def env(self):\n return {}\n\n @property\n def _service_args(self):\n \"\"\"Arguments passed to the service entrypoint.\"\"\"\n if not self.service_name:\n raise NotImplementedError(\n \"%r must define `service_name`\" % type(self)\n )\n\n return [\"--51-service\", self.service_name]\n\n def start(self):\n \"\"\"Starts the service.\"\"\"\n service_main_path = os.path.join(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))),\n \"service\",\n \"main.py\",\n )\n\n # use psutil's Popen wrapper because its wait() more reliably waits\n # for the process to exit on Windows\n self.child = psutil.Popen(\n [sys.executable, service_main_path]\n + self._service_args\n + self.command,\n cwd=self.working_dir,\n stdin=subprocess.PIPE,\n env={**os.environ, \"FIFTYONE_DISABLE_SERVICES\": \"1\", **self.env},\n )\n\n def stop(self):\n \"\"\"Stops the service.\"\"\"\n self.child.stdin.close()\n try:\n self.child.wait()\n except TypeError:\n pass\n\n def wait(self):\n \"\"\"Waits for the service to exit and returns its exit code.\"\"\"\n return self.child.wait()\n\n @staticmethod\n def cleanup():\n \"\"\"Performs any necessary cleanup when the service exits.\n\n This is called by the subprocess (cf. 
``service/main.py``) and is not\n intended to be called directly.\n \"\"\"\n pass\n\n def _wait_for_child_port(self, port=None, timeout=10):\n \"\"\"Waits for any child process of this service to bind to a TCP port.\n\n Args:\n port: if specified, wait for a child to bind to this port\n timeout: the number of seconds to wait before failing\n\n Returns:\n the port the child has bound to (equal to the ``port`` argument\n if specified)\n\n Raises:\n ServiceListenTimeout: if the timeout was exceeded\n \"\"\"\n\n @retry(\n wait_fixed=250,\n stop_max_delay=timeout * 2000,\n retry_on_exception=lambda e: isinstance(e, ServiceListenTimeout),\n )\n def find_port():\n for child in fosu.normalize_wrapper_process(self.child).children(\n recursive=True\n ):\n try:\n for local_port in fosu.get_listening_tcp_ports(child):\n if port is None or port == local_port:\n return local_port\n\n except psutil.Error:\n pass\n\n raise ServiceListenTimeout(etau.get_class_name(self), port)\n\n return find_port()\n\n @classmethod\n def find_subclass_by_name(cls, name):\n for subclass in cls.__subclasses__():\n if subclass.service_name == name:\n return subclass\n\n try:\n return subclass.find_subclass_by_name(name)\n except ValueError:\n pass\n\n raise ValueError(\"Unrecognized %s subclass: %s\" % (cls.__name__, name))\n\n\nclass MultiClientService(Service):\n \"\"\"Base class for services that support multiple clients.\"\"\"\n\n # set when attaching to an existing process\n attached = False\n\n def __init__(self):\n super().__init__()\n\n @property\n def _service_args(self):\n return super()._service_args + [\"--multi\"]\n\n def start(self):\n \"\"\"Searches for a running instance of this service, or starts one\n if no instance is found.\n \"\"\"\n for process in fosu.find_processes_by_args(self._service_args):\n desc = \"Process %i (%s)\" % (\n process.pid,\n \" \".join([\"service/main.py\"] + self._service_args),\n )\n logger.debug(\"Connecting to %s\", desc)\n try:\n reply = fosu.send_ipc_message(\n process, (\"register\", os.getpid())\n )\n if reply == True:\n self.attached = True\n self.child = process\n return\n else:\n logger.warning(\"Failed to connect to %s: %r\", desc, reply)\n\n except IOError:\n logger.warning(\"%s did not respond\", desc)\n super().start()\n\n def stop(self):\n \"\"\"Disconnects from the service without actually stopping it.\"\"\"\n if self.attached:\n self.attached = False\n elif self.child is not None:\n # this process is the original parent\n self.child.stdin.close()\n\n self.child = None\n\n\nclass DatabaseService(MultiClientService):\n \"\"\"Service that controls the underlying MongoDB database.\"\"\"\n\n service_name = \"db\"\n allow_headless = True\n\n MONGOD_EXE_NAME = \"mongod\"\n if sys.platform.startswith(\"win\"):\n MONGOD_EXE_NAME += \".exe\"\n\n MIN_MONGO_VERSION = \"4.4\"\n\n @property\n def database_dir(self):\n config = focn.load_config()\n return config.database_dir\n\n @property\n def command(self):\n args = [\n DatabaseService.find_mongod(),\n \"--dbpath\",\n self.database_dir,\n \"--logpath\",\n os.path.join(self.database_dir, \"log/mongo.log\"),\n \"--port\",\n \"0\",\n ]\n if not sys.platform.startswith(\"win\"):\n args.append(\"--nounixsocket\")\n\n if focx._get_context() == focx._COLAB:\n args = [\"sudo\"] + args\n\n return args\n\n @property\n def port(self):\n return self._wait_for_child_port()\n\n def start(self):\n \"\"\"Starts the DatabaseService.\"\"\"\n etau.ensure_dir(os.path.join(self.database_dir, \"log\"))\n super().start()\n\n # Set up a default 
connection\n import fiftyone.core.odm.database as food\n\n food.set_default_port(self.port)\n food.get_db_conn()\n\n @staticmethod\n def cleanup():\n \"\"\"Deletes non-persistent datasets when the DB shuts down.\"\"\"\n import fiftyone.core.dataset as fod\n import fiftyone.core.odm.database as food\n import fiftyone.service.util as fosu\n\n try:\n port = next(\n port\n for child in psutil.Process().children()\n for port in fosu.get_listening_tcp_ports(child)\n )\n except (StopIteration, psutil.Error):\n # mongod may have exited - ok to wait until next time\n return\n\n try:\n food.set_default_port(port)\n food.get_db_conn()\n fod.delete_non_persistent_datasets()\n food.sync_database()\n except:\n # something weird may have happened, like a downward DB migration\n # - ok to wait until next time\n pass\n\n @staticmethod\n def find_mongod():\n \"\"\"Returns the path to the `mongod` executable.\"\"\"\n search_paths = [\n foc.FIFTYONE_DB_BIN_DIR,\n os.path.join(foc.FIFTYONE_CONFIG_DIR, \"bin\"),\n ] + os.environ[\"PATH\"].split(os.pathsep)\n searched = set()\n attempts = []\n for folder in search_paths:\n if folder in searched:\n continue\n\n searched.add(folder)\n mongod_path = os.path.join(folder, DatabaseService.MONGOD_EXE_NAME)\n if os.path.isfile(mongod_path):\n cmd = [mongod_path, \"--version\"]\n if focx._get_context() == focx._COLAB:\n cmd = [\"sudo\"] + cmd\n\n logger.debug(\"Trying %s\", mongod_path)\n p = psutil.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n )\n out, err = p.communicate()\n out = out.decode(errors=\"ignore\").strip()\n err = err.decode(errors=\"ignore\").strip()\n mongod_version = None\n if p.returncode == 0:\n match = re.search(r\"db version.+?([\\d\\.]+)\", out, re.I)\n if match:\n mongod_version = match.group(1)\n if Version(mongod_version) >= Version(\n DatabaseService.MIN_MONGO_VERSION\n ):\n return mongod_path\n\n attempts.append(\n (mongod_path, mongod_version, p.returncode, err)\n )\n\n for path, version, code, err in attempts:\n if version is not None:\n logger.warning(\"%s: incompatible version %s\", path, version)\n else:\n logger.error(\n \"%s: failed to launch (code %r): %s\", path, code, err\n )\n raise RuntimeError(\n \"Could not find mongod>=%s\" % DatabaseService.MIN_MONGO_VERSION\n )\n\n\nclass ServerService(Service):\n \"\"\"Service that controls the FiftyOne web server.\"\"\"\n\n service_name = \"server\"\n working_dir = foc.SERVER_DIR\n allow_headless = True\n\n def __init__(self, port, do_not_track=False):\n self._port = port\n self._do_not_track = do_not_track\n super().__init__()\n\n def start(self):\n server_version = None\n try:\n server_version = requests.get(\n \"http://127.0.0.1:%i/fiftyone\" % self._port, timeout=2\n ).json()[\"version\"]\n except Exception:\n pass\n\n if server_version is None:\n # There is likely not a fiftyone server running (remote or local),\n # so start a local server. 
If there actually is a fiftyone server\n # running that didn't respond to /fiftyone, the local server will\n # fail to start but the app will still connect successfully.\n super().start()\n self._wait_for_child_port(self._port)\n else:\n logger.info(\"Connected to fiftyone on local port %i\", self._port)\n logger.info(\n \"If you are not connecting to a remote session, you may need\\n\"\n \"to start a new session and specify a port.\\n\"\n )\n if server_version != foc.VERSION:\n logger.warning(\n \"Server version (%s) does not match client version (%s)\",\n server_version,\n foc.VERSION,\n )\n\n @property\n def command(self):\n command = [\n sys.executable,\n \"main.py\",\n \"--port\",\n str(self.port),\n ]\n return command\n\n @property\n def port(self):\n \"\"\"Getter for the current port\"\"\"\n return self._port\n\n @property\n def env(self):\n dnt = \"1\" if self._do_not_track else \"0\"\n return {\"FIFTYONE_DO_NOT_TRACK\": dnt}\n\n\nclass AppService(Service):\n \"\"\"Service that controls the FiftyOne app.\"\"\"\n\n service_name = \"app\"\n working_dir = foc.FIFTYONE_DESKTOP_APP_DIR\n\n def __init__(self, server_port=None):\n # initialize before start() is called\n self.server_port = server_port\n super().__init__()\n\n @property\n def command(self):\n with etau.WorkingDir(foc.FIFTYONE_DESKTOP_APP_DIR):\n return self.find_app()\n\n def find_app(self):\n if foc.DEV_INSTALL:\n return [\"yarn\", \"start-app\"]\n\n for path in etau.list_files(\"./\"):\n if path.endswith(\".tar.gz\"):\n logger.info(\"Installing FiftyOne App\")\n etau.extract_tar(path, \"./\", delete_tar=True)\n\n pre = foc.FIFTYONE_DESKTOP_APP_DIR\n for path in etau.list_files(\"./\"):\n if path.endswith(\".exe\"):\n return [os.path.join(pre + path)]\n\n if path.endswith(\".AppImage\"):\n return [os.path.join(pre, path)]\n\n if os.path.isdir(\"./FiftyOne.app\"):\n return [os.path.join(pre, \"FiftyOne.app/Contents/MacOS/FiftyOne\")]\n\n raise RuntimeError(\n \"Could not find FiftyOne app in %r\" % foc.FIFTYONE_DESKTOP_APP_DIR\n )\n\n @property\n def env(self):\n env = {}\n if self.server_port is not None:\n env[\"FIFTYONE_SERVER_PORT\"] = str(self.server_port)\n if foc.DEV_INSTALL:\n # override port 1212 used by \"yarn dev\" for hot-reloading\n # (specifying port 0 doesn't work here)\n env[\"PORT\"] = str(self.server_port + 1)\n return env\n", "path": "fiftyone/core/service.py" } ]
[ { "content": "\"\"\"\nFiftyOne Services.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport logging\nimport multiprocessing\nimport os\nimport re\nimport subprocess\nimport sys\n\nfrom packaging.version import Version\nimport psutil\nimport requests\nfrom retrying import retry\n\nimport eta.core.utils as etau\n\nimport fiftyone.constants as foc\nimport fiftyone.core.config as focn\nimport fiftyone.core.context as focx\nimport fiftyone.service.util as fosu\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ServiceException(Exception):\n \"\"\"Base class for service-related exceptions.\"\"\"\n\n pass\n\n\nclass ServiceListenTimeout(ServiceException):\n \"\"\"Exception raised when a network-bound service fails to bind to a port.\n \"\"\"\n\n def __init__(self, name, port=None):\n self.name = name\n self.port = port\n\n def __str__(self):\n message = \"%s failed to bind to port\" % self.name\n if self.port is not None:\n message += \" \" + str(self.port)\n\n return message\n\n\nclass Service(object):\n \"\"\"Interface for FiftyOne services.\n\n All services must define a ``command`` property.\n\n Services are run in an isolated Python subprocess (see ``service/main.py``)\n to ensure that they are shut down when the main Python process exits. The\n ``command`` and ``working_dir`` properties control the execution of the\n service in the subprocess.\n \"\"\"\n\n service_name = None\n working_dir = \".\"\n allow_headless = False\n\n def __init__(self):\n self._system = os.system\n self._disabled = (\n os.environ.get(\"FIFTYONE_SERVER\", False)\n or os.environ.get(\"FIFTYONE_DISABLE_SERVICES\", False)\n or multiprocessing.current_process().name != \"MainProcess\"\n or (\n os.environ.get(\"FIFTYONE_HEADLESS\", False)\n and not self.allow_headless\n )\n )\n self.child = None\n if not self._disabled:\n self.start()\n\n def __del__(self):\n \"\"\"Stops the service.\"\"\"\n if not self._disabled:\n self.stop()\n\n @property\n def command(self):\n raise NotImplementedError(\"%r must define `command`\" % type(self))\n\n @property\n def env(self):\n return {}\n\n @property\n def _service_args(self):\n \"\"\"Arguments passed to the service entrypoint.\"\"\"\n if not self.service_name:\n raise NotImplementedError(\n \"%r must define `service_name`\" % type(self)\n )\n\n return [\"--51-service\", self.service_name]\n\n def start(self):\n \"\"\"Starts the service.\"\"\"\n service_main_path = os.path.join(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))),\n \"service\",\n \"main.py\",\n )\n\n # use psutil's Popen wrapper because its wait() more reliably waits\n # for the process to exit on Windows\n self.child = psutil.Popen(\n [sys.executable, service_main_path]\n + self._service_args\n + self.command,\n cwd=self.working_dir,\n stdin=subprocess.PIPE,\n env={**os.environ, \"FIFTYONE_DISABLE_SERVICES\": \"1\", **self.env},\n )\n\n def stop(self):\n \"\"\"Stops the service.\"\"\"\n self.child.stdin.close()\n try:\n self.child.wait()\n except TypeError:\n pass\n\n def wait(self):\n \"\"\"Waits for the service to exit and returns its exit code.\"\"\"\n return self.child.wait()\n\n @staticmethod\n def cleanup():\n \"\"\"Performs any necessary cleanup when the service exits.\n\n This is called by the subprocess (cf. 
``service/main.py``) and is not\n intended to be called directly.\n \"\"\"\n pass\n\n def _wait_for_child_port(self, port=None, timeout=10):\n \"\"\"Waits for any child process of this service to bind to a TCP port.\n\n Args:\n port: if specified, wait for a child to bind to this port\n timeout: the number of seconds to wait before failing\n\n Returns:\n the port the child has bound to (equal to the ``port`` argument\n if specified)\n\n Raises:\n ServiceListenTimeout: if the timeout was exceeded\n \"\"\"\n\n @retry(\n wait_fixed=250,\n stop_max_delay=timeout * 2000,\n retry_on_exception=lambda e: isinstance(e, ServiceListenTimeout),\n )\n def find_port():\n for child in fosu.normalize_wrapper_process(self.child).children(\n recursive=True\n ):\n try:\n for local_port in fosu.get_listening_tcp_ports(child):\n if port is None or port == local_port:\n return local_port\n\n except psutil.Error:\n pass\n\n raise ServiceListenTimeout(etau.get_class_name(self), port)\n\n return find_port()\n\n @classmethod\n def find_subclass_by_name(cls, name):\n for subclass in cls.__subclasses__():\n if subclass.service_name == name:\n return subclass\n\n try:\n return subclass.find_subclass_by_name(name)\n except ValueError:\n pass\n\n raise ValueError(\"Unrecognized %s subclass: %s\" % (cls.__name__, name))\n\n\nclass MultiClientService(Service):\n \"\"\"Base class for services that support multiple clients.\"\"\"\n\n # set when attaching to an existing process\n attached = False\n\n def __init__(self):\n super().__init__()\n\n @property\n def _service_args(self):\n return super()._service_args + [\"--multi\"]\n\n def start(self):\n \"\"\"Searches for a running instance of this service, or starts one\n if no instance is found.\n \"\"\"\n for process in fosu.find_processes_by_args(self._service_args):\n desc = \"Process %i (%s)\" % (\n process.pid,\n \" \".join([\"service/main.py\"] + self._service_args),\n )\n logger.debug(\"Connecting to %s\", desc)\n try:\n reply = fosu.send_ipc_message(\n process, (\"register\", os.getpid())\n )\n if reply == True:\n self.attached = True\n self.child = process\n return\n else:\n logger.warning(\"Failed to connect to %s: %r\", desc, reply)\n\n except IOError:\n logger.warning(\"%s did not respond\", desc)\n super().start()\n\n def stop(self):\n \"\"\"Disconnects from the service without actually stopping it.\"\"\"\n if self.attached:\n self.attached = False\n elif self.child is not None:\n # this process is the original parent\n self.child.stdin.close()\n\n self.child = None\n\n\nclass DatabaseService(MultiClientService):\n \"\"\"Service that controls the underlying MongoDB database.\"\"\"\n\n service_name = \"db\"\n allow_headless = True\n\n MONGOD_EXE_NAME = \"mongod\"\n if sys.platform.startswith(\"win\"):\n MONGOD_EXE_NAME += \".exe\"\n\n MIN_MONGO_VERSION = \"4.4\"\n\n @property\n def database_dir(self):\n config = focn.load_config()\n return config.database_dir\n\n @property\n def command(self):\n args = [\n DatabaseService.find_mongod(),\n \"--dbpath\",\n self.database_dir,\n \"--logpath\",\n os.path.join(self.database_dir, \"log/mongo.log\"),\n \"--port\",\n \"0\",\n ]\n if not sys.platform.startswith(\"win\"):\n args.append(\"--nounixsocket\")\n\n if focx._get_context() == focx._COLAB:\n args = [\"sudo\"] + args\n\n return args\n\n @property\n def port(self):\n return self._wait_for_child_port()\n\n def start(self):\n \"\"\"Starts the DatabaseService.\"\"\"\n etau.ensure_dir(os.path.join(self.database_dir, \"log\"))\n super().start()\n\n # Set up a default 
connection\n import fiftyone.core.odm.database as food\n\n food.set_default_port(self.port)\n food.get_db_conn()\n\n @staticmethod\n def cleanup():\n \"\"\"Deletes non-persistent datasets when the DB shuts down.\"\"\"\n import fiftyone.core.dataset as fod\n import fiftyone.core.odm.database as food\n import fiftyone.service.util as fosu\n\n try:\n port = next(\n port\n for child in psutil.Process().children()\n for port in fosu.get_listening_tcp_ports(child)\n )\n except (StopIteration, psutil.Error):\n # mongod may have exited - ok to wait until next time\n return\n\n try:\n food.set_default_port(port)\n food.get_db_conn()\n fod.delete_non_persistent_datasets()\n food.sync_database()\n except:\n # something weird may have happened, like a downward DB migration\n # - ok to wait until next time\n pass\n\n @staticmethod\n def find_mongod():\n \"\"\"Returns the path to the `mongod` executable.\"\"\"\n search_paths = [\n foc.FIFTYONE_DB_BIN_DIR,\n os.path.join(foc.FIFTYONE_CONFIG_DIR, \"bin\"),\n ] + os.environ[\"PATH\"].split(os.pathsep)\n searched = set()\n attempts = []\n for folder in search_paths:\n if folder in searched:\n continue\n\n searched.add(folder)\n mongod_path = os.path.join(folder, DatabaseService.MONGOD_EXE_NAME)\n if os.path.isfile(mongod_path):\n cmd = [mongod_path, \"--version\"]\n if focx._get_context() == focx._COLAB:\n cmd = [\"sudo\"] + cmd\n\n logger.debug(\"Trying %s\", mongod_path)\n p = psutil.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n )\n out, err = p.communicate()\n out = out.decode(errors=\"ignore\").strip()\n err = err.decode(errors=\"ignore\").strip()\n mongod_version = None\n if p.returncode == 0:\n match = re.search(r\"db version.+?([\\d\\.]+)\", out, re.I)\n if match:\n mongod_version = match.group(1)\n if Version(mongod_version) >= Version(\n DatabaseService.MIN_MONGO_VERSION\n ):\n return mongod_path\n\n attempts.append(\n (mongod_path, mongod_version, p.returncode, err)\n )\n\n for path, version, code, err in attempts:\n if version is not None:\n logger.warning(\"%s: incompatible version %s\", path, version)\n else:\n logger.error(\n \"%s: failed to launch (code %r): %s\", path, code, err\n )\n raise RuntimeError(\n \"Could not find mongod>=%s\" % DatabaseService.MIN_MONGO_VERSION\n )\n\n\nclass ServerService(Service):\n \"\"\"Service that controls the FiftyOne web server.\"\"\"\n\n service_name = \"server\"\n working_dir = foc.SERVER_DIR\n allow_headless = True\n\n def __init__(self, port, do_not_track=False):\n self._port = port\n self._do_not_track = do_not_track\n super().__init__()\n\n def start(self):\n server_version = None\n try:\n server_version = requests.get(\n \"http://127.0.0.1:%i/fiftyone\" % self._port, timeout=2\n ).json()[\"version\"]\n except Exception:\n pass\n\n if server_version is None:\n # There is likely not a fiftyone server running (remote or local),\n # so start a local server. 
If there actually is a fiftyone server\n # running that didn't respond to /fiftyone, the local server will\n # fail to start but the app will still connect successfully.\n super().start()\n self._wait_for_child_port(self._port)\n else:\n logger.info(\"Connected to fiftyone on local port %i\", self._port)\n logger.info(\n \"If you are not connecting to a remote session, you may need\\n\"\n \"to start a new session and specify a port.\\n\"\n )\n if server_version != foc.VERSION:\n logger.warning(\n \"Server version (%s) does not match client version (%s)\",\n server_version,\n foc.VERSION,\n )\n\n @property\n def command(self):\n command = [\n sys.executable,\n \"main.py\",\n \"--port\",\n str(self.port),\n ]\n return command\n\n @property\n def port(self):\n \"\"\"Getter for the current port\"\"\"\n return self._port\n\n @property\n def env(self):\n dnt = \"1\" if self._do_not_track else \"0\"\n return {\"FIFTYONE_DO_NOT_TRACK\": dnt}\n\n\nclass AppService(Service):\n \"\"\"Service that controls the FiftyOne app.\"\"\"\n\n service_name = \"app\"\n working_dir = foc.FIFTYONE_DESKTOP_APP_DIR\n\n def __init__(self, server_port=None):\n # initialize before start() is called\n self.server_port = server_port\n super().__init__()\n\n @property\n def command(self):\n with etau.WorkingDir(foc.FIFTYONE_DESKTOP_APP_DIR):\n return self.find_app()\n\n def find_app(self):\n if foc.DEV_INSTALL:\n return [\"yarn\", \"start-desktop\"]\n\n for path in etau.list_files(\"./\"):\n if path.endswith(\".tar.gz\"):\n logger.info(\"Installing FiftyOne App\")\n etau.extract_tar(path, \"./\", delete_tar=True)\n\n pre = foc.FIFTYONE_DESKTOP_APP_DIR\n for path in etau.list_files(\"./\"):\n if path.endswith(\".exe\"):\n return [os.path.join(pre + path)]\n\n if path.endswith(\".AppImage\"):\n return [os.path.join(pre, path)]\n\n if os.path.isdir(\"./FiftyOne.app\"):\n return [os.path.join(pre, \"FiftyOne.app/Contents/MacOS/FiftyOne\")]\n\n raise RuntimeError(\n \"Could not find FiftyOne app in %r\" % foc.FIFTYONE_DESKTOP_APP_DIR\n )\n\n @property\n def env(self):\n env = {}\n if self.server_port is not None:\n env[\"FIFTYONE_SERVER_PORT\"] = str(self.server_port)\n if foc.DEV_INSTALL:\n # override port 1212 used by \"yarn dev\" for hot-reloading\n # (specifying port 0 doesn't work here)\n env[\"PORT\"] = str(self.server_port + 1)\n return env\n", "path": "fiftyone/core/service.py" } ]
diff --git a/app/package.json b/app/package.json index b9960a8bdaf..4e209df7539 100644 --- a/app/package.json +++ b/app/package.json @@ -6,10 +6,11 @@ "private": true, "main": "index.js", "scripts": { + "build": "yarn workspace @fiftyone/app build", "dev": "yarn workspace @fiftyone/app dev", + "postinstall": "patch-package", "start": "yarn workspace @fiftyone/app start", - "build": "yarn workspace @fiftyone/app build", - "postinstall": "patch-package" + "start-desktop": "yarn workspace FiftyOne start-desktop" }, "devDependencies": { "patch-package": "^6.4.7", diff --git a/app/packages/desktop/package.json b/app/packages/desktop/package.json index 5d8934b0fce..22ce28e1396 100644 --- a/app/packages/desktop/package.json +++ b/app/packages/desktop/package.json @@ -8,13 +8,14 @@ "private": true, "prettier": "@fiftyone/prettier-config", "scripts": { - "build-desktop": "yarn build-desktop-source && yarn pull-desktop-source && tsc -p tsconfig.json", - "build-desktop-source": "yarn workspace @fiftyone/app build-bare", - "pull-desktop-source": "yarn workspace @fiftyone/app copy-to-desktop", - "start-desktop": "cross-env DEBUG_APP=true electron ./dist/main.js", - "package-mac": "yarn build-desktop && electron-builder build --mac", - "package-linux": "yarn build-desktop && electron-builder build --linux", - "package-win": "yarn build-desktop && electron-builder build --win --x64" + "build": "yarn build-source && yarn build-desktop", + "build-desktop": "yarn pull-source && tsc -p tsconfig.json", + "build-source": "yarn workspace @fiftyone/app build-bare", + "pull-source": "yarn workspace @fiftyone/app copy-to-desktop", + "start-desktop": "yarn build-desktop && cross-env DEBUG_APP=true electron ./dist/main.js", + "package-mac": "yarn build && electron-builder build --mac", + "package-linux": "yarn build && electron-builder build --linux", + "package-win": "yarn build && electron-builder build --win --x64" }, "build": { "productName": "FiftyOne", diff --git a/fiftyone/core/service.py b/fiftyone/core/service.py index 2e79b1651b1..4a2a16862e7 100644 --- a/fiftyone/core/service.py +++ b/fiftyone/core/service.py @@ -453,7 +453,7 @@ def command(self): def find_app(self): if foc.DEV_INSTALL: - return ["yarn", "start-app"] + return ["yarn", "start-desktop"] for path in etau.list_files("./"): if path.endswith(".tar.gz"):
apache__tvm-12178
Exercise TVM under minimal configuration in CI We have seen a couple of bugs due to microTVM being presumed-ON in config.cmake. Namely, you currently get Python errors when importing TVM if USE_MICRO is OFF. We should have a regression test that verifies basic functionality with everything (or nearly everything) OFF. Context: apache/tvm#9617, and another micro-related issue of the same kind, which I don't have handy right now. cc @gigiblender
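On the Jenkinsfile-generator side, the change is small (compare the before/after files below): the new `ci_minimal` image only needs to be registered in the generator's `data["images"]` list so the Jinja template emits the corresponding build/test stages. A sketch of the added entry, with the other images elided:

```python
# The generator's image registry; adding ci_minimal here is what makes the
# template render the minimal-configuration build and unit-test stages.
data = {
    "images": [
        {"name": "ci_cpu", "platform": "CPU"},
        {"name": "ci_minimal", "platform": "CPU"},  # new entry for the minimal build
        # ... remaining images unchanged ...
    ]
}
```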
[ { "content": "#!/usr/bin/env python3\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport jinja2\nimport argparse\nimport difflib\nimport re\nimport datetime\nimport textwrap\n\nfrom pathlib import Path\n\n\nREPO_ROOT = Path(__file__).resolve().parent.parent.parent\nJENKINSFILE_TEMPLATE = REPO_ROOT / \"ci\" / \"jenkins\" / \"Jenkinsfile.j2\"\nJENKINSFILE = REPO_ROOT / \"Jenkinsfile\"\n\n\ndata = {\n \"images\": [\n {\n \"name\": \"ci_arm\",\n \"platform\": \"ARM\",\n },\n {\n \"name\": \"ci_cpu\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_gpu\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_hexagon\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_i386\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_lint\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_cortexm\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_wasm\",\n \"platform\": \"CPU\",\n },\n ]\n}\n\n\ndef lines_without_generated_tag(content):\n return [\n line for line in content.splitlines(keepends=True) if not line.startswith(\"// Generated at\")\n ]\n\n\nif __name__ == \"__main__\":\n help = \"Regenerate Jenkinsfile from template\"\n parser = argparse.ArgumentParser(description=help)\n parser.add_argument(\"--check\", action=\"store_true\", help=\"just verify the output didn't change\")\n args = parser.parse_args()\n\n with open(JENKINSFILE) as f:\n content = f.read()\n\n data[\"generated_time\"] = datetime.datetime.now().isoformat()\n\n environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(REPO_ROOT),\n undefined=jinja2.StrictUndefined,\n lstrip_blocks=True,\n trim_blocks=True,\n keep_trailing_newline=True,\n )\n template = environment.get_template(str(JENKINSFILE_TEMPLATE.relative_to(REPO_ROOT)))\n new_content = template.render(**data)\n\n diff = \"\".join(\n difflib.unified_diff(\n lines_without_generated_tag(content), lines_without_generated_tag(new_content)\n )\n )\n if args.check:\n if not diff:\n print(\"Success, the newly generated Jenkinsfile matched the one on disk\")\n exit(0)\n else:\n print(\n textwrap.dedent(\n \"\"\"\n Newly generated Jenkinsfile did not match the one on disk! If you have made\n edits to the Jenkinsfile, move them to 'jenkins/Jenkinsfile.j2' and\n regenerate the Jenkinsfile from the template with\n\n python3 -m pip install -r jenkins/requirements.txt\n python3 jenkins/generate.py\n\n Diffed changes:\n \"\"\"\n ).strip()\n )\n print(diff)\n exit(1)\n else:\n with open(JENKINSFILE, \"w\") as f:\n f.write(new_content)\n if not diff:\n print(f\"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, no changes made\")\n else:\n print(f\"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, changes:\")\n print(diff)\n", "path": "ci/jenkins/generate.py" } ]
[ { "content": "#!/usr/bin/env python3\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport jinja2\nimport argparse\nimport difflib\nimport re\nimport datetime\nimport textwrap\n\nfrom pathlib import Path\n\n\nREPO_ROOT = Path(__file__).resolve().parent.parent.parent\nJENKINSFILE_TEMPLATE = REPO_ROOT / \"ci\" / \"jenkins\" / \"Jenkinsfile.j2\"\nJENKINSFILE = REPO_ROOT / \"Jenkinsfile\"\n\n\ndata = {\n \"images\": [\n {\n \"name\": \"ci_arm\",\n \"platform\": \"ARM\",\n },\n {\n \"name\": \"ci_cpu\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_minimal\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_gpu\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_hexagon\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_i386\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_lint\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_cortexm\",\n \"platform\": \"CPU\",\n },\n {\n \"name\": \"ci_wasm\",\n \"platform\": \"CPU\",\n },\n ]\n}\n\n\ndef lines_without_generated_tag(content):\n return [\n line for line in content.splitlines(keepends=True) if not line.startswith(\"// Generated at\")\n ]\n\n\nif __name__ == \"__main__\":\n help = \"Regenerate Jenkinsfile from template\"\n parser = argparse.ArgumentParser(description=help)\n parser.add_argument(\"--check\", action=\"store_true\", help=\"just verify the output didn't change\")\n args = parser.parse_args()\n\n with open(JENKINSFILE) as f:\n content = f.read()\n\n data[\"generated_time\"] = datetime.datetime.now().isoformat()\n\n environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(REPO_ROOT),\n undefined=jinja2.StrictUndefined,\n lstrip_blocks=True,\n trim_blocks=True,\n keep_trailing_newline=True,\n )\n template = environment.get_template(str(JENKINSFILE_TEMPLATE.relative_to(REPO_ROOT)))\n new_content = template.render(**data)\n\n diff = \"\".join(\n difflib.unified_diff(\n lines_without_generated_tag(content), lines_without_generated_tag(new_content)\n )\n )\n if args.check:\n if not diff:\n print(\"Success, the newly generated Jenkinsfile matched the one on disk\")\n exit(0)\n else:\n print(\n textwrap.dedent(\n \"\"\"\n Newly generated Jenkinsfile did not match the one on disk! If you have made\n edits to the Jenkinsfile, move them to 'jenkins/Jenkinsfile.j2' and\n regenerate the Jenkinsfile from the template with\n\n python3 -m pip install -r jenkins/requirements.txt\n python3 jenkins/generate.py\n\n Diffed changes:\n \"\"\"\n ).strip()\n )\n print(diff)\n exit(1)\n else:\n with open(JENKINSFILE, \"w\") as f:\n f.write(new_content)\n if not diff:\n print(f\"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, no changes made\")\n else:\n print(f\"Wrote output to {JENKINSFILE.relative_to(REPO_ROOT)}, changes:\")\n print(diff)\n", "path": "ci/jenkins/generate.py" } ]
diff --git a/CMakeLists.txt b/CMakeLists.txt index 8dc03ee0f40e..a6be494a3a53 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -794,3 +794,5 @@ find_and_set_linker(${USE_ALTERNATIVE_LINKER}) if(${SUMMARIZE}) print_summary() endif() + +dump_options_to_file("${TVM_ALL_OPTIONS}") diff --git a/Jenkinsfile b/Jenkinsfile index a2fe67d4b5f3..0114bf755cb7 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -52,6 +52,7 @@ import org.jenkinsci.plugins.pipeline.modeldefinition.Utils ci_lint = 'tlcpack/ci-lint:20220715-060127-37f9d3c49' ci_gpu = 'tlcpack/ci-gpu:20220801-060139-d332eb374' ci_cpu = 'tlcpack/ci-cpu:20220715-060127-37f9d3c49' +ci_minimal = 'tlcpack/ci-minimal:20220725-133226-d3cefdaf1' ci_wasm = 'tlcpack/ci-wasm:20220715-060127-37f9d3c49' ci_i386 = 'tlcpack/ci-i386:20220715-060127-37f9d3c49' ci_cortexm = 'tlcpack/ci-cortexm:v0.01' @@ -66,6 +67,7 @@ properties([ parameters([ string(name: 'ci_arm_param', defaultValue: ''), string(name: 'ci_cpu_param', defaultValue: ''), + string(name: 'ci_minimal_param', defaultValue: ''), string(name: 'ci_gpu_param', defaultValue: ''), string(name: 'ci_hexagon_param', defaultValue: ''), string(name: 'ci_i386_param', defaultValue: ''), @@ -79,6 +81,7 @@ properties([ // is used) built_ci_arm = null; built_ci_cpu = null; + built_ci_minimal = null; built_ci_gpu = null; built_ci_hexagon = null; built_ci_i386 = null; @@ -273,7 +276,7 @@ def prepare() { if (env.DETERMINE_DOCKER_IMAGES == 'yes') { sh( - script: "./tests/scripts/determine_docker_images.py ci_arm=${ci_arm} ci_cpu=${ci_cpu} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_cortexm=${ci_cortexm} ci_wasm=${ci_wasm} ", + script: "./tests/scripts/determine_docker_images.py ci_arm=${ci_arm} ci_cpu=${ci_cpu} ci_minimal=${ci_minimal} ci_gpu=${ci_gpu} ci_hexagon=${ci_hexagon} ci_i386=${ci_i386} ci_lint=${ci_lint} ci_cortexm=${ci_cortexm} ci_wasm=${ci_wasm} ", label: 'Decide whether to use tlcpack or tlcpackstaging for Docker images', ) // Pull image names from the results of should_rebuild_docker.py @@ -287,6 +290,11 @@ def prepare() { label: "Find docker image name for ci_cpu", returnStdout: true, ).trim() + ci_minimal = sh( + script: "cat .docker-image-names/ci_minimal", + label: "Find docker image name for ci_minimal", + returnStdout: true, + ).trim() ci_gpu = sh( script: "cat .docker-image-names/ci_gpu", label: "Find docker image name for ci_gpu", @@ -321,6 +329,7 @@ def prepare() { ci_arm = params.ci_arm_param ?: ci_arm ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_minimal = params.ci_minimal_param ?: ci_minimal ci_gpu = params.ci_gpu_param ?: ci_gpu ci_hexagon = params.ci_hexagon_param ?: ci_hexagon ci_i386 = params.ci_i386_param ?: ci_i386 @@ -332,6 +341,7 @@ def prepare() { echo "Docker images being used in this build:" echo " ci_arm = ${ci_arm}" echo " ci_cpu = ${ci_cpu}" + echo " ci_minimal = ${ci_minimal}" echo " ci_gpu = ${ci_gpu}" echo " ci_hexagon = ${ci_hexagon}" echo " ci_i386 = ${ci_i386}" @@ -483,6 +493,17 @@ def build_docker_images() { } } }, + 'ci_minimal': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + // We're purposefully not setting the built image here since they + // are not yet being uploaded to tlcpack + // ci_minimal = build_image('ci_minimal') + built_ci_minimal = build_image('ci_minimal'); + } + } + }, 'ci_gpu': { node('CPU') { timeout(time: max_time, unit: 'MINUTES') { @@ -629,7 +650,6 @@ def cpp_unittest(image) { ) } - def add_microtvm_permissions() { sh( script: 'find build/microtvm_template_projects -type f | grep 
qemu-hack | xargs chmod +x', @@ -820,6 +840,56 @@ stage('Build') { Utils.markStageSkippedForConditional('BUILD: CPU') } }, + 'BUILD: CPU MINIMAL': { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-cpu-minimal") { + docker_init(ci_minimal) + init_git() + sh ( + script: "${docker_run} ${ci_minimal} ./tests/scripts/task_config_build_minimal.sh build", + label: 'Create CPU minimal cmake config', + ) + make(ci_minimal, 'build', '-j2') + sh( + script: """ + set -eux + retry() { + local retries=\$1 + shift + + local count=0 + until "\$@"; do + exit=\$? + wait=\$((2 ** \$count)) + count=\$((\$count + 1)) + if [ \$count -lt \$retries ]; then + echo "Retry \$count/\$retries exited \$exit, retrying in \$wait seconds..." + sleep \$wait + else + echo "Retry \$count/\$retries exited \$exit, no more retries left." + return \$exit + fi + done + return 0 + } + + md5sum build/libtvm.so + retry 3 aws s3 cp --no-progress build/libtvm.so s3://${s3_prefix}/cpu-minimal/build/libtvm.so + md5sum build/libtvm_runtime.so + retry 3 aws s3 cp --no-progress build/libtvm_runtime.so s3://${s3_prefix}/cpu-minimal/build/libtvm_runtime.so + md5sum build/config.cmake + retry 3 aws s3 cp --no-progress build/config.cmake s3://${s3_prefix}/cpu-minimal/build/config.cmake + """, + label: 'Upload artifacts to S3', + ) + + } + } + } else { + Utils.markStageSkippedForConditional('BUILD: CPU MINIMAL') + } + }, 'BUILD: WASM': { if (!skip_ci && is_docs_only_build != 1) { node('CPU-SMALL') { @@ -4848,6 +4918,69 @@ def shard_run_test_Cortex_M_8_of_8() { } +def run_unittest_minimal() { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-cpu-minimal") { + timeout(time: max_time, unit: 'MINUTES') { + try { + docker_init(ci_minimal) + init_git() + withEnv(['PLATFORM=minimal'], { + sh( + script: """ + set -eux + retry() { + local retries=\$1 + shift + + local count=0 + until "\$@"; do + exit=\$? + wait=\$((2 ** \$count)) + count=\$((\$count + 1)) + if [ \$count -lt \$retries ]; then + echo "Retry \$count/\$retries exited \$exit, retrying in \$wait seconds..." + sleep \$wait + else + echo "Retry \$count/\$retries exited \$exit, no more retries left." 
+ return \$exit + fi + done + return 0 + } + + retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu-minimal/build/libtvm.so build/libtvm.so + md5sum build/libtvm.so + retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu-minimal/build/libtvm_runtime.so build/libtvm_runtime.so + md5sum build/libtvm_runtime.so + retry 3 aws s3 cp --no-progress s3://${s3_prefix}/cpu-minimal/build/config.cmake build/config.cmake + md5sum build/config.cmake + """, + label: 'Download artifacts from S3', + ) + + cpp_unittest(ci_minimal) + python_unittest(ci_minimal) + }) + } finally { + sh( + script: """ + set -eux + aws s3 cp --no-progress build/pytest-results s3://${s3_prefix}/pytest-results/unittest_CPU_MINIMAL --recursive + """, + label: 'Upload JUnits to S3', + ) + + junit 'build/pytest-results/*.xml' + } + } + } + } + } else { + Utils.markStageSkippedForConditional('unittest: CPU MINIMAL') + } +} def test() { stage('Test') { @@ -5008,6 +5141,9 @@ stage('Test') { 'test: Cortex-M 8 of 8': { shard_run_test_Cortex_M_8_of_8() }, + 'unittest: CPU MINIMAL': { + run_unittest_minimal() + }, 'unittest: CPU': { if (!skip_ci && is_docs_only_build != 1) { node('CPU-SMALL') { @@ -5379,6 +5515,7 @@ def deploy() { def tag = "${date_Ymd_HMS}-${upstream_revision.substring(0, 8)}" update_docker(built_ci_arm, "tlcpackstaging/ci_arm:${tag}") update_docker(built_ci_cpu, "tlcpackstaging/ci_cpu:${tag}") + update_docker(built_ci_minimal, "tlcpackstaging/ci_minimal:${tag}") update_docker(built_ci_gpu, "tlcpackstaging/ci_gpu:${tag}") update_docker(built_ci_hexagon, "tlcpackstaging/ci_hexagon:${tag}") update_docker(built_ci_i386, "tlcpackstaging/ci_i386:${tag}") diff --git a/ci/jenkins/Build.groovy.j2 b/ci/jenkins/Build.groovy.j2 index a4316a268e9a..21b8b2c65f8b 100644 --- a/ci/jenkins/Build.groovy.j2 +++ b/ci/jenkins/Build.groovy.j2 @@ -33,7 +33,6 @@ def cpp_unittest(image) { ) } - def add_microtvm_permissions() { {% for folder in microtvm_template_projects %} sh( @@ -123,6 +122,24 @@ stage('Build') { Utils.markStageSkippedForConditional('BUILD: CPU') } }, + 'BUILD: CPU MINIMAL': { + if (!skip_ci && is_docs_only_build != 1) { + node('CPU-SMALL') { + ws({{ m.per_exec_ws('tvm/build-cpu-minimal') }}) { + docker_init(ci_minimal) + init_git() + sh ( + script: "${docker_run} ${ci_minimal} ./tests/scripts/task_config_build_minimal.sh build", + label: 'Create CPU minimal cmake config', + ) + make(ci_minimal, 'build', '-j2') + {{ m.upload_artifacts(tag='cpu-minimal', filenames=tvm_lib) }} + } + } + } else { + Utils.markStageSkippedForConditional('BUILD: CPU MINIMAL') + } + }, 'BUILD: WASM': { if (!skip_ci && is_docs_only_build != 1) { node('CPU-SMALL') { diff --git a/ci/jenkins/Jenkinsfile.j2 b/ci/jenkins/Jenkinsfile.j2 index 63131ff7ffc2..f91cf88a40b3 100644 --- a/ci/jenkins/Jenkinsfile.j2 +++ b/ci/jenkins/Jenkinsfile.j2 @@ -54,6 +54,7 @@ import org.jenkinsci.plugins.pipeline.modeldefinition.Utils ci_lint = 'tlcpack/ci-lint:20220715-060127-37f9d3c49' ci_gpu = 'tlcpack/ci-gpu:20220801-060139-d332eb374' ci_cpu = 'tlcpack/ci-cpu:20220715-060127-37f9d3c49' +ci_minimal = 'tlcpack/ci-minimal:20220725-133226-d3cefdaf1' ci_wasm = 'tlcpack/ci-wasm:20220715-060127-37f9d3c49' ci_i386 = 'tlcpack/ci-i386:20220715-060127-37f9d3c49' ci_cortexm = 'tlcpack/ci-cortexm:v0.01' diff --git a/ci/jenkins/Test.groovy.j2 b/ci/jenkins/Test.groovy.j2 index b2afdacad7d1..09550a469701 100644 --- a/ci/jenkins/Test.groovy.j2 +++ b/ci/jenkins/Test.groovy.j2 @@ -211,6 +211,19 @@ ) {% endcall %} +def run_unittest_minimal() { + {% call m.test_step_body( + name="unittest: CPU 
MINIMAL", + node="CPU-SMALL", + ws="tvm/ut-python-cpu-minimal", + platform="minimal", + docker_image="ci_minimal", + ) %} + {{ m.download_artifacts(tag='cpu-minimal', filenames=tvm_lib) }} + cpp_unittest(ci_minimal) + python_unittest(ci_minimal) + {% endcall %} +} def test() { stage('Test') { @@ -223,6 +236,9 @@ stage('Test') { {{ method_name }}() }, {% endfor %} + 'unittest: CPU MINIMAL': { + run_unittest_minimal() + }, {% call m.test_step( name="unittest: CPU", node="CPU-SMALL", diff --git a/ci/jenkins/generate.py b/ci/jenkins/generate.py index 82bf4e5aaa1f..3d0198ba6fd9 100644 --- a/ci/jenkins/generate.py +++ b/ci/jenkins/generate.py @@ -40,6 +40,10 @@ "name": "ci_cpu", "platform": "CPU", }, + { + "name": "ci_minimal", + "platform": "CPU", + }, { "name": "ci_gpu", "platform": "CPU", diff --git a/ci/jenkins/macros.j2 b/ci/jenkins/macros.j2 index 99b7dc1bcd90..082446cb11b6 100644 --- a/ci/jenkins/macros.j2 +++ b/ci/jenkins/macros.j2 @@ -84,6 +84,29 @@ def {{ method_name }}() { {% endfor %} {% endmacro %} +{% macro test_step_body(name, node, ws, docker_image, platform) %} +{% set test_dir_name = name.replace(":", "").replace(" ", "-").replace("-", "_")|string %} + if (!skip_ci && is_docs_only_build != 1) { + node('{{ node }}') { + ws({{ per_exec_ws(ws) }}) { + timeout(time: max_time, unit: 'MINUTES') { + try { + docker_init({{ docker_image }}) + init_git() + withEnv(['PLATFORM={{ platform }}'], { + {{ caller() | indent(width=8) | trim }} + }) + } finally { + {{ junit_to_s3(test_dir_name) | indent(width=0) }} + junit 'build/pytest-results/*.xml' + } + } + } + } + } else { + Utils.markStageSkippedForConditional('{{ name }}') + } +{% endmacro %} {% macro test_step(name, node, ws, docker_image, platform) %} {% set test_dir_name = name.replace(":", "").replace(" ", "-").replace("-", "_")|string %} diff --git a/cmake/utils/Summary.cmake b/cmake/utils/Summary.cmake index 7059135fb22b..1b973f253a00 100644 --- a/cmake/utils/Summary.cmake +++ b/cmake/utils/Summary.cmake @@ -67,3 +67,10 @@ macro(print_summary) message(STATUS ${OUT} " : " ${OPTION_VALUE}) endforeach() endmacro() + +function(dump_options_to_file tvm_options) + file(REMOVE ${CMAKE_BINARY_DIR}/TVMBuildOptions.txt) + foreach(option ${tvm_options}) + file(APPEND ${CMAKE_BINARY_DIR}/TVMBuildOptions.txt "${option} ${${option}} \n") + endforeach() +endfunction() diff --git a/docker/Dockerfile.ci_minimal b/docker/Dockerfile.ci_minimal new file mode 100644 index 000000000000..cf548989eba2 --- /dev/null +++ b/docker/Dockerfile.ci_minimal @@ -0,0 +1,57 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# CI docker minimal CPU env +FROM ubuntu:18.04 + +COPY utils/apt-install-and-clear.sh /usr/local/bin/apt-install-and-clear + +RUN apt-get update --fix-missing + +COPY install/ubuntu_install_core.sh /install/ubuntu_install_core.sh +RUN bash /install/ubuntu_install_core.sh + +COPY install/ubuntu_install_googletest.sh /install/ubuntu_install_googletest.sh +RUN bash /install/ubuntu_install_googletest.sh + +COPY install/ubuntu1804_install_python.sh /install/ubuntu1804_install_python.sh +RUN bash /install/ubuntu1804_install_python.sh + +# Globally disable pip cache +RUN pip config set global.no-cache-dir false + +COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh +RUN bash /install/ubuntu_install_python_package.sh + +COPY install/ubuntu1804_manual_install_llvm.sh /install/ubuntu1804_manual_install_llvm.sh +RUN bash /install/ubuntu1804_manual_install_llvm.sh + +# Rust env (build early; takes a while) +COPY install/ubuntu_install_rust.sh /install/ubuntu_install_rust.sh +RUN bash /install/ubuntu_install_rust.sh +ENV RUSTUP_HOME /opt/rust +ENV CARGO_HOME /opt/rust +ENV PATH $PATH:$CARGO_HOME/bin + +# AutoTVM deps +COPY install/ubuntu_install_redis.sh /install/ubuntu_install_redis.sh +RUN bash /install/ubuntu_install_redis.sh + +# sccache +COPY install/ubuntu_install_sccache.sh /install/ubuntu_install_sccache.sh +RUN bash /install/ubuntu_install_sccache.sh +ENV PATH /opt/sccache:$PATH \ No newline at end of file diff --git a/docker/install/ubuntu1804_manual_install_llvm.sh b/docker/install/ubuntu1804_manual_install_llvm.sh new file mode 100755 index 000000000000..f0e9abd1d9fd --- /dev/null +++ b/docker/install/ubuntu1804_manual_install_llvm.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +set -e +set -u +set -o pipefail + +git clone --depth 1 --branch release/11.x https://github.com/llvm/llvm-project.git +pushd llvm-project +mkdir build +pushd build +cmake \ + -G Ninja \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=/usr \ + -DLLVM_ENABLE_ASSERTIONS=ON \ + -DLLVM_ENABLE_PROJECTS="llvm;clang" \ + ../llvm +ninja install +popd +popd +rm -rf llvm-project + diff --git a/tests/python/ci/test_ci.py b/tests/python/ci/test_ci.py index 5ab5e6950494..1c2ab1ffb787 100644 --- a/tests/python/ci/test_ci.py +++ b/tests/python/ci/test_ci.py @@ -914,6 +914,7 @@ def test_open_docker_update_pr( "ci_lint", "ci_gpu", "ci_cpu", + "ci_minimal", "ci_wasm", "ci_i386", "ci_cortexm", diff --git a/tests/python/unittest/test_meta_schedule_task_scheduler.py b/tests/python/unittest/test_meta_schedule_task_scheduler.py index fc2497f05303..3edd81ee9a11 100644 --- a/tests/python/unittest/test_meta_schedule_task_scheduler.py +++ b/tests/python/unittest/test_meta_schedule_task_scheduler.py @@ -23,6 +23,7 @@ import pytest import tvm import tvm.testing +from tvm.support import libinfo from tvm import meta_schedule as ms from tvm._ffi.base import TVMError from tvm.meta_schedule.testing.dummy_object import DummyBuilder, DummyRunner diff --git a/tests/scripts/ci.py b/tests/scripts/ci.py index f5c60c94502a..4cc19462c907 100755 --- a/tests/scripts/ci.py +++ b/tests/scripts/ci.py @@ -595,6 +595,19 @@ def add_subparser( "frontend": ("run frontend tests", ["./tests/scripts/task_python_frontend_cpu.sh"]), }, ), + generate_command( + name="minimal", + help="Run minimal CPU build and test(s)", + options={ + "cpp": CPP_UNITTEST, + "unittest": ( + "run unit tests", + [ + "./tests/scripts/task_python_unittest.sh", + ], + ), + }, + ), generate_command( name="i386", help="Run i386 build and test(s)", diff --git a/tests/scripts/task_config_build_minimal.sh b/tests/scripts/task_config_build_minimal.sh new file mode 100755 index 000000000000..651f54cea21b --- /dev/null +++ b/tests/scripts/task_config_build_minimal.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +set -euxo pipefail + +BUILD_DIR=$1 +mkdir -p "$BUILD_DIR" +cd "$BUILD_DIR" +cp ../cmake/config.cmake . 
+ +echo set\(USE_SORT ON\) >> config.cmake +echo set\(USE_LLVM llvm-config\) >> config.cmake +echo set\(USE_RELAY_DEBUG ON\) >> config.cmake +echo set\(CMAKE_BUILD_TYPE=Debug\) >> config.cmake +echo set\(CMAKE_CXX_FLAGS \"-Werror -Wp,-D_GLIBCXX_ASSERTIONS\"\) >> config.cmake +echo set\(HIDE_PRIVATE_SYMBOLS ON\) >> config.cmake +echo set\(USE_LIBBACKTRACE ON\) >> config.cmake +echo set\(USE_CCACHE OFF\) >> config.cmake +echo set\(SUMMARIZE ON\) >> config.cmake diff --git a/tests/scripts/task_cpp_unittest.sh b/tests/scripts/task_cpp_unittest.sh index 8ae2e9b1109f..27899d06d703 100755 --- a/tests/scripts/task_cpp_unittest.sh +++ b/tests/scripts/task_cpp_unittest.sh @@ -45,20 +45,22 @@ python3 tests/scripts/task_build.py \ --cmake-target cpptest \ --build-dir "${BUILD_DIR}" -# crttest requires USE_MICRO to be enabled, which is currently the case -# with all CI configs -pushd "${BUILD_DIR}" -ninja crttest -popd - +# crttest requries USE_MICRO to be enabled. +if grep -Fq "USE_MICRO ON" ${BUILD_DIR}/TVMBuildOptions.txt; then + pushd "${BUILD_DIR}" + ninja crttest + popd +fi pushd "${BUILD_DIR}" ctest --gtest_death_test_style=threadsafe popd -# Test MISRA-C runtime -pushd apps/bundle_deploy -rm -rf build -make test_dynamic test_static -popd +# Test MISRA-C runtime. It requires USE_MICRO to be enabled. +if grep -Fq "USE_MICRO ON" ${BUILD_DIR}/TVMBuildOptions.txt; then + pushd apps/bundle_deploy + rm -rf build + make test_dynamic test_static + popd +fi
apache__airflow-11723
All task logging goes to the log for try_number 1

**Apache Airflow version**: 2.0.0a1

**What happened**: When a task fails on the first try, the log output for additional tries goes to the log for the first attempt.

**What you expected to happen**: The logs should go to the correct log file. For the default configuration, the log filename template is `log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log`, so additional numbered `.log` files should be created.

**How to reproduce it**: Create a test dag:

```
from datetime import timedelta

from airflow import DAG
from airflow.operators.python import PythonOperator
from airflow.utils.dates import days_ago

with DAG(
    dag_id="trynumber_demo",
    default_args={"start_date": days_ago(2), "retries": 1, "retry_delay": timedelta(0)},
    schedule_interval=None,
) as dag:

    def demo_task(ti=None):
        print("Running demo_task, try_number =", ti.try_number)
        if ti.try_number <= 1:
            raise ValueError("Shan't")

    task = PythonOperator(task_id="demo_task", python_callable=demo_task)
```

and trigger this dag:

```
$ airflow dags trigger trynumber_demo
```

then observe that `trynumber_demo/demo_task/<execution_date>/` contains only `1.log`, which holds the full output of both attempts:

```
[...]
--------------------------------------------------------------------------------
[2020-10-21 13:29:07,958] {taskinstance.py:1020} INFO - Starting attempt 1 of 2
[2020-10-21 13:29:07,959] {taskinstance.py:1021} INFO - --------------------------------------------------------------------------------
[...]
[2020-10-21 13:29:08,163] {logging_mixin.py:110} INFO - Running demo_task, try_number = 1
[2020-10-21 13:29:08,164] {taskinstance.py:1348} ERROR - Shan't
Traceback (most recent call last):
[...]
ValueError: Shan't
[2020-10-21 13:29:08,168] {taskinstance.py:1392} INFO - Marking task as UP_FOR_RETRY. dag_id=trynumber_demo, task_id=demo_task, execution_date=20201021T122907, start_date=20201021T122907, end_date=20201021T122908
[...]
[2020-10-21 13:29:09,121] {taskinstance.py:1019} INFO - --------------------------------------------------------------------------------
[2020-10-21 13:29:09,121] {taskinstance.py:1020} INFO - Starting attempt 2 of 2
[2020-10-21 13:29:09,121] {taskinstance.py:1021} INFO - --------------------------------------------------------------------------------
[...]
[2020-10-21 13:29:09,333] {logging_mixin.py:110} INFO - Running demo_task, try_number = 2
[2020-10-21 13:29:09,334] {python.py:141} INFO - Done. Returned value was: None
[2020-10-21 13:29:09,355] {taskinstance.py:1143} INFO - Marking task as SUCCESS.dag_id=trynumber_demo, task_id=demo_task, execution_date=20201021T122907, start_date=20201021T122909, end_date=20201021T122909
[2020-10-21 13:29:09,404] {local_task_job.py:117} INFO - Task exited with return code 0
```

The `TaskInstance()` created for the run needs to first be refreshed from the database before setting the logging context.
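As a minimal sketch of that fix (it mirrors the one-line change in the `pr_diff` recorded for this row; the wrapper function and its name are illustrative, not the verbatim upstream code), the refresh has to happen before `init_run_context()`, because that is when the file task handler renders `{{ try_number }}` into the log filename:

```
# Hedged sketch of the ordering inside `airflow tasks run`. Assumes an
# initialized Airflow metadata DB, a loaded `dag`, and parsed CLI `args`,
# as in airflow/cli/commands/task_command.py::task_run.
from airflow.models import TaskInstance


def build_task_instance(dag, args):
    task = dag.get_task(task_id=args.task_id)
    ti = TaskInstance(task, args.execution_date)
    ti.refresh_from_db()               # pull the current try_number from the DB first...
    ti.init_run_context(raw=args.raw)  # ...so the handler writes 2.log, 3.log on retries
    return ti
```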
[ { "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Task sub-commands\"\"\"\nimport importlib\nimport json\nimport logging\nimport os\nimport textwrap\nfrom contextlib import redirect_stderr, redirect_stdout\nfrom typing import List\n\nfrom tabulate import tabulate\n\nfrom airflow import settings\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowException\nfrom airflow.executors.executor_loader import ExecutorLoader\nfrom airflow.jobs.local_task_job import LocalTaskJob\nfrom airflow.models import DagPickle, TaskInstance\nfrom airflow.models.dag import DAG\nfrom airflow.ti_deps.dep_context import DepContext\nfrom airflow.ti_deps.dependencies_deps import SCHEDULER_QUEUED_DEPS\nfrom airflow.utils import cli as cli_utils\nfrom airflow.utils.cli import get_dag, get_dag_by_file_location, get_dag_by_pickle, get_dags\nfrom airflow.utils.log.logging_mixin import StreamLogWriter\nfrom airflow.utils.net import get_hostname\nfrom airflow.utils.session import create_session\n\n\ndef _run_task_by_selected_method(args, dag, ti):\n \"\"\"\n Runs the task in one of 3 modes\n\n - using LocalTaskJob\n - as raw task\n - by executor\n \"\"\"\n if args.local and args.raw:\n raise AirflowException(\n \"Option --raw and --local are mutually exclusive. \"\n \"Please remove one option to execute the command.\"\n )\n if args.local:\n _run_task_by_local_task_job(args, ti)\n elif args.raw:\n _run_raw_task(args, ti)\n else:\n _run_task_by_executor(args, dag, ti)\n\n\ndef _run_task_by_executor(args, dag, ti):\n \"\"\"\n Sends the task to the executor for execution. 
This can result in the task being started by another host\n if the executor implementation does\n \"\"\"\n pickle_id = None\n if args.ship_dag:\n try:\n # Running remotely, so pickling the DAG\n with create_session() as session:\n pickle = DagPickle(dag)\n session.add(pickle)\n pickle_id = pickle.id\n # TODO: This should be written to a log\n print('Pickled dag {dag} as pickle_id: {pickle_id}'.format(\n dag=dag, pickle_id=pickle_id))\n except Exception as e:\n print('Could not pickle the DAG')\n print(e)\n raise e\n executor = ExecutorLoader.get_default_executor()\n executor.start()\n print(\"Sending to executor.\")\n executor.queue_task_instance(\n ti,\n mark_success=args.mark_success,\n pickle_id=pickle_id,\n ignore_all_deps=args.ignore_all_dependencies,\n ignore_depends_on_past=args.ignore_depends_on_past,\n ignore_task_deps=args.ignore_dependencies,\n ignore_ti_state=args.force,\n pool=args.pool)\n executor.heartbeat()\n executor.end()\n\n\ndef _run_task_by_local_task_job(args, ti):\n \"\"\"Run LocalTaskJob, which monitors the raw task execution process\"\"\"\n run_job = LocalTaskJob(\n task_instance=ti,\n mark_success=args.mark_success,\n pickle_id=args.pickle,\n ignore_all_deps=args.ignore_all_dependencies,\n ignore_depends_on_past=args.ignore_depends_on_past,\n ignore_task_deps=args.ignore_dependencies,\n ignore_ti_state=args.force,\n pool=args.pool)\n run_job.run()\n\n\nRAW_TASK_UNSUPPORTED_OPTION = [\n \"ignore_all_dependencies\", \"ignore_depends_on_past\", \"ignore_dependencies\", \"force\"\n]\n\n\ndef _run_raw_task(args, ti):\n \"\"\"Runs the main task handling code\"\"\"\n unsupported_options = [o for o in RAW_TASK_UNSUPPORTED_OPTION if getattr(args, o)]\n\n if unsupported_options:\n raise AirflowException(\n \"Option --raw does not work with some of the other options on this command. You \"\n \"can't use --raw option and the following options: {}. You provided the option {}. 
\"\n \"Delete it to execute the command\".format(\n \", \".join(f\"--{o}\" for o in RAW_TASK_UNSUPPORTED_OPTION),\n \", \".join(f\"--{o}\" for o in unsupported_options),\n )\n )\n ti._run_raw_task( # pylint: disable=protected-access\n mark_success=args.mark_success,\n job_id=args.job_id,\n pool=args.pool,\n )\n\n\n@cli_utils.action_logging\ndef task_run(args, dag=None):\n \"\"\"Runs a single task instance\"\"\"\n # Load custom airflow config\n if args.cfg_path:\n with open(args.cfg_path, 'r') as conf_file:\n conf_dict = json.load(conf_file)\n\n if os.path.exists(args.cfg_path):\n os.remove(args.cfg_path)\n\n conf.read_dict(conf_dict, source=args.cfg_path)\n settings.configure_vars()\n\n # IMPORTANT, have to use the NullPool, otherwise, each \"run\" command may leave\n # behind multiple open sleeping connections while heartbeating, which could\n # easily exceed the database connection limit when\n # processing hundreds of simultaneous tasks.\n settings.configure_orm(disable_connection_pool=True)\n\n if dag and args.pickle:\n raise AirflowException(\"You cannot use the --pickle option when using DAG.cli() method.\")\n elif args.pickle:\n print(f'Loading pickle id: {args.pickle}')\n dag = get_dag_by_pickle(args.pickle)\n elif not dag:\n dag = get_dag(args.subdir, args.dag_id)\n else:\n # Use DAG from parameter\n pass\n\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n ti.init_run_context(raw=args.raw)\n\n hostname = get_hostname()\n\n print(f\"Running {ti} on host {hostname}\")\n\n if args.interactive:\n _run_task_by_selected_method(args, dag, ti)\n else:\n if settings.DONOT_MODIFY_HANDLERS:\n with redirect_stdout(StreamLogWriter(ti.log, logging.INFO)), \\\n redirect_stderr(StreamLogWriter(ti.log, logging.WARN)):\n _run_task_by_selected_method(args, dag, ti)\n else:\n # Get all the Handlers from 'airflow.task' logger\n # Add these handlers to the root logger so that we can get logs from\n # any custom loggers defined in the DAG\n airflow_logger_handlers = logging.getLogger('airflow.task').handlers\n root_logger = logging.getLogger()\n root_logger_handlers = root_logger.handlers\n\n # Remove all handlers from Root Logger to avoid duplicate logs\n for handler in root_logger_handlers:\n root_logger.removeHandler(handler)\n\n for handler in airflow_logger_handlers:\n root_logger.addHandler(handler)\n root_logger.setLevel(logging.getLogger('airflow.task').level)\n\n with redirect_stdout(StreamLogWriter(ti.log, logging.INFO)), \\\n redirect_stderr(StreamLogWriter(ti.log, logging.WARN)):\n _run_task_by_selected_method(args, dag, ti)\n\n # We need to restore the handlers to the loggers as celery worker process\n # can call this command multiple times,\n # so if we don't reset this then logs from next task would go to the wrong place\n for handler in airflow_logger_handlers:\n root_logger.removeHandler(handler)\n for handler in root_logger_handlers:\n root_logger.addHandler(handler)\n\n logging.shutdown()\n\n\n@cli_utils.action_logging\ndef task_failed_deps(args):\n \"\"\"\n Returns the unmet dependencies for a task instance from the perspective of the\n scheduler (i.e. 
why a task instance doesn't get scheduled and then queued by the\n scheduler, and then run by an executor).\n >>> airflow tasks failed-deps tutorial sleep 2015-01-01\n Task instance dependencies not met:\n Dagrun Running: Task instance's dagrun did not exist: Unknown reason\n Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks\n to have succeeded, but found 1 non-success(es).\n \"\"\"\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n\n dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)\n failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))\n # TODO, Do we want to print or log this\n if failed_deps:\n print(\"Task instance dependencies not met:\")\n for dep in failed_deps:\n print(\"{}: {}\".format(dep.dep_name, dep.reason))\n else:\n print(\"Task instance dependencies are all met.\")\n\n\n@cli_utils.action_logging\ndef task_state(args):\n \"\"\"\n Returns the state of a TaskInstance at the command line.\n >>> airflow tasks state tutorial sleep 2015-01-01\n success\n \"\"\"\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n print(ti.current_state())\n\n\n@cli_utils.action_logging\ndef task_list(args, dag=None):\n \"\"\"Lists the tasks within a DAG at the command line\"\"\"\n dag = dag or get_dag(args.subdir, args.dag_id)\n if args.tree:\n dag.tree_view()\n else:\n tasks = sorted([t.task_id for t in dag.tasks])\n print(\"\\n\".join(tasks))\n\n\nSUPPORTED_DEBUGGER_MODULES: List[str] = [\n \"pudb\",\n \"web_pdb\",\n \"ipdb\",\n \"pdb\",\n]\n\n\ndef _guess_debugger():\n \"\"\"\n Trying to guess the debugger used by the user. When it doesn't find any user-installed debugger,\n returns ``pdb``.\n\n List of supported debuggers:\n\n * `pudb <https://github.com/inducer/pudb>`__\n * `web_pdb <https://github.com/romanvm/python-web-pdb>`__\n * `ipdb <https://github.com/gotcha/ipdb>`__\n * `pdb <https://docs.python.org/3/library/pdb.html>`__\n \"\"\"\n for mod in SUPPORTED_DEBUGGER_MODULES:\n try:\n return importlib.import_module(mod)\n except ImportError:\n continue\n return importlib.import_module(\"pdb\")\n\n\n@cli_utils.action_logging\ndef task_states_for_dag_run(args):\n \"\"\"Get the status of all task instances in a DagRun\"\"\"\n session = settings.Session()\n\n tis = session.query(\n TaskInstance.dag_id,\n TaskInstance.execution_date,\n TaskInstance.task_id,\n TaskInstance.state,\n TaskInstance.start_date,\n TaskInstance.end_date).filter(\n TaskInstance.dag_id == args.dag_id,\n TaskInstance.execution_date == args.execution_date).all()\n\n if len(tis) == 0:\n raise AirflowException(\"DagRun does not exist.\")\n\n formatted_rows = []\n\n for ti in tis:\n formatted_rows.append((ti.dag_id,\n ti.execution_date,\n ti.task_id,\n ti.state,\n ti.start_date,\n ti.end_date))\n\n print(\n \"\\n%s\" %\n tabulate(\n formatted_rows, [\n 'dag', 'exec_date', 'task', 'state', 'start_date', 'end_date'], tablefmt=args.output))\n\n session.close()\n\n\n@cli_utils.action_logging\ndef task_test(args, dag=None):\n \"\"\"Tests task for a given dag_id\"\"\"\n # We want to log output from operators etc to show up here. 
Normally\n # airflow.task would redirect to a file, but here we want it to propagate\n # up to the normal airflow handler.\n handlers = logging.getLogger('airflow.task').handlers\n already_has_stream_handler = False\n for handler in handlers:\n already_has_stream_handler = isinstance(handler, logging.StreamHandler)\n if already_has_stream_handler:\n break\n if not already_has_stream_handler:\n logging.getLogger('airflow.task').propagate = True\n\n env_vars = {'AIRFLOW_TEST_MODE': 'True'}\n if args.env_vars:\n env_vars.update(args.env_vars)\n os.environ.update(env_vars)\n\n dag = dag or get_dag(args.subdir, args.dag_id)\n\n task = dag.get_task(task_id=args.task_id)\n # Add CLI provided task_params to task.params\n if args.task_params:\n passed_in_params = json.loads(args.task_params)\n task.params.update(passed_in_params)\n ti = TaskInstance(task, args.execution_date)\n\n try:\n if args.dry_run:\n ti.dry_run()\n else:\n ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)\n except Exception: # pylint: disable=broad-except\n if args.post_mortem:\n debugger = _guess_debugger()\n debugger.post_mortem()\n else:\n raise\n finally:\n if not already_has_stream_handler:\n # Make sure to reset back to normal. When run for CLI this doesn't\n # matter, but it does for test suite\n logging.getLogger('airflow.task').propagate = False\n\n\n@cli_utils.action_logging\ndef task_render(args):\n \"\"\"Renders and displays templated fields for a given task\"\"\"\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n ti.render_templates()\n for attr in task.__class__.template_fields:\n print(textwrap.dedent(\"\"\"\\\n # ----------------------------------------------------------\n # property: {}\n # ----------------------------------------------------------\n {}\n \"\"\".format(attr, getattr(task, attr))))\n\n\n@cli_utils.action_logging\ndef task_clear(args):\n \"\"\"Clears all task instances or only those matched by regex for a DAG(s)\"\"\"\n logging.basicConfig(\n level=settings.LOGGING_LEVEL,\n format=settings.SIMPLE_LOG_FORMAT)\n\n if args.dag_id and not args.subdir and not args.dag_regex and not args.task_regex:\n dags = get_dag_by_file_location(args.dag_id)\n else:\n # todo clear command only accepts a single dag_id. no reason for get_dags with 's' except regex?\n dags = get_dags(args.subdir, args.dag_id, use_regex=args.dag_regex)\n\n if args.task_regex:\n for idx, dag in enumerate(dags):\n dags[idx] = dag.partial_subset(\n task_regex=args.task_regex,\n include_downstream=args.downstream,\n include_upstream=args.upstream)\n\n DAG.clear_dags(\n dags,\n start_date=args.start_date,\n end_date=args.end_date,\n only_failed=args.only_failed,\n only_running=args.only_running,\n confirm_prompt=not args.yes,\n include_subdags=not args.exclude_subdags,\n include_parentdag=not args.exclude_parentdag,\n )\n", "path": "airflow/cli/commands/task_command.py" } ]
[ { "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Task sub-commands\"\"\"\nimport importlib\nimport json\nimport logging\nimport os\nimport textwrap\nfrom contextlib import redirect_stderr, redirect_stdout\nfrom typing import List\n\nfrom tabulate import tabulate\n\nfrom airflow import settings\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowException\nfrom airflow.executors.executor_loader import ExecutorLoader\nfrom airflow.jobs.local_task_job import LocalTaskJob\nfrom airflow.models import DagPickle, TaskInstance\nfrom airflow.models.dag import DAG\nfrom airflow.ti_deps.dep_context import DepContext\nfrom airflow.ti_deps.dependencies_deps import SCHEDULER_QUEUED_DEPS\nfrom airflow.utils import cli as cli_utils\nfrom airflow.utils.cli import get_dag, get_dag_by_file_location, get_dag_by_pickle, get_dags\nfrom airflow.utils.log.logging_mixin import StreamLogWriter\nfrom airflow.utils.net import get_hostname\nfrom airflow.utils.session import create_session\n\n\ndef _run_task_by_selected_method(args, dag, ti):\n \"\"\"\n Runs the task in one of 3 modes\n\n - using LocalTaskJob\n - as raw task\n - by executor\n \"\"\"\n if args.local and args.raw:\n raise AirflowException(\n \"Option --raw and --local are mutually exclusive. \"\n \"Please remove one option to execute the command.\"\n )\n if args.local:\n _run_task_by_local_task_job(args, ti)\n elif args.raw:\n _run_raw_task(args, ti)\n else:\n _run_task_by_executor(args, dag, ti)\n\n\ndef _run_task_by_executor(args, dag, ti):\n \"\"\"\n Sends the task to the executor for execution. 
This can result in the task being started by another host\n if the executor implementation does\n \"\"\"\n pickle_id = None\n if args.ship_dag:\n try:\n # Running remotely, so pickling the DAG\n with create_session() as session:\n pickle = DagPickle(dag)\n session.add(pickle)\n pickle_id = pickle.id\n # TODO: This should be written to a log\n print('Pickled dag {dag} as pickle_id: {pickle_id}'.format(\n dag=dag, pickle_id=pickle_id))\n except Exception as e:\n print('Could not pickle the DAG')\n print(e)\n raise e\n executor = ExecutorLoader.get_default_executor()\n executor.start()\n print(\"Sending to executor.\")\n executor.queue_task_instance(\n ti,\n mark_success=args.mark_success,\n pickle_id=pickle_id,\n ignore_all_deps=args.ignore_all_dependencies,\n ignore_depends_on_past=args.ignore_depends_on_past,\n ignore_task_deps=args.ignore_dependencies,\n ignore_ti_state=args.force,\n pool=args.pool)\n executor.heartbeat()\n executor.end()\n\n\ndef _run_task_by_local_task_job(args, ti):\n \"\"\"Run LocalTaskJob, which monitors the raw task execution process\"\"\"\n run_job = LocalTaskJob(\n task_instance=ti,\n mark_success=args.mark_success,\n pickle_id=args.pickle,\n ignore_all_deps=args.ignore_all_dependencies,\n ignore_depends_on_past=args.ignore_depends_on_past,\n ignore_task_deps=args.ignore_dependencies,\n ignore_ti_state=args.force,\n pool=args.pool)\n run_job.run()\n\n\nRAW_TASK_UNSUPPORTED_OPTION = [\n \"ignore_all_dependencies\", \"ignore_depends_on_past\", \"ignore_dependencies\", \"force\"\n]\n\n\ndef _run_raw_task(args, ti):\n \"\"\"Runs the main task handling code\"\"\"\n unsupported_options = [o for o in RAW_TASK_UNSUPPORTED_OPTION if getattr(args, o)]\n\n if unsupported_options:\n raise AirflowException(\n \"Option --raw does not work with some of the other options on this command. You \"\n \"can't use --raw option and the following options: {}. You provided the option {}. 
\"\n \"Delete it to execute the command\".format(\n \", \".join(f\"--{o}\" for o in RAW_TASK_UNSUPPORTED_OPTION),\n \", \".join(f\"--{o}\" for o in unsupported_options),\n )\n )\n ti._run_raw_task( # pylint: disable=protected-access\n mark_success=args.mark_success,\n job_id=args.job_id,\n pool=args.pool,\n )\n\n\n@cli_utils.action_logging\ndef task_run(args, dag=None):\n \"\"\"Runs a single task instance\"\"\"\n # Load custom airflow config\n if args.cfg_path:\n with open(args.cfg_path, 'r') as conf_file:\n conf_dict = json.load(conf_file)\n\n if os.path.exists(args.cfg_path):\n os.remove(args.cfg_path)\n\n conf.read_dict(conf_dict, source=args.cfg_path)\n settings.configure_vars()\n\n # IMPORTANT, have to use the NullPool, otherwise, each \"run\" command may leave\n # behind multiple open sleeping connections while heartbeating, which could\n # easily exceed the database connection limit when\n # processing hundreds of simultaneous tasks.\n settings.configure_orm(disable_connection_pool=True)\n\n if dag and args.pickle:\n raise AirflowException(\"You cannot use the --pickle option when using DAG.cli() method.\")\n elif args.pickle:\n print(f'Loading pickle id: {args.pickle}')\n dag = get_dag_by_pickle(args.pickle)\n elif not dag:\n dag = get_dag(args.subdir, args.dag_id)\n else:\n # Use DAG from parameter\n pass\n\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n ti.refresh_from_db()\n ti.init_run_context(raw=args.raw)\n\n hostname = get_hostname()\n\n print(f\"Running {ti} on host {hostname}\")\n\n if args.interactive:\n _run_task_by_selected_method(args, dag, ti)\n else:\n if settings.DONOT_MODIFY_HANDLERS:\n with redirect_stdout(StreamLogWriter(ti.log, logging.INFO)), \\\n redirect_stderr(StreamLogWriter(ti.log, logging.WARN)):\n _run_task_by_selected_method(args, dag, ti)\n else:\n # Get all the Handlers from 'airflow.task' logger\n # Add these handlers to the root logger so that we can get logs from\n # any custom loggers defined in the DAG\n airflow_logger_handlers = logging.getLogger('airflow.task').handlers\n root_logger = logging.getLogger()\n root_logger_handlers = root_logger.handlers\n\n # Remove all handlers from Root Logger to avoid duplicate logs\n for handler in root_logger_handlers:\n root_logger.removeHandler(handler)\n\n for handler in airflow_logger_handlers:\n root_logger.addHandler(handler)\n root_logger.setLevel(logging.getLogger('airflow.task').level)\n\n with redirect_stdout(StreamLogWriter(ti.log, logging.INFO)), \\\n redirect_stderr(StreamLogWriter(ti.log, logging.WARN)):\n _run_task_by_selected_method(args, dag, ti)\n\n # We need to restore the handlers to the loggers as celery worker process\n # can call this command multiple times,\n # so if we don't reset this then logs from next task would go to the wrong place\n for handler in airflow_logger_handlers:\n root_logger.removeHandler(handler)\n for handler in root_logger_handlers:\n root_logger.addHandler(handler)\n\n logging.shutdown()\n\n\n@cli_utils.action_logging\ndef task_failed_deps(args):\n \"\"\"\n Returns the unmet dependencies for a task instance from the perspective of the\n scheduler (i.e. 
why a task instance doesn't get scheduled and then queued by the\n scheduler, and then run by an executor).\n >>> airflow tasks failed-deps tutorial sleep 2015-01-01\n Task instance dependencies not met:\n Dagrun Running: Task instance's dagrun did not exist: Unknown reason\n Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks\n to have succeeded, but found 1 non-success(es).\n \"\"\"\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n\n dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)\n failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))\n # TODO, Do we want to print or log this\n if failed_deps:\n print(\"Task instance dependencies not met:\")\n for dep in failed_deps:\n print(\"{}: {}\".format(dep.dep_name, dep.reason))\n else:\n print(\"Task instance dependencies are all met.\")\n\n\n@cli_utils.action_logging\ndef task_state(args):\n \"\"\"\n Returns the state of a TaskInstance at the command line.\n >>> airflow tasks state tutorial sleep 2015-01-01\n success\n \"\"\"\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n print(ti.current_state())\n\n\n@cli_utils.action_logging\ndef task_list(args, dag=None):\n \"\"\"Lists the tasks within a DAG at the command line\"\"\"\n dag = dag or get_dag(args.subdir, args.dag_id)\n if args.tree:\n dag.tree_view()\n else:\n tasks = sorted([t.task_id for t in dag.tasks])\n print(\"\\n\".join(tasks))\n\n\nSUPPORTED_DEBUGGER_MODULES: List[str] = [\n \"pudb\",\n \"web_pdb\",\n \"ipdb\",\n \"pdb\",\n]\n\n\ndef _guess_debugger():\n \"\"\"\n Trying to guess the debugger used by the user. When it doesn't find any user-installed debugger,\n returns ``pdb``.\n\n List of supported debuggers:\n\n * `pudb <https://github.com/inducer/pudb>`__\n * `web_pdb <https://github.com/romanvm/python-web-pdb>`__\n * `ipdb <https://github.com/gotcha/ipdb>`__\n * `pdb <https://docs.python.org/3/library/pdb.html>`__\n \"\"\"\n for mod in SUPPORTED_DEBUGGER_MODULES:\n try:\n return importlib.import_module(mod)\n except ImportError:\n continue\n return importlib.import_module(\"pdb\")\n\n\n@cli_utils.action_logging\ndef task_states_for_dag_run(args):\n \"\"\"Get the status of all task instances in a DagRun\"\"\"\n session = settings.Session()\n\n tis = session.query(\n TaskInstance.dag_id,\n TaskInstance.execution_date,\n TaskInstance.task_id,\n TaskInstance.state,\n TaskInstance.start_date,\n TaskInstance.end_date).filter(\n TaskInstance.dag_id == args.dag_id,\n TaskInstance.execution_date == args.execution_date).all()\n\n if len(tis) == 0:\n raise AirflowException(\"DagRun does not exist.\")\n\n formatted_rows = []\n\n for ti in tis:\n formatted_rows.append((ti.dag_id,\n ti.execution_date,\n ti.task_id,\n ti.state,\n ti.start_date,\n ti.end_date))\n\n print(\n \"\\n%s\" %\n tabulate(\n formatted_rows, [\n 'dag', 'exec_date', 'task', 'state', 'start_date', 'end_date'], tablefmt=args.output))\n\n session.close()\n\n\n@cli_utils.action_logging\ndef task_test(args, dag=None):\n \"\"\"Tests task for a given dag_id\"\"\"\n # We want to log output from operators etc to show up here. 
Normally\n # airflow.task would redirect to a file, but here we want it to propagate\n # up to the normal airflow handler.\n handlers = logging.getLogger('airflow.task').handlers\n already_has_stream_handler = False\n for handler in handlers:\n already_has_stream_handler = isinstance(handler, logging.StreamHandler)\n if already_has_stream_handler:\n break\n if not already_has_stream_handler:\n logging.getLogger('airflow.task').propagate = True\n\n env_vars = {'AIRFLOW_TEST_MODE': 'True'}\n if args.env_vars:\n env_vars.update(args.env_vars)\n os.environ.update(env_vars)\n\n dag = dag or get_dag(args.subdir, args.dag_id)\n\n task = dag.get_task(task_id=args.task_id)\n # Add CLI provided task_params to task.params\n if args.task_params:\n passed_in_params = json.loads(args.task_params)\n task.params.update(passed_in_params)\n ti = TaskInstance(task, args.execution_date)\n\n try:\n if args.dry_run:\n ti.dry_run()\n else:\n ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)\n except Exception: # pylint: disable=broad-except\n if args.post_mortem:\n debugger = _guess_debugger()\n debugger.post_mortem()\n else:\n raise\n finally:\n if not already_has_stream_handler:\n # Make sure to reset back to normal. When run for CLI this doesn't\n # matter, but it does for test suite\n logging.getLogger('airflow.task').propagate = False\n\n\n@cli_utils.action_logging\ndef task_render(args):\n \"\"\"Renders and displays templated fields for a given task\"\"\"\n dag = get_dag(args.subdir, args.dag_id)\n task = dag.get_task(task_id=args.task_id)\n ti = TaskInstance(task, args.execution_date)\n ti.render_templates()\n for attr in task.__class__.template_fields:\n print(textwrap.dedent(\"\"\"\\\n # ----------------------------------------------------------\n # property: {}\n # ----------------------------------------------------------\n {}\n \"\"\".format(attr, getattr(task, attr))))\n\n\n@cli_utils.action_logging\ndef task_clear(args):\n \"\"\"Clears all task instances or only those matched by regex for a DAG(s)\"\"\"\n logging.basicConfig(\n level=settings.LOGGING_LEVEL,\n format=settings.SIMPLE_LOG_FORMAT)\n\n if args.dag_id and not args.subdir and not args.dag_regex and not args.task_regex:\n dags = get_dag_by_file_location(args.dag_id)\n else:\n # todo clear command only accepts a single dag_id. no reason for get_dags with 's' except regex?\n dags = get_dags(args.subdir, args.dag_id, use_regex=args.dag_regex)\n\n if args.task_regex:\n for idx, dag in enumerate(dags):\n dags[idx] = dag.partial_subset(\n task_regex=args.task_regex,\n include_downstream=args.downstream,\n include_upstream=args.upstream)\n\n DAG.clear_dags(\n dags,\n start_date=args.start_date,\n end_date=args.end_date,\n only_failed=args.only_failed,\n only_running=args.only_running,\n confirm_prompt=not args.yes,\n include_subdags=not args.exclude_subdags,\n include_parentdag=not args.exclude_parentdag,\n )\n", "path": "airflow/cli/commands/task_command.py" } ]
diff --git a/airflow/cli/commands/task_command.py b/airflow/cli/commands/task_command.py index 145d78b917b1d..6d48907aa6d70 100644 --- a/airflow/cli/commands/task_command.py +++ b/airflow/cli/commands/task_command.py @@ -171,6 +171,7 @@ def task_run(args, dag=None): task = dag.get_task(task_id=args.task_id) ti = TaskInstance(task, args.execution_date) + ti.refresh_from_db() ti.init_run_context(raw=args.raw) hostname = get_hostname() diff --git a/tests/cli/commands/test_task_command.py b/tests/cli/commands/test_task_command.py index 0a4b991667749..ed301b2e8e092 100644 --- a/tests/cli/commands/test_task_command.py +++ b/tests/cli/commands/test_task_command.py @@ -31,11 +31,12 @@ from airflow.cli.commands import task_command from airflow.configuration import conf from airflow.exceptions import AirflowException -from airflow.models import DagBag, TaskInstance -from airflow.settings import Session +from airflow.models import DagBag, DagRun, TaskInstance from airflow.utils import timezone from airflow.utils.cli import get_dag +from airflow.utils.session import create_session from airflow.utils.state import State +from airflow.utils.types import DagRunType from tests.test_utils.config import conf_vars from tests.test_utils.db import clear_db_pools, clear_db_runs @@ -46,11 +47,11 @@ def reset(dag_id): - session = Session() - tis = session.query(TaskInstance).filter_by(dag_id=dag_id) - tis.delete() - session.commit() - session.close() + with create_session() as session: + tis = session.query(TaskInstance).filter_by(dag_id=dag_id) + tis.delete() + runs = session.query(DagRun).filter_by(dag_id=dag_id) + runs.delete() class TestCliTasks(unittest.TestCase): @@ -256,8 +257,10 @@ class TestLogsfromTaskRunCommand(unittest.TestCase): def setUp(self) -> None: self.dag_id = "test_logging_dag" self.task_id = "test_task" + self.dag_path = os.path.join(ROOT_FOLDER, "dags", "test_logging_in_dag.py") reset(self.dag_id) - self.execution_date_str = timezone.make_aware(datetime(2017, 1, 1)).isoformat() + self.execution_date = timezone.make_aware(datetime(2017, 1, 1)) + self.execution_date_str = self.execution_date.isoformat() self.log_dir = conf.get('logging', 'base_log_folder') self.log_filename = f"{self.dag_id}/{self.task_id}/{self.execution_date_str}/1.log" self.ti_log_file_path = os.path.join(self.log_dir, self.log_filename) @@ -295,7 +298,7 @@ def test_logging_with_run_task(self): # We are not using self.assertLogs as we want to verify what actually is stored in the Log file # as that is what gets displayed - with conf_vars({('core', 'dags_folder'): os.path.join(ROOT_FOLDER, f"tests/dags/{self.dag_id}")}): + with conf_vars({('core', 'dags_folder'): self.dag_path}): task_command.task_run(self.parser.parse_args([ 'tasks', 'run', self.dag_id, self.task_id, '--local', self.execution_date_str])) @@ -322,7 +325,7 @@ def test_logging_with_run_task(self): def test_logging_with_run_task_subprocess(self): # We are not using self.assertLogs as we want to verify what actually is stored in the Log file # as that is what gets displayed - with conf_vars({('core', 'dags_folder'): os.path.join(ROOT_FOLDER, f"tests/dags/{self.dag_id}")}): + with conf_vars({('core', 'dags_folder'): self.dag_path}): task_command.task_run(self.parser.parse_args([ 'tasks', 'run', self.dag_id, self.task_id, '--local', self.execution_date_str])) @@ -343,6 +346,43 @@ def test_logging_with_run_task_subprocess(self): self.assertIn(f"INFO - Marking task as SUCCESS.dag_id={self.dag_id}, " f"task_id={self.task_id}, execution_date=20170101T000000", logs) + 
def test_log_file_template_with_run_task(self): + """Verify that the taskinstance has the right context for log_filename_template""" + + with mock.patch.object(task_command, "_run_task_by_selected_method"): + with conf_vars({('core', 'dags_folder'): self.dag_path}): + # increment the try_number of the task to be run + dag = DagBag().get_dag(self.dag_id) + task = dag.get_task(self.task_id) + with create_session() as session: + dag.create_dagrun( + execution_date=self.execution_date, + start_date=timezone.utcnow(), + state=State.RUNNING, + run_type=DagRunType.MANUAL, + session=session, + ) + ti = TaskInstance(task, self.execution_date) + ti.refresh_from_db(session=session, lock_for_update=True) + ti.try_number = 1 # not running, so starts at 0 + session.merge(ti) + + log_file_path = os.path.join(os.path.dirname(self.ti_log_file_path), "2.log") + + try: + task_command.task_run( + self.parser.parse_args( + ['tasks', 'run', self.dag_id, self.task_id, '--local', self.execution_date_str] + ) + ) + + assert os.path.exists(log_file_path) + finally: + try: + os.remove(log_file_path) + except OSError: + pass + class TestCliTaskBackfill(unittest.TestCase): @classmethod
python-telegram-bot__python-telegram-bot-377
Execfile does not exist in py3k

<!--
Thanks for reporting issues of python-telegram-bot!
To make it easier for us to help you please enter detailed information below.

Please note, we only support the latest version of python-telegram-bot and master branch.
Please make sure to upgrade & recreate the issue on the latest version prior to opening an issue.
-->

### Steps to reproduce
1. Use python 3
2. Try to install from git: `$ pip install -e git+https://github.com/python-telegram-bot/python-telegram-bot.git@555e36ee8036a179f157f60dcb0c3fcf958146f4#egg=telegram`

### Expected behaviour
The library should be installed.

### Actual behaviour
NameError due to `execfile` not being a thing in python 3. See here for alternatives: https://stackoverflow.com/a/437857

I would fix it myself, but I am unable to actually find the execfile call anywhere .-.

### Configuration
**Operating System:** Windows 10 Education

**Version of Python, python-telegram-bot & dependencies:**
Python 3.5.2 |Continuum Analytics, Inc.| (default, Jul 5 2016, 11:41:13) [MSC v.1900 64 bit (AMD64)]

### Logs
```
$ pip install -e git+https://github.com/python-telegram-bot/python-telegram-bot.git@555e36ee8036a179f157f60dcb0c3fcf958146f4#egg=telegram
Obtaining telegram from git+https://github.com/python-telegram-bot/python-telegram-bot.git@555e36ee8036a179f157f60dcb0c3fcf958146f4#egg=telegram
  Skipping because already up-to-date.
    Complete output from command python setup.py egg_info:
    Traceback (most recent call last):
      File "<string>", line 1, in <module>
      File "C:\Development\telegram\VocaBot2\src\telegram\setup.py", line 20, in <module>
        execfile(os.path.join('telegram', 'version.py'))
    NameError: name 'execfile' is not defined
Command "python setup.py egg_info" failed with error code 1
```
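The Stack Overflow answer linked in the report replaces the removed builtin with an open/compile/exec helper, and the patch recorded below for this row takes the same route inside `setup.py`. A small self-contained sketch of the idea (the explicit namespace dict is an addition here so the result is easy to read back; it assumes you run it from the repository root, where `telegram/version.py` exists):

```
import os


def execfile(fn, namespace):
    """Python 3 stand-in for the removed execfile() builtin."""
    with open(fn) as f:
        code = compile(f.read(), fn, 'exec')
    exec(code, namespace)  # run the file's code inside the supplied namespace


# e.g. read __version__ from telegram/version.py without importing the package
ns = {}
execfile(os.path.join('telegram', 'version.py'), ns)
print(ns['__version__'])
```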
[ { "content": "#!/usr/bin/env python\n\"\"\"The setup and build script for the python-telegram-bot library.\"\"\"\n\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef requirements():\n \"\"\"Build the requirements list for this project\"\"\"\n requirements_list = []\n\n with open('requirements.txt') as requirements:\n for install in requirements:\n requirements_list.append(install.strip())\n\n return requirements_list\n\nwith codecs.open('README.rst', 'r', 'utf-8') as fd:\n execfile(os.path.join('telegram', 'version.py'))\n\n setup(name='python-telegram-bot',\n version=__version__,\n author='Leandro Toledo',\n author_email='[email protected]',\n license='LGPLv3',\n url='https://github.com/python-telegram-bot/python-telegram-bot',\n keywords='python telegram bot api wrapper',\n description='Not just a Python wrapper around the Telegram Bot API',\n long_description=fd.read(),\n packages=find_packages(exclude=['tests*']),\n install_requires=requirements(),\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n\"\"\"The setup and build script for the python-telegram-bot library.\"\"\"\n\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef requirements():\n \"\"\"Build the requirements list for this project\"\"\"\n requirements_list = []\n\n with open('requirements.txt') as requirements:\n for install in requirements:\n requirements_list.append(install.strip())\n\n return requirements_list\n\n\ndef execfile(fn):\n with open(fn) as f:\n code = compile(f.read(), fn, 'exec')\n exec(code)\n\n\nwith codecs.open('README.rst', 'r', 'utf-8') as fd:\n execfile(os.path.join('telegram', 'version.py'))\n\n setup(name='python-telegram-bot',\n version=__version__,\n author='Leandro Toledo',\n author_email='[email protected]',\n license='LGPLv3',\n url='https://github.com/python-telegram-bot/python-telegram-bot',\n keywords='python telegram bot api wrapper',\n description='Not just a Python wrapper around the Telegram Bot API',\n long_description=fd.read(),\n packages=find_packages(exclude=['tests*']),\n install_requires=requirements(),\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 3b114c51d88..bd57d845bcd 100644 --- a/setup.py +++ b/setup.py @@ -16,6 +16,13 @@ def requirements(): return requirements_list + +def execfile(fn): + with open(fn) as f: + code = compile(f.read(), fn, 'exec') + exec(code) + + with codecs.open('README.rst', 'r', 'utf-8') as fd: execfile(os.path.join('telegram', 'version.py'))
jschneier__django-storages-1087
SFTPStorage: keeps trying to connect in a loop when the connection times out

I was implementing saving a file over SFTP and, while testing timeouts, the timeout never reached my code. Debugging shows the storage just keeps trying to connect in a loop. Specifically:

- https://github.com/jschneier/django-storages/blob/master/storages/backends/sftpstorage.py#L67

```
try:
    self._ssh.connect(self._host, **self._params)
except paramiko.AuthenticationException as e:
```

the exception actually raised here is `socket.timeout` (`from socket import timeout`), which this `except` clause does not catch.

I may not have done something correctly, but this is the workaround I'm using (maybe it helps someone, or at least illustrates the problem):

```
from socket import timeout

from storages.backends.sftpstorage import SFTPStorage


class SFTPStorageWithException(SFTPStorage):

    @property
    def sftp(self):
        try:
            return super().sftp
        except timeout as exc:
            log_message(f'Timeout connecting to SFTP: {exc}', 'error')
            raise paramiko.AuthenticationException(exc)
```

thanks
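As background for the patch further down in this entry, which narrows the `except` clause in `exists()` from `OSError` to `FileNotFoundError`: since Python 3.3, `socket.timeout` is a subclass of `OSError`, so a broad `except OSError` silently converts a connection timeout into "the file does not exist" instead of surfacing the error. A minimal, standalone sketch of the difference (the fake stat callable below is a stand-in for `sftp.stat`, not a real paramiko call):

```
import socket

# socket.timeout has been a subclass of OSError since Python 3.3,
# so `except OSError` also catches connection timeouts.
print(issubclass(socket.timeout, OSError))  # True


def exists_broad(stat):
    try:
        stat('remote/path')
        return True
    except OSError:            # swallows socket.timeout -> reported as "missing file"
        return False


def exists_narrow(stat):
    try:
        stat('remote/path')
        return True
    except FileNotFoundError:  # only "no such file"; timeouts propagate to the caller
        return False


def fake_timing_out_stat(path):
    raise socket.timeout('timed out')


print(exists_broad(fake_timing_out_stat))   # False, the timeout is hidden
try:
    exists_narrow(fake_timing_out_stat)
except socket.timeout as exc:
    print('timeout surfaced:', exc)
```

With the narrower clause, a timeout raised while talking to the server reaches the caller instead of being mistaken for a missing file.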
[ { "content": "# SFTP storage backend for Django.\n# Author: Brent Tubbs <[email protected]>\n# License: MIT\n#\n# Modeled on the FTP storage by Rafal Jonca <[email protected]>\n\nimport getpass\nimport io\nimport os\nimport posixpath\nimport stat\nfrom datetime import datetime\nfrom urllib.parse import urljoin\n\nimport paramiko\nfrom django.core.files.base import File\nfrom django.utils.deconstruct import deconstructible\n\nfrom storages.base import BaseStorage\nfrom storages.utils import setting\n\n\n@deconstructible\nclass SFTPStorage(BaseStorage):\n def __init__(self, **settings):\n super().__init__(**settings)\n self._host = self.host\n self._params = self.params\n self._interactive = self.interactive\n self._file_mode = self.file_mode\n self._dir_mode = self.dir_mode\n self._uid = self.uid\n self._gid = self.gid\n self._known_host_file = self.known_host_file\n self._root_path = self.root_path\n self._base_url = self.base_url\n self._sftp = None\n\n def get_default_settings(self):\n return {\n 'host': setting('SFTP_STORAGE_HOST'),\n 'params': setting('SFTP_STORAGE_PARAMS', {}),\n 'interactive': setting('SFTP_STORAGE_INTERACTIVE', False),\n 'file_mode': setting('SFTP_STORAGE_FILE_MODE'),\n 'dir_mode': setting('SFTP_STORAGE_DIR_MODE'),\n 'uid': setting('SFTP_STORAGE_UID'),\n 'gid': setting('SFTP_STORAGE_GID'),\n 'known_host_file': setting('SFTP_KNOWN_HOST_FILE'),\n 'root_path': setting('SFTP_STORAGE_ROOT', ''),\n 'base_url': setting('MEDIA_URL'),\n }\n\n def _connect(self):\n self._ssh = paramiko.SSHClient()\n\n known_host_file = self._known_host_file or os.path.expanduser(\n os.path.join(\"~\", \".ssh\", \"known_hosts\")\n )\n\n if os.path.exists(known_host_file):\n self._ssh.load_host_keys(known_host_file)\n\n # and automatically add new host keys for hosts we haven't seen before.\n self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n try:\n self._ssh.connect(self._host, **self._params)\n except paramiko.AuthenticationException as e:\n if self._interactive and 'password' not in self._params:\n # If authentication has failed, and we haven't already tried\n # username/password, and configuration allows it, then try\n # again with username/password.\n if 'username' not in self._params:\n self._params['username'] = getpass.getuser()\n self._params['password'] = getpass.getpass()\n self._connect()\n else:\n raise paramiko.AuthenticationException(e)\n\n if self._ssh.get_transport():\n self._sftp = self._ssh.open_sftp()\n\n @property\n def sftp(self):\n \"\"\"Lazy SFTP connection\"\"\"\n if not self._sftp or not self._ssh.get_transport().is_active():\n self._connect()\n return self._sftp\n\n def _remote_path(self, name):\n return posixpath.join(self._root_path, name)\n\n def _open(self, name, mode='rb'):\n return SFTPStorageFile(name, self, mode)\n\n def _read(self, name):\n remote_path = self._remote_path(name)\n return self.sftp.open(remote_path, 'rb')\n\n def _chown(self, path, uid=None, gid=None):\n \"\"\"Set uid and/or gid for file at path.\"\"\"\n # Paramiko's chown requires both uid and gid, so look them up first if\n # we're only supposed to set one.\n if uid is None or gid is None:\n attr = self.sftp.stat(path)\n uid = uid or attr.st_uid\n gid = gid or attr.st_gid\n self.sftp.chown(path, uid, gid)\n\n def _mkdir(self, path):\n \"\"\"Create directory, recursing up to create parent dirs if\n necessary.\"\"\"\n parent = posixpath.dirname(path)\n if not self.exists(parent):\n self._mkdir(parent)\n self.sftp.mkdir(path)\n\n if self._dir_mode is not None:\n 
self.sftp.chmod(path, self._dir_mode)\n\n if self._uid or self._gid:\n self._chown(path, uid=self._uid, gid=self._gid)\n\n def _save(self, name, content):\n \"\"\"Save file via SFTP.\"\"\"\n content.open()\n path = self._remote_path(name)\n dirname = posixpath.dirname(path)\n if not self.exists(dirname):\n self._mkdir(dirname)\n\n f = self.sftp.open(path, 'wb')\n f.write(content.file.read())\n f.close()\n\n # set file permissions if configured\n if self._file_mode is not None:\n self.sftp.chmod(path, self._file_mode)\n if self._uid or self._gid:\n self._chown(path, uid=self._uid, gid=self._gid)\n return name\n\n def delete(self, name):\n try:\n self.sftp.remove(self._remote_path(name))\n except OSError:\n pass\n\n def exists(self, name):\n try:\n self.sftp.stat(self._remote_path(name))\n return True\n except OSError:\n return False\n\n def _isdir_attr(self, item):\n # Return whether an item in sftp.listdir_attr results is a directory\n if item.st_mode is not None:\n return stat.S_IFMT(item.st_mode) == stat.S_IFDIR\n else:\n return False\n\n def listdir(self, path):\n remote_path = self._remote_path(path)\n dirs, files = [], []\n for item in self.sftp.listdir_attr(remote_path):\n if self._isdir_attr(item):\n dirs.append(item.filename)\n else:\n files.append(item.filename)\n return dirs, files\n\n def size(self, name):\n remote_path = self._remote_path(name)\n return self.sftp.stat(remote_path).st_size\n\n def accessed_time(self, name):\n remote_path = self._remote_path(name)\n utime = self.sftp.stat(remote_path).st_atime\n return datetime.fromtimestamp(utime)\n\n def modified_time(self, name):\n remote_path = self._remote_path(name)\n utime = self.sftp.stat(remote_path).st_mtime\n return datetime.fromtimestamp(utime)\n\n def url(self, name):\n if self._base_url is None:\n raise ValueError(\"This file is not accessible via a URL.\")\n return urljoin(self._base_url, name).replace('\\\\', '/')\n\n\nclass SFTPStorageFile(File):\n def __init__(self, name, storage, mode):\n self.name = name\n self.mode = mode\n self.file = io.BytesIO()\n self._storage = storage\n self._is_read = False\n self._is_dirty = False\n\n @property\n def size(self):\n if not hasattr(self, '_size'):\n self._size = self._storage.size(self.name)\n return self._size\n\n def read(self, num_bytes=None):\n if not self._is_read:\n self.file = self._storage._read(self.name)\n self._is_read = True\n\n return self.file.read(num_bytes)\n\n def write(self, content):\n if 'w' not in self.mode:\n raise AttributeError(\"File was opened for read-only access.\")\n self.file = io.BytesIO(content)\n self._is_dirty = True\n self._is_read = True\n\n def open(self, mode=None):\n if not self.closed:\n self.seek(0)\n elif self.name and self._storage.exists(self.name):\n self.file = self._storage._open(self.name, mode or self.mode)\n else:\n raise ValueError(\"The file cannot be reopened.\")\n\n def close(self):\n if self._is_dirty:\n self._storage._save(self.name, self)\n self.file.close()\n", "path": "storages/backends/sftpstorage.py" } ]
[ { "content": "# SFTP storage backend for Django.\n# Author: Brent Tubbs <[email protected]>\n# License: MIT\n#\n# Modeled on the FTP storage by Rafal Jonca <[email protected]>\n\nimport getpass\nimport io\nimport os\nimport posixpath\nimport stat\nfrom datetime import datetime\nfrom urllib.parse import urljoin\n\nimport paramiko\nfrom django.core.files.base import File\nfrom django.utils.deconstruct import deconstructible\n\nfrom storages.base import BaseStorage\nfrom storages.utils import setting\n\n\n@deconstructible\nclass SFTPStorage(BaseStorage):\n def __init__(self, **settings):\n super().__init__(**settings)\n self._host = self.host\n self._params = self.params\n self._interactive = self.interactive\n self._file_mode = self.file_mode\n self._dir_mode = self.dir_mode\n self._uid = self.uid\n self._gid = self.gid\n self._known_host_file = self.known_host_file\n self._root_path = self.root_path\n self._base_url = self.base_url\n self._sftp = None\n\n def get_default_settings(self):\n return {\n 'host': setting('SFTP_STORAGE_HOST'),\n 'params': setting('SFTP_STORAGE_PARAMS', {}),\n 'interactive': setting('SFTP_STORAGE_INTERACTIVE', False),\n 'file_mode': setting('SFTP_STORAGE_FILE_MODE'),\n 'dir_mode': setting('SFTP_STORAGE_DIR_MODE'),\n 'uid': setting('SFTP_STORAGE_UID'),\n 'gid': setting('SFTP_STORAGE_GID'),\n 'known_host_file': setting('SFTP_KNOWN_HOST_FILE'),\n 'root_path': setting('SFTP_STORAGE_ROOT', ''),\n 'base_url': setting('MEDIA_URL'),\n }\n\n def _connect(self):\n self._ssh = paramiko.SSHClient()\n\n known_host_file = self._known_host_file or os.path.expanduser(\n os.path.join(\"~\", \".ssh\", \"known_hosts\")\n )\n\n if os.path.exists(known_host_file):\n self._ssh.load_host_keys(known_host_file)\n\n # and automatically add new host keys for hosts we haven't seen before.\n self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n try:\n self._ssh.connect(self._host, **self._params)\n except paramiko.AuthenticationException as e:\n if self._interactive and 'password' not in self._params:\n # If authentication has failed, and we haven't already tried\n # username/password, and configuration allows it, then try\n # again with username/password.\n if 'username' not in self._params:\n self._params['username'] = getpass.getuser()\n self._params['password'] = getpass.getpass()\n self._connect()\n else:\n raise paramiko.AuthenticationException(e)\n\n if self._ssh.get_transport():\n self._sftp = self._ssh.open_sftp()\n\n @property\n def sftp(self):\n \"\"\"Lazy SFTP connection\"\"\"\n if not self._sftp or not self._ssh.get_transport().is_active():\n self._connect()\n return self._sftp\n\n def _remote_path(self, name):\n return posixpath.join(self._root_path, name)\n\n def _open(self, name, mode='rb'):\n return SFTPStorageFile(name, self, mode)\n\n def _read(self, name):\n remote_path = self._remote_path(name)\n return self.sftp.open(remote_path, 'rb')\n\n def _chown(self, path, uid=None, gid=None):\n \"\"\"Set uid and/or gid for file at path.\"\"\"\n # Paramiko's chown requires both uid and gid, so look them up first if\n # we're only supposed to set one.\n if uid is None or gid is None:\n attr = self.sftp.stat(path)\n uid = uid or attr.st_uid\n gid = gid or attr.st_gid\n self.sftp.chown(path, uid, gid)\n\n def _mkdir(self, path):\n \"\"\"Create directory, recursing up to create parent dirs if\n necessary.\"\"\"\n parent = posixpath.dirname(path)\n if not self.exists(parent):\n self._mkdir(parent)\n self.sftp.mkdir(path)\n\n if self._dir_mode is not None:\n 
self.sftp.chmod(path, self._dir_mode)\n\n if self._uid or self._gid:\n self._chown(path, uid=self._uid, gid=self._gid)\n\n def _save(self, name, content):\n \"\"\"Save file via SFTP.\"\"\"\n content.open()\n path = self._remote_path(name)\n dirname = posixpath.dirname(path)\n if not self.exists(dirname):\n self._mkdir(dirname)\n\n f = self.sftp.open(path, 'wb')\n f.write(content.file.read())\n f.close()\n\n # set file permissions if configured\n if self._file_mode is not None:\n self.sftp.chmod(path, self._file_mode)\n if self._uid or self._gid:\n self._chown(path, uid=self._uid, gid=self._gid)\n return name\n\n def delete(self, name):\n try:\n self.sftp.remove(self._remote_path(name))\n except OSError:\n pass\n\n def exists(self, name):\n try:\n self.sftp.stat(self._remote_path(name))\n return True\n except FileNotFoundError:\n return False\n\n def _isdir_attr(self, item):\n # Return whether an item in sftp.listdir_attr results is a directory\n if item.st_mode is not None:\n return stat.S_IFMT(item.st_mode) == stat.S_IFDIR\n else:\n return False\n\n def listdir(self, path):\n remote_path = self._remote_path(path)\n dirs, files = [], []\n for item in self.sftp.listdir_attr(remote_path):\n if self._isdir_attr(item):\n dirs.append(item.filename)\n else:\n files.append(item.filename)\n return dirs, files\n\n def size(self, name):\n remote_path = self._remote_path(name)\n return self.sftp.stat(remote_path).st_size\n\n def accessed_time(self, name):\n remote_path = self._remote_path(name)\n utime = self.sftp.stat(remote_path).st_atime\n return datetime.fromtimestamp(utime)\n\n def modified_time(self, name):\n remote_path = self._remote_path(name)\n utime = self.sftp.stat(remote_path).st_mtime\n return datetime.fromtimestamp(utime)\n\n def url(self, name):\n if self._base_url is None:\n raise ValueError(\"This file is not accessible via a URL.\")\n return urljoin(self._base_url, name).replace('\\\\', '/')\n\n\nclass SFTPStorageFile(File):\n def __init__(self, name, storage, mode):\n self.name = name\n self.mode = mode\n self.file = io.BytesIO()\n self._storage = storage\n self._is_read = False\n self._is_dirty = False\n\n @property\n def size(self):\n if not hasattr(self, '_size'):\n self._size = self._storage.size(self.name)\n return self._size\n\n def read(self, num_bytes=None):\n if not self._is_read:\n self.file = self._storage._read(self.name)\n self._is_read = True\n\n return self.file.read(num_bytes)\n\n def write(self, content):\n if 'w' not in self.mode:\n raise AttributeError(\"File was opened for read-only access.\")\n self.file = io.BytesIO(content)\n self._is_dirty = True\n self._is_read = True\n\n def open(self, mode=None):\n if not self.closed:\n self.seek(0)\n elif self.name and self._storage.exists(self.name):\n self.file = self._storage._open(self.name, mode or self.mode)\n else:\n raise ValueError(\"The file cannot be reopened.\")\n\n def close(self):\n if self._is_dirty:\n self._storage._save(self.name, self)\n self.file.close()\n", "path": "storages/backends/sftpstorage.py" } ]
diff --git a/storages/backends/sftpstorage.py b/storages/backends/sftpstorage.py index 529daf278..643685c88 100644 --- a/storages/backends/sftpstorage.py +++ b/storages/backends/sftpstorage.py @@ -150,7 +150,7 @@ def exists(self, name): try: self.sftp.stat(self._remote_path(name)) return True - except OSError: + except FileNotFoundError: return False def _isdir_attr(self, item): diff --git a/tests/test_sftp.py b/tests/test_sftp.py index 094c4447f..448358f29 100644 --- a/tests/test_sftp.py +++ b/tests/test_sftp.py @@ -1,5 +1,6 @@ import io import os +import socket import stat from datetime import datetime from unittest.mock import MagicMock, patch @@ -56,7 +57,7 @@ def test_mkdir(self, mock_sftp): self.assertEqual(mock_sftp.mkdir.call_args[0], ('foo',)) @patch('storages.backends.sftpstorage.SFTPStorage.sftp', **{ - 'stat.side_effect': (IOError(), True) + 'stat.side_effect': (FileNotFoundError(), True) }) def test_mkdir_parent(self, mock_sftp): self.storage._mkdir('bar/foo') @@ -69,7 +70,7 @@ def test_save(self, mock_sftp): self.assertTrue(mock_sftp.open.return_value.write.called) @patch('storages.backends.sftpstorage.SFTPStorage.sftp', **{ - 'stat.side_effect': (IOError(), True) + 'stat.side_effect': (FileNotFoundError(), True) }) def test_save_in_subdir(self, mock_sftp): self.storage._save('bar/foo', File(io.BytesIO(b'foo'), 'foo')) @@ -86,11 +87,18 @@ def test_exists(self, mock_sftp): self.assertTrue(self.storage.exists('foo')) @patch('storages.backends.sftpstorage.SFTPStorage.sftp', **{ - 'stat.side_effect': IOError() + 'stat.side_effect': FileNotFoundError() }) def test_not_exists(self, mock_sftp): self.assertFalse(self.storage.exists('foo')) + @patch('storages.backends.sftpstorage.SFTPStorage.sftp', **{ + 'stat.side_effect': socket.timeout() + }) + def test_not_exists_timeout(self, mock_sftp): + with self.assertRaises(socket.timeout): + self.storage.exists('foo') + @patch('storages.backends.sftpstorage.SFTPStorage.sftp', **{ 'listdir_attr.return_value': [MagicMock(filename='foo', st_mode=stat.S_IFDIR),
vyperlang__vyper-3340
Bug: compiler rejects `x not in [a, b]` in 0.3.8, whereas it was fine in 0.3.7

### Version Information

* vyper Version (output of `vyper --version`): 0.3.8
* OS: osx
* Python Version (output of `python --version`): 3.10.4

### What's your issue about?

<img width="705" alt="image" src="https://user-images.githubusercontent.com/11488427/230437774-c3b68030-9319-4169-b344-dbb470002102.png">
[ { "content": "from typing import Dict\n\nfrom vyper.semantics.analysis.base import VarInfo\nfrom vyper.semantics.types import AddressT, BytesT, VyperType\nfrom vyper.semantics.types.shortcuts import BYTES32_T, UINT256_T\n\n\n# common properties for environment variables\nclass _EnvType(VyperType):\n def __eq__(self, other):\n return self is other\n\n def __hash__(self):\n return hash(id(self))\n\n\nclass _Block(_EnvType):\n _id = \"block\"\n _type_members = {\n \"coinbase\": AddressT(),\n \"difficulty\": UINT256_T,\n \"prevrandao\": UINT256_T,\n \"number\": UINT256_T,\n \"gaslimit\": UINT256_T,\n \"basefee\": UINT256_T,\n \"prevhash\": BYTES32_T,\n \"timestamp\": UINT256_T,\n }\n\n\nclass _Chain(_EnvType):\n _id = \"chain\"\n _type_members = {\"id\": UINT256_T}\n\n\nclass _Msg(_EnvType):\n _id = \"msg\"\n _type_members = {\"data\": BytesT(), \"gas\": UINT256_T, \"sender\": AddressT(), \"value\": UINT256_T}\n\n\nclass _Tx(_EnvType):\n _id = \"tx\"\n _type_members = {\"origin\": AddressT(), \"gasprice\": UINT256_T}\n\n\nCONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())}\n\n\ndef get_constant_vars() -> Dict:\n \"\"\"\n Get a dictionary of constant environment variables.\n \"\"\"\n result = {}\n for k, v in CONSTANT_ENVIRONMENT_VARS.items():\n result[k] = VarInfo(v, is_constant=True)\n\n return result\n\n\n# Not sure this is necessary, but add an ad-hoc type for `self` for clarity\nclass _SelfT(AddressT):\n pass\n\n\nMUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": _SelfT}\n\n\ndef get_mutable_vars() -> Dict:\n \"\"\"\n Get a dictionary of mutable environment variables (those that are\n modified during the course of contract execution, such as `self`).\n \"\"\"\n return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()}\n", "path": "vyper/semantics/environment.py" } ]
[ { "content": "from typing import Dict\n\nfrom vyper.semantics.analysis.base import VarInfo\nfrom vyper.semantics.types import AddressT, BytesT, VyperType\nfrom vyper.semantics.types.shortcuts import BYTES32_T, UINT256_T\n\n\n# common properties for environment variables\nclass _EnvType(VyperType):\n def __eq__(self, other):\n return self is other\n\n def __hash__(self):\n return hash(id(self))\n\n\nclass _Block(_EnvType):\n _id = \"block\"\n _type_members = {\n \"coinbase\": AddressT(),\n \"difficulty\": UINT256_T,\n \"prevrandao\": UINT256_T,\n \"number\": UINT256_T,\n \"gaslimit\": UINT256_T,\n \"basefee\": UINT256_T,\n \"prevhash\": BYTES32_T,\n \"timestamp\": UINT256_T,\n }\n\n\nclass _Chain(_EnvType):\n _id = \"chain\"\n _type_members = {\"id\": UINT256_T}\n\n\nclass _Msg(_EnvType):\n _id = \"msg\"\n _type_members = {\"data\": BytesT(), \"gas\": UINT256_T, \"sender\": AddressT(), \"value\": UINT256_T}\n\n\nclass _Tx(_EnvType):\n _id = \"tx\"\n _type_members = {\"origin\": AddressT(), \"gasprice\": UINT256_T}\n\n\nCONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())}\n\n\ndef get_constant_vars() -> Dict:\n \"\"\"\n Get a dictionary of constant environment variables.\n \"\"\"\n result = {}\n for k, v in CONSTANT_ENVIRONMENT_VARS.items():\n result[k] = VarInfo(v, is_constant=True)\n\n return result\n\n\nMUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": AddressT}\n\n\ndef get_mutable_vars() -> Dict:\n \"\"\"\n Get a dictionary of mutable environment variables (those that are\n modified during the course of contract execution, such as `self`).\n \"\"\"\n return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()}\n", "path": "vyper/semantics/environment.py" } ]
diff --git a/vyper/semantics/environment.py b/vyper/semantics/environment.py index 0f915a2161..ad68f1103e 100644 --- a/vyper/semantics/environment.py +++ b/vyper/semantics/environment.py @@ -57,12 +57,7 @@ def get_constant_vars() -> Dict: return result -# Not sure this is necessary, but add an ad-hoc type for `self` for clarity -class _SelfT(AddressT): - pass - - -MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": _SelfT} +MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": AddressT} def get_mutable_vars() -> Dict:
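The patch above drops the ad-hoc `_SelfT` subclass so that `self` is typed as a plain `AddressT`. As a rough intuition for why an extra subclass can surface in membership checks such as `x not in [a, b]`, here is a toy illustration with made-up equality rules (it is not Vyper's actual type machinery): types that compare by exact class stop matching once a subclass is introduced.

```
class AddressT:
    # Toy rule, purely for illustration: two type objects are equal
    # only when their classes match exactly.
    def __eq__(self, other):
        return type(self) is type(other)

    def __hash__(self):
        return hash(type(self))


class _SelfT(AddressT):
    """Ad-hoc subclass standing in for the type of `self`."""


addresses = [AddressT(), AddressT()]
print(AddressT() in addresses)  # True  -> an ordinary address value matches
print(_SelfT() in addresses)    # False -> the `self` type no longer matches
```

Typing `self` directly as `AddressT`, as the diff does, removes that kind of mismatch.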
ansible__ansible-modules-extras-1291
pam_limits - documentation is not updated

`limit_type` choices are `hard` and `soft` in the [documentation](http://docs.ansible.com/ansible/pam_limits_module.html), but the [code](https://github.com/ansible/ansible-modules-extras/blob/devel/system/pam_limits.py#L95) also supports `-`.
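The mismatch is easy to see by comparing the two lists that appear verbatim in the module source included below: the `argument_spec` accepts three values, while the `DOCUMENTATION` block advertises only two.

```
# Values accepted by the module's argument_spec (from pam_limits.py)
pam_types = ['soft', 'hard', '-']

# Choices advertised in the DOCUMENTATION block before the fix
documented_choices = ['hard', 'soft']

# The undocumented value:
print(set(pam_types) - set(documented_choices))  # {'-'}
```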
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, Sebastien Rohaut <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nimport os\nimport os.path\nimport shutil\nimport re\n\nDOCUMENTATION = '''\n---\nmodule: pam_limits\nversion_added: \"2.0\"\nshort_description: Modify Linux PAM limits\ndescription:\n - The M(pam_limits) module modify PAM limits, default in /etc/security/limits.conf.\n For the full documentation, see man limits.conf(5).\noptions:\n domain:\n description:\n - A username, @groupname, wildcard, uid/gid range.\n required: true\n limit_type:\n description:\n - Limit type, see C(man limits) for an explanation\n required: true\n choices: [ \"hard\", \"soft\" ]\n limit_item:\n description:\n - The limit to be set\n required: true\n choices: [ \"core\", \"data\", \"fsize\", \"memlock\", \"nofile\", \"rss\", \"stack\", \"cpu\", \"nproc\", \"as\", \"maxlogins\", \"maxsyslogins\", \"priority\", \"locks\", \"sigpending\", \"msgqueue\", \"nice\", \"rtprio\", \"chroot\" ]\n value:\n description:\n - The value of the limit.\n required: true\n backup:\n description:\n - Create a backup file including the timestamp information so you can get\n the original file back if you somehow clobbered it incorrectly.\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n use_min:\n description:\n - If set to C(yes), the minimal value will be used or conserved.\n If the specified value is inferior to the value in the file, file content is replaced with the new value,\n else content is not modified.\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n use_max:\n description:\n - If set to C(yes), the maximal value will be used or conserved.\n If the specified value is superior to the value in the file, file content is replaced with the new value,\n else content is not modified.\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n dest:\n description:\n - Modify the limits.conf path.\n required: false\n default: \"/etc/security/limits.conf\"\n'''\n\nEXAMPLES = '''\n# Add or modify limits for the user joe\n- pam_limits: domain=joe limit_type=soft limit_item=nofile value=64000\n\n# Add or modify limits for the user joe. 
Keep or set the maximal value\n- pam_limits: domain=joe limit_type=soft limit_item=nofile value=1000000\n'''\n\ndef main():\n\n pam_items = [ 'core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks', 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot' ]\n\n pam_types = [ 'soft', 'hard', '-' ]\n\n limits_conf = '/etc/security/limits.conf'\n\n module = AnsibleModule(\n # not checking because of daisy chain to file module\n argument_spec = dict(\n domain = dict(required=True, type='str'),\n limit_type = dict(required=True, type='str', choices=pam_types),\n limit_item = dict(required=True, type='str', choices=pam_items),\n value = dict(required=True, type='str'),\n use_max = dict(default=False, type='bool'),\n use_min = dict(default=False, type='bool'),\n backup = dict(default=False, type='bool'),\n dest = dict(default=limits_conf, type='str'),\n comment = dict(required=False, default='', type='str')\n )\n )\n\n domain = module.params['domain']\n limit_type = module.params['limit_type']\n limit_item = module.params['limit_item']\n value = module.params['value']\n use_max = module.params['use_max']\n use_min = module.params['use_min']\n backup = module.params['backup']\n limits_conf = module.params['dest']\n new_comment = module.params['comment']\n\n changed = False\n\n if os.path.isfile(limits_conf):\n if not os.access(limits_conf, os.W_OK):\n module.fail_json(msg=\"%s is not writable. Use sudo\" % (limits_conf) )\n else:\n module.fail_json(msg=\"%s is not visible (check presence, access rights, use sudo)\" % (limits_conf) )\n\n if use_max and use_min:\n module.fail_json(msg=\"Cannot use use_min and use_max at the same time.\" )\n\n if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()):\n module.fail_json(msg=\"Argument 'value' can be one of 'unlimited', 'infinity', '-1' or positive number. Refer to manual pages for more details.\")\n\n # Backup\n if backup:\n backup_file = module.backup_local(limits_conf)\n\n space_pattern = re.compile(r'\\s+')\n\n message = ''\n f = open (limits_conf, 'r')\n # Tempfile\n nf = tempfile.NamedTemporaryFile(delete = False)\n\n found = False\n new_value = value\n\n for line in f:\n\n if line.startswith('#'):\n nf.write(line)\n continue\n\n newline = re.sub(space_pattern, ' ', line).strip()\n if not newline:\n nf.write(line)\n continue\n\n # Remove comment in line\n newline = newline.split('#',1)[0]\n try:\n old_comment = line.split('#',1)[1]\n except:\n old_comment = ''\n\n newline = newline.rstrip()\n\n if not new_comment:\n new_comment = old_comment\n\n if new_comment:\n new_comment = \"\\t#\"+new_comment\n\n line_fields = newline.split(' ')\n\n if len(line_fields) != 4:\n nf.write(line)\n continue\n\n line_domain = line_fields[0]\n line_type = line_fields[1]\n line_item = line_fields[2]\n actual_value = line_fields[3]\n\n if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()):\n module.fail_json(msg=\"Invalid configuration of '%s'. 
Current value of %s is unsupported.\" % (limits_conf, line_item))\n\n # Found the line\n if line_domain == domain and line_type == limit_type and line_item == limit_item:\n found = True\n if value == actual_value:\n message = line\n nf.write(line)\n continue\n\n actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1']\n value_unlimited = value in ['unlimited', 'infinity', '-1']\n\n if use_max:\n if value.isdigit() and actual_value.isdigit():\n new_value = max(int(value), int(actual_value))\n elif actual_value_unlimited:\n new_value = actual_value\n else:\n new_value = value\n\n if use_min:\n if value.isdigit() and actual_value.isdigit():\n new_value = min(int(value), int(actual_value))\n elif value_unlimited:\n new_value = actual_value\n else:\n new_value = value\n\n # Change line only if value has changed\n if new_value != actual_value:\n changed = True\n new_limit = domain + \"\\t\" + limit_type + \"\\t\" + limit_item + \"\\t\" + str(new_value) + new_comment + \"\\n\"\n message = new_limit\n nf.write(new_limit)\n else:\n message = line\n nf.write(line)\n else:\n nf.write(line)\n\n if not found:\n changed = True\n new_limit = domain + \"\\t\" + limit_type + \"\\t\" + limit_item + \"\\t\" + str(new_value) + new_comment + \"\\n\"\n message = new_limit\n nf.write(new_limit)\n\n f.close()\n nf.close()\n\n # Copy tempfile to newfile\n module.atomic_move(nf.name, f.name)\n\n res_args = dict(\n changed = changed, msg = message\n )\n\n if backup:\n res_args['backup_file'] = backup_file\n\n module.exit_json(**res_args)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "system/pam_limits.py" } ]
[ { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, Sebastien Rohaut <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nimport os\nimport os.path\nimport shutil\nimport re\n\nDOCUMENTATION = '''\n---\nmodule: pam_limits\nversion_added: \"2.0\"\nshort_description: Modify Linux PAM limits\ndescription:\n - The M(pam_limits) module modify PAM limits, default in /etc/security/limits.conf.\n For the full documentation, see man limits.conf(5).\noptions:\n domain:\n description:\n - A username, @groupname, wildcard, uid/gid range.\n required: true\n limit_type:\n description:\n - Limit type, see C(man limits) for an explanation\n required: true\n choices: [ \"hard\", \"soft\", \"-\" ]\n limit_item:\n description:\n - The limit to be set\n required: true\n choices: [ \"core\", \"data\", \"fsize\", \"memlock\", \"nofile\", \"rss\", \"stack\", \"cpu\", \"nproc\", \"as\", \"maxlogins\", \"maxsyslogins\", \"priority\", \"locks\", \"sigpending\", \"msgqueue\", \"nice\", \"rtprio\", \"chroot\" ]\n value:\n description:\n - The value of the limit.\n required: true\n backup:\n description:\n - Create a backup file including the timestamp information so you can get\n the original file back if you somehow clobbered it incorrectly.\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n use_min:\n description:\n - If set to C(yes), the minimal value will be used or conserved.\n If the specified value is inferior to the value in the file, file content is replaced with the new value,\n else content is not modified.\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n use_max:\n description:\n - If set to C(yes), the maximal value will be used or conserved.\n If the specified value is superior to the value in the file, file content is replaced with the new value,\n else content is not modified.\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n dest:\n description:\n - Modify the limits.conf path.\n required: false\n default: \"/etc/security/limits.conf\"\n'''\n\nEXAMPLES = '''\n# Add or modify limits for the user joe\n- pam_limits: domain=joe limit_type=soft limit_item=nofile value=64000\n\n# Add or modify limits for the user joe. 
Keep or set the maximal value\n- pam_limits: domain=joe limit_type=soft limit_item=nofile value=1000000\n'''\n\ndef main():\n\n pam_items = [ 'core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks', 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot' ]\n\n pam_types = [ 'soft', 'hard', '-' ]\n\n limits_conf = '/etc/security/limits.conf'\n\n module = AnsibleModule(\n # not checking because of daisy chain to file module\n argument_spec = dict(\n domain = dict(required=True, type='str'),\n limit_type = dict(required=True, type='str', choices=pam_types),\n limit_item = dict(required=True, type='str', choices=pam_items),\n value = dict(required=True, type='str'),\n use_max = dict(default=False, type='bool'),\n use_min = dict(default=False, type='bool'),\n backup = dict(default=False, type='bool'),\n dest = dict(default=limits_conf, type='str'),\n comment = dict(required=False, default='', type='str')\n )\n )\n\n domain = module.params['domain']\n limit_type = module.params['limit_type']\n limit_item = module.params['limit_item']\n value = module.params['value']\n use_max = module.params['use_max']\n use_min = module.params['use_min']\n backup = module.params['backup']\n limits_conf = module.params['dest']\n new_comment = module.params['comment']\n\n changed = False\n\n if os.path.isfile(limits_conf):\n if not os.access(limits_conf, os.W_OK):\n module.fail_json(msg=\"%s is not writable. Use sudo\" % (limits_conf) )\n else:\n module.fail_json(msg=\"%s is not visible (check presence, access rights, use sudo)\" % (limits_conf) )\n\n if use_max and use_min:\n module.fail_json(msg=\"Cannot use use_min and use_max at the same time.\" )\n\n if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()):\n module.fail_json(msg=\"Argument 'value' can be one of 'unlimited', 'infinity', '-1' or positive number. Refer to manual pages for more details.\")\n\n # Backup\n if backup:\n backup_file = module.backup_local(limits_conf)\n\n space_pattern = re.compile(r'\\s+')\n\n message = ''\n f = open (limits_conf, 'r')\n # Tempfile\n nf = tempfile.NamedTemporaryFile(delete = False)\n\n found = False\n new_value = value\n\n for line in f:\n\n if line.startswith('#'):\n nf.write(line)\n continue\n\n newline = re.sub(space_pattern, ' ', line).strip()\n if not newline:\n nf.write(line)\n continue\n\n # Remove comment in line\n newline = newline.split('#',1)[0]\n try:\n old_comment = line.split('#',1)[1]\n except:\n old_comment = ''\n\n newline = newline.rstrip()\n\n if not new_comment:\n new_comment = old_comment\n\n if new_comment:\n new_comment = \"\\t#\"+new_comment\n\n line_fields = newline.split(' ')\n\n if len(line_fields) != 4:\n nf.write(line)\n continue\n\n line_domain = line_fields[0]\n line_type = line_fields[1]\n line_item = line_fields[2]\n actual_value = line_fields[3]\n\n if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()):\n module.fail_json(msg=\"Invalid configuration of '%s'. 
Current value of %s is unsupported.\" % (limits_conf, line_item))\n\n # Found the line\n if line_domain == domain and line_type == limit_type and line_item == limit_item:\n found = True\n if value == actual_value:\n message = line\n nf.write(line)\n continue\n\n actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1']\n value_unlimited = value in ['unlimited', 'infinity', '-1']\n\n if use_max:\n if value.isdigit() and actual_value.isdigit():\n new_value = max(int(value), int(actual_value))\n elif actual_value_unlimited:\n new_value = actual_value\n else:\n new_value = value\n\n if use_min:\n if value.isdigit() and actual_value.isdigit():\n new_value = min(int(value), int(actual_value))\n elif value_unlimited:\n new_value = actual_value\n else:\n new_value = value\n\n # Change line only if value has changed\n if new_value != actual_value:\n changed = True\n new_limit = domain + \"\\t\" + limit_type + \"\\t\" + limit_item + \"\\t\" + str(new_value) + new_comment + \"\\n\"\n message = new_limit\n nf.write(new_limit)\n else:\n message = line\n nf.write(line)\n else:\n nf.write(line)\n\n if not found:\n changed = True\n new_limit = domain + \"\\t\" + limit_type + \"\\t\" + limit_item + \"\\t\" + str(new_value) + new_comment + \"\\n\"\n message = new_limit\n nf.write(new_limit)\n\n f.close()\n nf.close()\n\n # Copy tempfile to newfile\n module.atomic_move(nf.name, f.name)\n\n res_args = dict(\n changed = changed, msg = message\n )\n\n if backup:\n res_args['backup_file'] = backup_file\n\n module.exit_json(**res_args)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "system/pam_limits.py" } ]
diff --git a/system/pam_limits.py b/system/pam_limits.py index eb04021c3e0..4003f76d3f8 100644 --- a/system/pam_limits.py +++ b/system/pam_limits.py @@ -40,7 +40,7 @@ description: - Limit type, see C(man limits) for an explanation required: true - choices: [ "hard", "soft" ] + choices: [ "hard", "soft", "-" ] limit_item: description: - The limit to be set
microsoft__botbuilder-python-2050
botbuilder: support regex 2022 and above

Description: I'm building a chatbot with the Azure Bot Builder SDK in conjunction with OpenAI. The project relies on the OpenAIEmbedding class from the langchain package, which uses tiktoken, and this creates a dependency conflict: tiktoken requires regex version 2022 or higher, while the botbuilder packages only allow regex versions up to a 2019 release.

Feature Request: Please relax the regex pin in the botbuilder packages so that regex 2022 or higher can be installed. This would resolve the dependency conflict and enable smoother integration of OpenAI into projects using the Azure Bot Builder SDK.

Additional Information:

Current Behavior: The OpenAIEmbedding class in langchain relies on tiktoken, which needs a regex version that is incompatible with the regex range the Bot Builder packages currently accept.

Desired Behavior: The botbuilder packages should be updated to accept tiktoken's dependency on regex version 2022 or higher.

Impact of the Feature: This would benefit developers building chatbot projects that use the Azure Bot Builder SDK together with OpenAI. It would eliminate the dependency conflict and allow a seamless integration experience.
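A small sketch of the version conflict using the `packaging` library. The `<=2019.08.19` bound is the regex pin in `botbuilder-dialogs` before the change and `>=2022.1.18` is the pin the accompanying patch moves to; treating `2022.1.18` as the minimum the tiktoken tool-chain needs is an assumption made here for illustration.

```
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_pin = SpecifierSet("<=2019.08.19")  # regex pin in botbuilder-dialogs before the change
new_pin = SpecifierSet(">=2022.1.18")   # regex pin after the change

needed = Version("2022.1.18")           # a regex release from the 2022 series

print(needed in old_pin)  # False -> pip cannot satisfy both constraints at once
print(needed in new_pin)  # True  -> the conflict disappears once the pin is relaxed
```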
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"regex<=2019.08.19\",\n \"emoji==1.7.0\",\n \"recognizers-text-date-time>=1.0.2a1\",\n \"recognizers-text-number-with-unit>=1.0.2a1\",\n \"recognizers-text-number>=1.0.2a1\",\n \"recognizers-text>=1.0.2a1\",\n \"recognizers-text-choice>=1.0.2a1\",\n \"babel==2.9.1\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n]\n\nTEST_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"dialogs\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderDialogs\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.dialogs\",\n \"botbuilder.dialogs.prompts\",\n \"botbuilder.dialogs.choices\",\n \"botbuilder.dialogs.skills\",\n \"botbuilder.dialogs.memory\",\n \"botbuilder.dialogs.memory.path_resolvers\",\n \"botbuilder.dialogs.memory.scopes\",\n ],\n install_requires=REQUIRES + TEST_REQUIRES,\n tests_require=TEST_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-dialogs/setup.py" } ]
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"regex>=2022.1.18\",\n \"emoji==1.7.0\",\n \"recognizers-text-date-time>=1.0.2a1\",\n \"recognizers-text-number-with-unit>=1.0.2a1\",\n \"recognizers-text-number>=1.0.2a1\",\n \"recognizers-text>=1.0.2a1\",\n \"recognizers-text-choice>=1.0.2a1\",\n \"babel==2.9.1\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n]\n\nTEST_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"dialogs\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderDialogs\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.dialogs\",\n \"botbuilder.dialogs.prompts\",\n \"botbuilder.dialogs.choices\",\n \"botbuilder.dialogs.skills\",\n \"botbuilder.dialogs.memory\",\n \"botbuilder.dialogs.memory.path_resolvers\",\n \"botbuilder.dialogs.memory.scopes\",\n ],\n install_requires=REQUIRES + TEST_REQUIRES,\n tests_require=TEST_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-dialogs/setup.py" } ]
diff --git a/libraries/botbuilder-dialogs/setup.py b/libraries/botbuilder-dialogs/setup.py index 6e97715a5..574f8bbe7 100644 --- a/libraries/botbuilder-dialogs/setup.py +++ b/libraries/botbuilder-dialogs/setup.py @@ -5,7 +5,7 @@ from setuptools import setup REQUIRES = [ - "regex<=2019.08.19", + "regex>=2022.1.18", "emoji==1.7.0", "recognizers-text-date-time>=1.0.2a1", "recognizers-text-number-with-unit>=1.0.2a1",
coala__coala-4980
Compatibility.py: Add a comment explaining that JSONDecodeError is missing in Python 3.4

difficulty/newcomer

Opened via [gitter](https://gitter.im/coala/coala/?at=59098da68fcce56b205cd7e0) by @jayvdb
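For context, `json.decoder.JSONDecodeError` only exists on Python 3.5 and later; on 3.4 the `json` module raises a plain `ValueError` (and on newer versions `JSONDecodeError` is itself a `ValueError` subclass). A short sketch of the fallback pattern the file uses, plus how the resulting name behaves:

```
import json

try:
    # Available on Python 3.5+ only.
    JSONDecodeError = json.decoder.JSONDecodeError
except AttributeError:
    # Python 3.4 and earlier: invalid JSON raises a plain ValueError.
    JSONDecodeError = ValueError

try:
    json.loads('{not valid json')
except JSONDecodeError as exc:
    # JSONDecodeError on 3.5+, ValueError on 3.4; either way this branch runs.
    print(type(exc).__name__)
```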
[ { "content": "import json\ntry:\n JSONDecodeError = json.decoder.JSONDecodeError\nexcept AttributeError: # pragma Python 3.5,3.6: no cover\n JSONDecodeError = ValueError\n", "path": "coalib/misc/Compatibility.py" } ]
[ { "content": "import json\ntry:\n # JSONDecodeError class is available since Python 3.5.x.\n JSONDecodeError = json.decoder.JSONDecodeError\nexcept AttributeError: # pragma Python 3.5,3.6: no cover\n JSONDecodeError = ValueError\n", "path": "coalib/misc/Compatibility.py" } ]
diff --git a/coalib/misc/Compatibility.py b/coalib/misc/Compatibility.py index f81e818544..f0e63d63b3 100644 --- a/coalib/misc/Compatibility.py +++ b/coalib/misc/Compatibility.py @@ -1,5 +1,6 @@ import json try: + # JSONDecodeError class is available since Python 3.5.x. JSONDecodeError = json.decoder.JSONDecodeError except AttributeError: # pragma Python 3.5,3.6: no cover JSONDecodeError = ValueError
open-telemetry__opentelemetry-python-2962
Fix dead link

There is a dead link in the CHANGELOG, [here](https://github.com/open-telemetry/opentelemetry-python/blob/main/CHANGELOG.md?plain=1#L13).
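For context on where these CHANGELOG links come from: in the release helper included below, `update_changelogs()` builds both the `[Unreleased]` compare URL and the release tag URL from a single version string, and `release_args()` passes it `"-".join(updated_versions)`, so the string interpolated into the URLs can be a combined one. A minimal rendering of that template follows; the combined version string used here is only an example, not taken from an actual release.

```
from datetime import datetime

# Example only: what "-".join(updated_versions) might look like for two release groups.
version = "1.12.0-0.33b0"
today = datetime.now().strftime("%Y-%m-%d")

new_entry = """## [Unreleased](https://github.com/open-telemetry/opentelemetry-python/compare/v{version}...HEAD)

## [{version}](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v{version}) - {today}

""".format(version=version, today=today)

print(new_entry)
```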
[ { "content": "#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport re\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nfrom configparser import ConfigParser\nfrom datetime import datetime\nfrom inspect import cleandoc\nfrom itertools import chain\nfrom os.path import basename\nfrom pathlib import Path, PurePath\n\nDEFAULT_ALLSEP = \" \"\nDEFAULT_ALLFMT = \"{rel}\"\n\n\ndef unique(elems):\n seen = set()\n for elem in elems:\n if elem not in seen:\n yield elem\n seen.add(elem)\n\n\nsubprocess_run = subprocess.run\n\n\ndef extraargs_help(calledcmd):\n return cleandoc(\n f\"\"\"\n Additional arguments to pass on to {calledcmd}.\n\n This is collected from any trailing arguments passed to `%(prog)s`.\n Use an initial `--` to separate them from regular arguments.\n \"\"\"\n )\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(description=\"Development helper script.\")\n parser.set_defaults(parser=parser)\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"Only display what would be done, don't actually do anything.\",\n )\n subparsers = parser.add_subparsers(metavar=\"COMMAND\")\n subparsers.required = True\n\n excparser = subparsers.add_parser(\n \"exec\",\n help=\"Run a command for each or all targets.\",\n formatter_class=argparse.RawTextHelpFormatter,\n description=cleandoc(\n \"\"\"Run a command according to the `format` argument for each or all targets.\n\n This is an advanced command that is used internally by other commands.\n\n For example, to install all distributions in this repository\n editable, you could use:\n\n scripts/eachdist.py exec \"python -m pip install -e {}\"\n\n This will run pip for all distributions which is quite slow. It gets\n a bit faster if we only invoke pip once but with all the paths\n gathered together, which can be achieved by using `--all`:\n\n scripts/eachdist.py exec \"python -m pip install {}\" --all \"-e {}\"\n\n The sortfirst option in the DEFAULT section of eachdist.ini makes\n sure that dependencies are installed before their dependents.\n\n Search for usages of `parse_subargs` in the source code of this script\n to see more examples.\n\n This command first collects target paths and then executes\n commands according to `format` and `--all`.\n\n Target paths are initially all Python distribution root paths\n (as determined by the existence of pyproject.toml, etc. files).\n They are then augmented according to the section of the\n `PROJECT_ROOT/eachdist.ini` config file specified by the `--mode` option.\n\n The following config options are available (and processed in that order):\n\n - `extraroots`: List of project root-relative glob expressions.\n The resulting paths will be added.\n - `sortfirst`: List of glob expressions.\n Any matching paths will be put to the front of the path list,\n in the same order they appear in this option. If more than one\n glob matches, ordering is according to the first.\n - `subglob`: List of glob expressions. 
Each path added so far is removed\n and replaced with the result of all glob expressions relative to it (in\n order of the glob expressions).\n\n After all this, any duplicate paths are removed (the first occurrence remains).\n \"\"\"\n ),\n )\n excparser.set_defaults(func=execute_args)\n excparser.add_argument(\n \"format\",\n help=cleandoc(\n \"\"\"Format string for the command to execute.\n\n The available replacements depend on whether `--all` is specified.\n If `--all` was specified, there is only a single replacement,\n `{}`, that is replaced with the string that is generated from\n joining all targets formatted with `--all` to a single string\n with the value of `--allsep` as separator.\n\n If `--all` was not specified, the following replacements are available:\n\n - `{}`: the absolute path to the current target in POSIX format\n (with forward slashes)\n - `{rel}`: like `{}` but relative to the project root.\n - `{raw}`: the absolute path to the current target in native format\n (thus exactly the same as `{}` on Unix but with backslashes on Windows).\n - `{rawrel}`: like `{raw}` but relative to the project root.\n\n The resulting string is then split according to POSIX shell rules\n (so you can use quotation marks or backslashes to handle arguments\n containing spaces).\n\n The first token is the name of the executable to run, the remaining\n tokens are the arguments.\n\n Note that a shell is *not* involved by default.\n You can add bash/sh/cmd/powershell yourself to the format if you want.\n\n If `--all` was specified, the resulting command is simply executed once.\n Otherwise, the command is executed for each found target. In both cases,\n the project root is the working directory.\n \"\"\"\n ),\n )\n excparser.add_argument(\n \"--all\",\n nargs=\"?\",\n const=DEFAULT_ALLFMT,\n metavar=\"ALLFORMAT\",\n help=cleandoc(\n \"\"\"Instead of running the command for each target, join all target\n paths together to run a single command.\n\n This option optionally takes a format string to apply to each path. 
The\n available replacements are the ones that would be available for `format`\n if `--all` was not specified.\n\n Default ALLFORMAT if this flag is specified: `%(const)s`.\n \"\"\"\n ),\n )\n excparser.add_argument(\n \"--allsep\",\n help=cleandoc(\n \"\"\"Separator string for the strings resulting from `--all`.\n Only valid if `--all` is specified.\n \"\"\"\n ),\n )\n excparser.add_argument(\n \"--allowexitcode\",\n type=int,\n action=\"append\",\n default=[0],\n help=cleandoc(\n \"\"\"The given command exit code is treated as success and does not abort execution.\n Can be specified multiple times.\n \"\"\"\n ),\n )\n excparser.add_argument(\n \"--mode\",\n \"-m\",\n default=\"DEFAULT\",\n help=cleandoc(\n \"\"\"Section of config file to use for target selection configuration.\n See description of exec for available options.\"\"\"\n ),\n )\n\n instparser = subparsers.add_parser(\n \"install\", help=\"Install all distributions.\"\n )\n\n def setup_instparser(instparser):\n instparser.set_defaults(func=install_args)\n instparser.add_argument(\n \"pipargs\", nargs=argparse.REMAINDER, help=extraargs_help(\"pip\")\n )\n\n setup_instparser(instparser)\n instparser.add_argument(\"--editable\", \"-e\", action=\"store_true\")\n instparser.add_argument(\"--with-test-deps\", action=\"store_true\")\n instparser.add_argument(\"--with-dev-deps\", action=\"store_true\")\n instparser.add_argument(\"--eager-upgrades\", action=\"store_true\")\n\n devparser = subparsers.add_parser(\n \"develop\",\n help=\"Install all distributions editable + dev dependencies.\",\n )\n setup_instparser(devparser)\n devparser.set_defaults(\n editable=True,\n with_dev_deps=True,\n eager_upgrades=True,\n with_test_deps=True,\n )\n\n lintparser = subparsers.add_parser(\n \"lint\", help=\"Lint everything, autofixing if possible.\"\n )\n lintparser.add_argument(\"--check-only\", action=\"store_true\")\n lintparser.set_defaults(func=lint_args)\n\n testparser = subparsers.add_parser(\n \"test\",\n help=\"Test everything (run pytest yourself for more complex operations).\",\n )\n testparser.set_defaults(func=test_args)\n testparser.add_argument(\n \"pytestargs\", nargs=argparse.REMAINDER, help=extraargs_help(\"pytest\")\n )\n\n releaseparser = subparsers.add_parser(\n \"update_versions\",\n help=\"Updates version numbers, used by maintainers and CI\",\n )\n releaseparser.set_defaults(func=release_args)\n releaseparser.add_argument(\"--versions\", required=True)\n releaseparser.add_argument(\n \"releaseargs\", nargs=argparse.REMAINDER, help=extraargs_help(\"pytest\")\n )\n\n fmtparser = subparsers.add_parser(\n \"format\",\n help=\"Formats all source code with black and isort.\",\n )\n fmtparser.set_defaults(func=format_args)\n fmtparser.add_argument(\n \"--path\",\n required=False,\n help=\"Format only this path instead of entire repository\",\n )\n\n versionparser = subparsers.add_parser(\n \"version\",\n help=\"Get the version for a release\",\n )\n versionparser.set_defaults(func=version_args)\n versionparser.add_argument(\n \"--mode\",\n \"-m\",\n default=\"DEFAULT\",\n help=cleandoc(\n \"\"\"Section of config file to use for target selection configuration.\n See description of exec for available options.\"\"\"\n ),\n )\n\n return parser.parse_args(args)\n\n\ndef find_projectroot(search_start=Path(\".\")):\n root = search_start.resolve()\n for root in chain((root,), root.parents):\n if any((root / marker).exists() for marker in (\".git\", \"tox.ini\")):\n return root\n return None\n\n\ndef find_targets_unordered(rootpath):\n for 
subdir in rootpath.iterdir():\n if not subdir.is_dir():\n continue\n if subdir.name.startswith(\".\") or subdir.name.startswith(\"venv\"):\n continue\n if any(\n (subdir / marker).exists()\n for marker in (\"setup.py\", \"pyproject.toml\")\n ):\n yield subdir\n else:\n yield from find_targets_unordered(subdir)\n\n\ndef getlistcfg(strval):\n return [\n val.strip()\n for line in strval.split(\"\\n\")\n for val in line.split(\",\")\n if val.strip()\n ]\n\n\ndef find_targets(mode, rootpath):\n if not rootpath:\n sys.exit(\"Could not find a root directory.\")\n\n cfg = ConfigParser()\n cfg.read(str(rootpath / \"eachdist.ini\"))\n mcfg = cfg[mode]\n\n targets = list(find_targets_unordered(rootpath))\n if \"extraroots\" in mcfg:\n targets += [\n path\n for extraglob in getlistcfg(mcfg[\"extraroots\"])\n for path in rootpath.glob(extraglob)\n ]\n if \"sortfirst\" in mcfg:\n sortfirst = getlistcfg(mcfg[\"sortfirst\"])\n\n def keyfunc(path):\n path = path.relative_to(rootpath)\n for idx, pattern in enumerate(sortfirst):\n if path.match(pattern):\n return idx\n return float(\"inf\")\n\n targets.sort(key=keyfunc)\n if \"ignore\" in mcfg:\n ignore = getlistcfg(mcfg[\"ignore\"])\n\n def filter_func(path):\n path = path.relative_to(rootpath)\n for pattern in ignore:\n if path.match(pattern):\n return False\n return True\n\n filtered = filter(filter_func, targets)\n targets = list(filtered)\n\n subglobs = getlistcfg(mcfg.get(\"subglob\", \"\"))\n if subglobs:\n targets = [\n newentry\n for newentry in (\n target / subdir\n for target in targets\n for subglob in subglobs\n # We need to special-case the dot, because glob fails to parse that with an IndexError.\n for subdir in (\n (target,) if subglob == \".\" else target.glob(subglob)\n )\n )\n if \".egg-info\" not in str(newentry) and newentry.exists()\n ]\n\n return list(unique(targets))\n\n\ndef runsubprocess(dry_run, params, *args, **kwargs):\n cmdstr = join_args(params)\n if dry_run:\n print(cmdstr)\n return None\n\n # Py < 3.6 compat.\n cwd = kwargs.get(\"cwd\")\n if cwd and isinstance(cwd, PurePath):\n kwargs[\"cwd\"] = str(cwd)\n\n check = kwargs.pop(\"check\") # Enforce specifying check\n\n print(\">>>\", cmdstr, file=sys.stderr, flush=True)\n\n # This is a workaround for subprocess.run(['python']) leaving the virtualenv on Win32.\n # The cause for this is that when running the python.exe in a virtualenv,\n # the wrapper executable launches the global python as a subprocess and the search sequence\n # for CreateProcessW which subprocess.run and Popen use is a follows\n # (https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessw):\n # > 1. The directory from which the application loaded.\n # This will be the directory of the global python.exe, not the venv directory, due to the suprocess mechanism.\n # > 6. 
The directories that are listed in the PATH environment variable.\n # Only this would find the \"correct\" python.exe.\n\n params = list(params)\n executable = shutil.which(params[0])\n if executable:\n params[0] = executable\n try:\n return subprocess_run(params, *args, check=check, **kwargs)\n except OSError as exc:\n raise ValueError(\n \"Failed executing \" + repr(params) + \": \" + str(exc)\n ) from exc\n\n\ndef execute_args(args):\n if args.allsep and not args.all:\n args.parser.error(\"--allsep specified but not --all.\")\n\n if args.all and not args.allsep:\n args.allsep = DEFAULT_ALLSEP\n\n rootpath = find_projectroot()\n targets = find_targets(args.mode, rootpath)\n if not targets:\n sys.exit(f\"Error: No targets selected (root: {rootpath})\")\n\n def fmt_for_path(fmt, path):\n return fmt.format(\n path.as_posix(),\n rel=path.relative_to(rootpath).as_posix(),\n raw=path,\n rawrel=path.relative_to(rootpath),\n )\n\n def _runcmd(cmd):\n result = runsubprocess(\n args.dry_run, shlex.split(cmd), cwd=rootpath, check=False\n )\n if result is not None and result.returncode not in args.allowexitcode:\n print(\n f\"'{cmd}' failed with code {result.returncode}\",\n file=sys.stderr,\n )\n sys.exit(result.returncode)\n\n if args.all:\n allstr = args.allsep.join(\n fmt_for_path(args.all, path) for path in targets\n )\n cmd = args.format.format(allstr)\n _runcmd(cmd)\n else:\n for target in targets:\n cmd = fmt_for_path(args.format, target)\n _runcmd(cmd)\n\n\ndef clean_remainder_args(remainder_args):\n if remainder_args and remainder_args[0] == \"--\":\n del remainder_args[0]\n\n\ndef join_args(arglist):\n return \" \".join(map(shlex.quote, arglist))\n\n\ndef install_args(args):\n clean_remainder_args(args.pipargs)\n if args.eager_upgrades:\n args.pipargs += [\"--upgrade-strategy=eager\"]\n\n if args.with_dev_deps:\n runsubprocess(\n args.dry_run,\n [\n \"python\",\n \"-m\",\n \"pip\",\n \"install\",\n \"--upgrade\",\n \"pip\",\n \"setuptools\",\n \"wheel\",\n ]\n + args.pipargs,\n check=True,\n )\n\n allfmt = \"-e 'file://{}\" if args.editable else \"'file://{}\"\n # packages should provide an extra_requires that is named\n # 'test', to denote test dependencies.\n extras = []\n if args.with_test_deps:\n extras.append(\"test\")\n if extras:\n allfmt += f\"[{','.join(extras)}]\"\n # note the trailing single quote, to close the quote opened above.\n allfmt += \"'\"\n\n execute_args(\n parse_subargs(\n args,\n (\n \"exec\",\n \"python -m pip install {} \" + join_args(args.pipargs),\n \"--all\",\n allfmt,\n ),\n )\n )\n if args.with_dev_deps:\n rootpath = find_projectroot()\n runsubprocess(\n args.dry_run,\n [\n \"python\",\n \"-m\",\n \"pip\",\n \"install\",\n \"--upgrade\",\n \"-r\",\n str(rootpath / \"dev-requirements.txt\"),\n ]\n + args.pipargs,\n check=True,\n )\n\n\ndef parse_subargs(parentargs, args):\n subargs = parse_args(args)\n subargs.dry_run = parentargs.dry_run or subargs.dry_run\n return subargs\n\n\ndef lint_args(args):\n rootdir = str(find_projectroot())\n\n runsubprocess(\n args.dry_run,\n (\"black\", \"--config\", \"pyproject.toml\", \".\") + ((\"--diff\", \"--check\") if args.check_only else ()),\n cwd=rootdir,\n check=True,\n )\n runsubprocess(\n args.dry_run,\n (\"isort\", \"--settings-path\", \".isort.cfg\", \".\")\n + ((\"--diff\", \"--check-only\") if args.check_only else ()),\n cwd=rootdir,\n check=True,\n )\n runsubprocess(args.dry_run, (\"flake8\", \"--config\", \".flake8\", rootdir), check=True)\n execute_args(\n parse_subargs(\n args, (\"exec\", \"pylint {}\", 
\"--all\", \"--mode\", \"lintroots\")\n )\n )\n execute_args(\n parse_subargs(\n args,\n (\n \"exec\",\n \"python scripts/check_for_valid_readme.py {}\",\n \"--all\",\n ),\n )\n )\n\n\ndef update_changelog(path, version, new_entry):\n unreleased_changes = False\n try:\n with open(path, encoding=\"utf-8\") as changelog:\n text = changelog.read()\n if f\"## [{version}]\" in text:\n raise AttributeError(\n f\"{path} already contains version {version}\"\n )\n with open(path, encoding=\"utf-8\") as changelog:\n for line in changelog:\n if line.startswith(\"## [Unreleased]\"):\n unreleased_changes = False\n elif line.startswith(\"## \"):\n break\n elif len(line.strip()) > 0:\n unreleased_changes = True\n\n except FileNotFoundError:\n print(f\"file missing: {path}\")\n return\n\n if unreleased_changes:\n print(f\"updating: {path}\")\n text = re.sub(r\"## \\[Unreleased\\].*\", new_entry, text)\n with open(path, \"w\", encoding=\"utf-8\") as changelog:\n changelog.write(text)\n\n\ndef update_changelogs(version):\n today = datetime.now().strftime(\"%Y-%m-%d\")\n new_entry = \"\"\"## [Unreleased](https://github.com/open-telemetry/opentelemetry-python/compare/v{version}...HEAD)\n\n## [{version}](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v{version}) - {today}\n\n\"\"\".format(\n version=version, today=today\n )\n errors = False\n try:\n update_changelog(\"./CHANGELOG.md\", version, new_entry)\n except Exception as err: # pylint: disable=broad-except\n print(str(err))\n errors = True\n\n if errors:\n sys.exit(1)\n\n\ndef find(name, path):\n for root, _, files in os.walk(path):\n if name in files:\n return os.path.join(root, name)\n return None\n\n\ndef filter_packages(targets, packages):\n filtered_packages = []\n for target in targets:\n for pkg in packages:\n if pkg in str(target):\n filtered_packages.append(target)\n break\n return filtered_packages\n\n\ndef update_version_files(targets, version, packages):\n print(\"updating version.py files\")\n targets = filter_packages(targets, packages)\n update_files(\n targets,\n \"version.py\",\n \"__version__ .*\",\n f'__version__ = \"{version}\"',\n )\n\n\ndef update_dependencies(targets, version, packages):\n print(\"updating dependencies\")\n for pkg in packages:\n update_files(\n targets,\n \"pyproject.toml\",\n rf\"({basename(pkg)}.*)==(.*)\",\n r\"\\1== \" + version + '\",',\n )\n\n\ndef update_files(targets, filename, search, replace):\n errors = False\n for target in targets:\n curr_file = find(filename, target)\n if curr_file is None:\n print(f\"file missing: {target}/{filename}\")\n continue\n\n with open(curr_file, encoding=\"utf-8\") as _file:\n text = _file.read()\n\n if replace in text:\n print(f\"{curr_file} already contains {replace}\")\n continue\n\n with open(curr_file, \"w\", encoding=\"utf-8\") as _file:\n _file.write(re.sub(search, replace, text))\n\n if errors:\n sys.exit(1)\n\n\ndef release_args(args):\n print(\"preparing release\")\n\n rootpath = find_projectroot()\n targets = list(find_targets_unordered(rootpath))\n cfg = ConfigParser()\n cfg.read(str(find_projectroot() / \"eachdist.ini\"))\n versions = args.versions\n updated_versions = []\n for group in versions.split(\",\"):\n mcfg = cfg[group]\n version = mcfg[\"version\"]\n updated_versions.append(version)\n packages = mcfg[\"packages\"].split()\n print(f\"update {group} packages to {version}\")\n update_dependencies(targets, version, packages)\n update_version_files(targets, version, packages)\n\n 
update_changelogs(\"-\".join(updated_versions))\n\n\ndef test_args(args):\n clean_remainder_args(args.pytestargs)\n execute_args(\n parse_subargs(\n args,\n (\n \"exec\",\n \"pytest {} \" + join_args(args.pytestargs),\n \"--mode\",\n \"testroots\",\n ),\n )\n )\n\n\ndef format_args(args):\n root_dir = format_dir = str(find_projectroot())\n if args.path:\n format_dir = os.path.join(format_dir, args.path)\n\n runsubprocess(\n args.dry_run,\n (\"black\", \"--config\", f\"{root_dir}/pyproject.toml\", \".\"),\n cwd=format_dir,\n check=True,\n )\n runsubprocess(\n args.dry_run,\n (\"isort\", \"--settings-path\", f\"{root_dir}/.isort.cfg\", \"--profile\", \"black\", \".\"),\n cwd=format_dir,\n check=True,\n )\n\n\ndef version_args(args):\n cfg = ConfigParser()\n cfg.read(str(find_projectroot() / \"eachdist.ini\"))\n print(cfg[args.mode][\"version\"])\n\n\ndef main():\n args = parse_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/eachdist.py" } ]
[ { "content": "#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport re\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nfrom configparser import ConfigParser\nfrom datetime import datetime\nfrom inspect import cleandoc\nfrom itertools import chain\nfrom os.path import basename\nfrom pathlib import Path, PurePath\n\nDEFAULT_ALLSEP = \" \"\nDEFAULT_ALLFMT = \"{rel}\"\n\n\ndef unique(elems):\n seen = set()\n for elem in elems:\n if elem not in seen:\n yield elem\n seen.add(elem)\n\n\nsubprocess_run = subprocess.run\n\n\ndef extraargs_help(calledcmd):\n return cleandoc(\n f\"\"\"\n Additional arguments to pass on to {calledcmd}.\n\n This is collected from any trailing arguments passed to `%(prog)s`.\n Use an initial `--` to separate them from regular arguments.\n \"\"\"\n )\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(description=\"Development helper script.\")\n parser.set_defaults(parser=parser)\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"Only display what would be done, don't actually do anything.\",\n )\n subparsers = parser.add_subparsers(metavar=\"COMMAND\")\n subparsers.required = True\n\n excparser = subparsers.add_parser(\n \"exec\",\n help=\"Run a command for each or all targets.\",\n formatter_class=argparse.RawTextHelpFormatter,\n description=cleandoc(\n \"\"\"Run a command according to the `format` argument for each or all targets.\n\n This is an advanced command that is used internally by other commands.\n\n For example, to install all distributions in this repository\n editable, you could use:\n\n scripts/eachdist.py exec \"python -m pip install -e {}\"\n\n This will run pip for all distributions which is quite slow. It gets\n a bit faster if we only invoke pip once but with all the paths\n gathered together, which can be achieved by using `--all`:\n\n scripts/eachdist.py exec \"python -m pip install {}\" --all \"-e {}\"\n\n The sortfirst option in the DEFAULT section of eachdist.ini makes\n sure that dependencies are installed before their dependents.\n\n Search for usages of `parse_subargs` in the source code of this script\n to see more examples.\n\n This command first collects target paths and then executes\n commands according to `format` and `--all`.\n\n Target paths are initially all Python distribution root paths\n (as determined by the existence of pyproject.toml, etc. files).\n They are then augmented according to the section of the\n `PROJECT_ROOT/eachdist.ini` config file specified by the `--mode` option.\n\n The following config options are available (and processed in that order):\n\n - `extraroots`: List of project root-relative glob expressions.\n The resulting paths will be added.\n - `sortfirst`: List of glob expressions.\n Any matching paths will be put to the front of the path list,\n in the same order they appear in this option. If more than one\n glob matches, ordering is according to the first.\n - `subglob`: List of glob expressions. 
Each path added so far is removed\n and replaced with the result of all glob expressions relative to it (in\n order of the glob expressions).\n\n After all this, any duplicate paths are removed (the first occurrence remains).\n \"\"\"\n ),\n )\n excparser.set_defaults(func=execute_args)\n excparser.add_argument(\n \"format\",\n help=cleandoc(\n \"\"\"Format string for the command to execute.\n\n The available replacements depend on whether `--all` is specified.\n If `--all` was specified, there is only a single replacement,\n `{}`, that is replaced with the string that is generated from\n joining all targets formatted with `--all` to a single string\n with the value of `--allsep` as separator.\n\n If `--all` was not specified, the following replacements are available:\n\n - `{}`: the absolute path to the current target in POSIX format\n (with forward slashes)\n - `{rel}`: like `{}` but relative to the project root.\n - `{raw}`: the absolute path to the current target in native format\n (thus exactly the same as `{}` on Unix but with backslashes on Windows).\n - `{rawrel}`: like `{raw}` but relative to the project root.\n\n The resulting string is then split according to POSIX shell rules\n (so you can use quotation marks or backslashes to handle arguments\n containing spaces).\n\n The first token is the name of the executable to run, the remaining\n tokens are the arguments.\n\n Note that a shell is *not* involved by default.\n You can add bash/sh/cmd/powershell yourself to the format if you want.\n\n If `--all` was specified, the resulting command is simply executed once.\n Otherwise, the command is executed for each found target. In both cases,\n the project root is the working directory.\n \"\"\"\n ),\n )\n excparser.add_argument(\n \"--all\",\n nargs=\"?\",\n const=DEFAULT_ALLFMT,\n metavar=\"ALLFORMAT\",\n help=cleandoc(\n \"\"\"Instead of running the command for each target, join all target\n paths together to run a single command.\n\n This option optionally takes a format string to apply to each path. 
The\n available replacements are the ones that would be available for `format`\n if `--all` was not specified.\n\n Default ALLFORMAT if this flag is specified: `%(const)s`.\n \"\"\"\n ),\n )\n excparser.add_argument(\n \"--allsep\",\n help=cleandoc(\n \"\"\"Separator string for the strings resulting from `--all`.\n Only valid if `--all` is specified.\n \"\"\"\n ),\n )\n excparser.add_argument(\n \"--allowexitcode\",\n type=int,\n action=\"append\",\n default=[0],\n help=cleandoc(\n \"\"\"The given command exit code is treated as success and does not abort execution.\n Can be specified multiple times.\n \"\"\"\n ),\n )\n excparser.add_argument(\n \"--mode\",\n \"-m\",\n default=\"DEFAULT\",\n help=cleandoc(\n \"\"\"Section of config file to use for target selection configuration.\n See description of exec for available options.\"\"\"\n ),\n )\n\n instparser = subparsers.add_parser(\n \"install\", help=\"Install all distributions.\"\n )\n\n def setup_instparser(instparser):\n instparser.set_defaults(func=install_args)\n instparser.add_argument(\n \"pipargs\", nargs=argparse.REMAINDER, help=extraargs_help(\"pip\")\n )\n\n setup_instparser(instparser)\n instparser.add_argument(\"--editable\", \"-e\", action=\"store_true\")\n instparser.add_argument(\"--with-test-deps\", action=\"store_true\")\n instparser.add_argument(\"--with-dev-deps\", action=\"store_true\")\n instparser.add_argument(\"--eager-upgrades\", action=\"store_true\")\n\n devparser = subparsers.add_parser(\n \"develop\",\n help=\"Install all distributions editable + dev dependencies.\",\n )\n setup_instparser(devparser)\n devparser.set_defaults(\n editable=True,\n with_dev_deps=True,\n eager_upgrades=True,\n with_test_deps=True,\n )\n\n lintparser = subparsers.add_parser(\n \"lint\", help=\"Lint everything, autofixing if possible.\"\n )\n lintparser.add_argument(\"--check-only\", action=\"store_true\")\n lintparser.set_defaults(func=lint_args)\n\n testparser = subparsers.add_parser(\n \"test\",\n help=\"Test everything (run pytest yourself for more complex operations).\",\n )\n testparser.set_defaults(func=test_args)\n testparser.add_argument(\n \"pytestargs\", nargs=argparse.REMAINDER, help=extraargs_help(\"pytest\")\n )\n\n releaseparser = subparsers.add_parser(\n \"update_versions\",\n help=\"Updates version numbers, used by maintainers and CI\",\n )\n releaseparser.set_defaults(func=release_args)\n releaseparser.add_argument(\"--versions\", required=True)\n releaseparser.add_argument(\n \"releaseargs\", nargs=argparse.REMAINDER, help=extraargs_help(\"pytest\")\n )\n\n fmtparser = subparsers.add_parser(\n \"format\",\n help=\"Formats all source code with black and isort.\",\n )\n fmtparser.set_defaults(func=format_args)\n fmtparser.add_argument(\n \"--path\",\n required=False,\n help=\"Format only this path instead of entire repository\",\n )\n\n versionparser = subparsers.add_parser(\n \"version\",\n help=\"Get the version for a release\",\n )\n versionparser.set_defaults(func=version_args)\n versionparser.add_argument(\n \"--mode\",\n \"-m\",\n default=\"DEFAULT\",\n help=cleandoc(\n \"\"\"Section of config file to use for target selection configuration.\n See description of exec for available options.\"\"\"\n ),\n )\n\n return parser.parse_args(args)\n\n\ndef find_projectroot(search_start=Path(\".\")):\n root = search_start.resolve()\n for root in chain((root,), root.parents):\n if any((root / marker).exists() for marker in (\".git\", \"tox.ini\")):\n return root\n return None\n\n\ndef find_targets_unordered(rootpath):\n for 
subdir in rootpath.iterdir():\n if not subdir.is_dir():\n continue\n if subdir.name.startswith(\".\") or subdir.name.startswith(\"venv\"):\n continue\n if any(\n (subdir / marker).exists()\n for marker in (\"setup.py\", \"pyproject.toml\")\n ):\n yield subdir\n else:\n yield from find_targets_unordered(subdir)\n\n\ndef getlistcfg(strval):\n return [\n val.strip()\n for line in strval.split(\"\\n\")\n for val in line.split(\",\")\n if val.strip()\n ]\n\n\ndef find_targets(mode, rootpath):\n if not rootpath:\n sys.exit(\"Could not find a root directory.\")\n\n cfg = ConfigParser()\n cfg.read(str(rootpath / \"eachdist.ini\"))\n mcfg = cfg[mode]\n\n targets = list(find_targets_unordered(rootpath))\n if \"extraroots\" in mcfg:\n targets += [\n path\n for extraglob in getlistcfg(mcfg[\"extraroots\"])\n for path in rootpath.glob(extraglob)\n ]\n if \"sortfirst\" in mcfg:\n sortfirst = getlistcfg(mcfg[\"sortfirst\"])\n\n def keyfunc(path):\n path = path.relative_to(rootpath)\n for idx, pattern in enumerate(sortfirst):\n if path.match(pattern):\n return idx\n return float(\"inf\")\n\n targets.sort(key=keyfunc)\n if \"ignore\" in mcfg:\n ignore = getlistcfg(mcfg[\"ignore\"])\n\n def filter_func(path):\n path = path.relative_to(rootpath)\n for pattern in ignore:\n if path.match(pattern):\n return False\n return True\n\n filtered = filter(filter_func, targets)\n targets = list(filtered)\n\n subglobs = getlistcfg(mcfg.get(\"subglob\", \"\"))\n if subglobs:\n targets = [\n newentry\n for newentry in (\n target / subdir\n for target in targets\n for subglob in subglobs\n # We need to special-case the dot, because glob fails to parse that with an IndexError.\n for subdir in (\n (target,) if subglob == \".\" else target.glob(subglob)\n )\n )\n if \".egg-info\" not in str(newentry) and newentry.exists()\n ]\n\n return list(unique(targets))\n\n\ndef runsubprocess(dry_run, params, *args, **kwargs):\n cmdstr = join_args(params)\n if dry_run:\n print(cmdstr)\n return None\n\n # Py < 3.6 compat.\n cwd = kwargs.get(\"cwd\")\n if cwd and isinstance(cwd, PurePath):\n kwargs[\"cwd\"] = str(cwd)\n\n check = kwargs.pop(\"check\") # Enforce specifying check\n\n print(\">>>\", cmdstr, file=sys.stderr, flush=True)\n\n # This is a workaround for subprocess.run(['python']) leaving the virtualenv on Win32.\n # The cause for this is that when running the python.exe in a virtualenv,\n # the wrapper executable launches the global python as a subprocess and the search sequence\n # for CreateProcessW which subprocess.run and Popen use is a follows\n # (https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessw):\n # > 1. The directory from which the application loaded.\n # This will be the directory of the global python.exe, not the venv directory, due to the suprocess mechanism.\n # > 6. 
The directories that are listed in the PATH environment variable.\n # Only this would find the \"correct\" python.exe.\n\n params = list(params)\n executable = shutil.which(params[0])\n if executable:\n params[0] = executable\n try:\n return subprocess_run(params, *args, check=check, **kwargs)\n except OSError as exc:\n raise ValueError(\n \"Failed executing \" + repr(params) + \": \" + str(exc)\n ) from exc\n\n\ndef execute_args(args):\n if args.allsep and not args.all:\n args.parser.error(\"--allsep specified but not --all.\")\n\n if args.all and not args.allsep:\n args.allsep = DEFAULT_ALLSEP\n\n rootpath = find_projectroot()\n targets = find_targets(args.mode, rootpath)\n if not targets:\n sys.exit(f\"Error: No targets selected (root: {rootpath})\")\n\n def fmt_for_path(fmt, path):\n return fmt.format(\n path.as_posix(),\n rel=path.relative_to(rootpath).as_posix(),\n raw=path,\n rawrel=path.relative_to(rootpath),\n )\n\n def _runcmd(cmd):\n result = runsubprocess(\n args.dry_run, shlex.split(cmd), cwd=rootpath, check=False\n )\n if result is not None and result.returncode not in args.allowexitcode:\n print(\n f\"'{cmd}' failed with code {result.returncode}\",\n file=sys.stderr,\n )\n sys.exit(result.returncode)\n\n if args.all:\n allstr = args.allsep.join(\n fmt_for_path(args.all, path) for path in targets\n )\n cmd = args.format.format(allstr)\n _runcmd(cmd)\n else:\n for target in targets:\n cmd = fmt_for_path(args.format, target)\n _runcmd(cmd)\n\n\ndef clean_remainder_args(remainder_args):\n if remainder_args and remainder_args[0] == \"--\":\n del remainder_args[0]\n\n\ndef join_args(arglist):\n return \" \".join(map(shlex.quote, arglist))\n\n\ndef install_args(args):\n clean_remainder_args(args.pipargs)\n if args.eager_upgrades:\n args.pipargs += [\"--upgrade-strategy=eager\"]\n\n if args.with_dev_deps:\n runsubprocess(\n args.dry_run,\n [\n \"python\",\n \"-m\",\n \"pip\",\n \"install\",\n \"--upgrade\",\n \"pip\",\n \"setuptools\",\n \"wheel\",\n ]\n + args.pipargs,\n check=True,\n )\n\n allfmt = \"-e 'file://{}\" if args.editable else \"'file://{}\"\n # packages should provide an extra_requires that is named\n # 'test', to denote test dependencies.\n extras = []\n if args.with_test_deps:\n extras.append(\"test\")\n if extras:\n allfmt += f\"[{','.join(extras)}]\"\n # note the trailing single quote, to close the quote opened above.\n allfmt += \"'\"\n\n execute_args(\n parse_subargs(\n args,\n (\n \"exec\",\n \"python -m pip install {} \" + join_args(args.pipargs),\n \"--all\",\n allfmt,\n ),\n )\n )\n if args.with_dev_deps:\n rootpath = find_projectroot()\n runsubprocess(\n args.dry_run,\n [\n \"python\",\n \"-m\",\n \"pip\",\n \"install\",\n \"--upgrade\",\n \"-r\",\n str(rootpath / \"dev-requirements.txt\"),\n ]\n + args.pipargs,\n check=True,\n )\n\n\ndef parse_subargs(parentargs, args):\n subargs = parse_args(args)\n subargs.dry_run = parentargs.dry_run or subargs.dry_run\n return subargs\n\n\ndef lint_args(args):\n rootdir = str(find_projectroot())\n\n runsubprocess(\n args.dry_run,\n (\"black\", \"--config\", \"pyproject.toml\", \".\") + ((\"--diff\", \"--check\") if args.check_only else ()),\n cwd=rootdir,\n check=True,\n )\n runsubprocess(\n args.dry_run,\n (\"isort\", \"--settings-path\", \".isort.cfg\", \".\")\n + ((\"--diff\", \"--check-only\") if args.check_only else ()),\n cwd=rootdir,\n check=True,\n )\n runsubprocess(args.dry_run, (\"flake8\", \"--config\", \".flake8\", rootdir), check=True)\n execute_args(\n parse_subargs(\n args, (\"exec\", \"pylint {}\", 
\"--all\", \"--mode\", \"lintroots\")\n )\n )\n execute_args(\n parse_subargs(\n args,\n (\n \"exec\",\n \"python scripts/check_for_valid_readme.py {}\",\n \"--all\",\n ),\n )\n )\n\n\ndef update_changelog(path, version, new_entry):\n unreleased_changes = False\n try:\n with open(path, encoding=\"utf-8\") as changelog:\n text = changelog.read()\n if f\"## [{version}]\" in text:\n raise AttributeError(\n f\"{path} already contains version {version}\"\n )\n with open(path, encoding=\"utf-8\") as changelog:\n for line in changelog:\n if line.startswith(\"## [Unreleased]\"):\n unreleased_changes = False\n elif line.startswith(\"## \"):\n break\n elif len(line.strip()) > 0:\n unreleased_changes = True\n\n except FileNotFoundError:\n print(f\"file missing: {path}\")\n return\n\n if unreleased_changes:\n print(f\"updating: {path}\")\n text = re.sub(r\"## \\[Unreleased\\].*\", new_entry, text)\n with open(path, \"w\", encoding=\"utf-8\") as changelog:\n changelog.write(text)\n\n\ndef update_changelogs(version):\n today = datetime.now().strftime(\"%Y-%m-%d\")\n new_entry = \"\"\"## [Unreleased](https://github.com/open-telemetry/opentelemetry-python/compare/v{version}...HEAD)\n\n## [{version}](https://github.com/open-telemetry/opentelemetry-python/releases/tag/v{version}) - {today}\n\n\"\"\".format(\n version=version, today=today\n )\n errors = False\n try:\n update_changelog(\"./CHANGELOG.md\", version, new_entry)\n except Exception as err: # pylint: disable=broad-except\n print(str(err))\n errors = True\n\n if errors:\n sys.exit(1)\n\n\ndef find(name, path):\n for root, _, files in os.walk(path):\n if name in files:\n return os.path.join(root, name)\n return None\n\n\ndef filter_packages(targets, packages):\n filtered_packages = []\n for target in targets:\n for pkg in packages:\n if pkg in str(target):\n filtered_packages.append(target)\n break\n return filtered_packages\n\n\ndef update_version_files(targets, version, packages):\n print(\"updating version.py files\")\n targets = filter_packages(targets, packages)\n update_files(\n targets,\n \"version.py\",\n \"__version__ .*\",\n f'__version__ = \"{version}\"',\n )\n\n\ndef update_dependencies(targets, version, packages):\n print(\"updating dependencies\")\n for pkg in packages:\n update_files(\n targets,\n \"pyproject.toml\",\n rf\"({basename(pkg)}.*)==(.*)\",\n r\"\\1== \" + version + '\",',\n )\n\n\ndef update_files(targets, filename, search, replace):\n errors = False\n for target in targets:\n curr_file = find(filename, target)\n if curr_file is None:\n print(f\"file missing: {target}/{filename}\")\n continue\n\n with open(curr_file, encoding=\"utf-8\") as _file:\n text = _file.read()\n\n if replace in text:\n print(f\"{curr_file} already contains {replace}\")\n continue\n\n with open(curr_file, \"w\", encoding=\"utf-8\") as _file:\n _file.write(re.sub(search, replace, text))\n\n if errors:\n sys.exit(1)\n\n\ndef release_args(args):\n print(\"preparing release\")\n\n rootpath = find_projectroot()\n targets = list(find_targets_unordered(rootpath))\n cfg = ConfigParser()\n cfg.read(str(find_projectroot() / \"eachdist.ini\"))\n versions = args.versions\n updated_versions = []\n for group in versions.split(\",\"):\n mcfg = cfg[group]\n version = mcfg[\"version\"]\n updated_versions.append(version)\n packages = mcfg[\"packages\"].split()\n print(f\"update {group} packages to {version}\")\n update_dependencies(targets, version, packages)\n update_version_files(targets, version, packages)\n\n update_changelogs(updated_versions[0])\n\n\ndef 
test_args(args):\n clean_remainder_args(args.pytestargs)\n execute_args(\n parse_subargs(\n args,\n (\n \"exec\",\n \"pytest {} \" + join_args(args.pytestargs),\n \"--mode\",\n \"testroots\",\n ),\n )\n )\n\n\ndef format_args(args):\n root_dir = format_dir = str(find_projectroot())\n if args.path:\n format_dir = os.path.join(format_dir, args.path)\n\n runsubprocess(\n args.dry_run,\n (\"black\", \"--config\", f\"{root_dir}/pyproject.toml\", \".\"),\n cwd=format_dir,\n check=True,\n )\n runsubprocess(\n args.dry_run,\n (\"isort\", \"--settings-path\", f\"{root_dir}/.isort.cfg\", \"--profile\", \"black\", \".\"),\n cwd=format_dir,\n check=True,\n )\n\n\ndef version_args(args):\n cfg = ConfigParser()\n cfg.read(str(find_projectroot() / \"eachdist.ini\"))\n print(cfg[args.mode][\"version\"])\n\n\ndef main():\n args = parse_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/eachdist.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index ea4f873b37d..6d3b1d2fedb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [Unreleased](https://github.com/open-telemetry/opentelemetry-python/compare/v1.13.0-0.34b0...HEAD) +## [Unreleased](https://github.com/open-telemetry/opentelemetry-python/compare/v1.13.0...HEAD) - Update explicit histogram bucket boundaries ([#2947](https://github.com/open-telemetry/opentelemetry-python/pull/2947)) diff --git a/scripts/eachdist.py b/scripts/eachdist.py index c02a75b256d..0c0620e95ac 100755 --- a/scripts/eachdist.py +++ b/scripts/eachdist.py @@ -675,7 +675,7 @@ def release_args(args): update_dependencies(targets, version, packages) update_version_files(targets, version, packages) - update_changelogs("-".join(updated_versions)) + update_changelogs(updated_versions[0]) def test_args(args):
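The eachdist.py hunk above changes the argument passed to `update_changelogs` when several version groups are updated in one run. A minimal sketch of the difference, using the two version numbers that appear in the CHANGELOG hunk (purely illustrative values, not part of the patch):

```python
updated_versions = ["1.13.0", "0.34b0"]

old_arg = "-".join(updated_versions)  # "1.13.0-0.34b0" -> compare link .../compare/v1.13.0-0.34b0...HEAD
new_arg = updated_versions[0]         # "1.13.0"        -> compare link .../compare/v1.13.0...HEAD

print(old_arg, new_arg)
```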
open-mmlab__mmcv-1834
Support None in DictAction
Sometimes there is a need to override a key in the config to be `None`, e.g., `load_from` is assigned to some string in the config and you want to override it through `--cfg-options load_from=None`. Currently this passes the string `'None'` rather than the real `None` value to the config, which makes the checkpoint loader try to load a checkpoint from a path named `'None'`.
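Below is a minimal sketch, assuming a hypothetical helper named `parse_scalar` (this is not mmcv's actual `DictAction` code), of how a KEY=VALUE override parser can map the literal string `None` to the real Python `None`, so that `--cfg-options load_from=None` no longer yields the string `'None'`:

```python
def parse_scalar(val: str):
    # Hypothetical helper for illustration only; not mmcv's DictAction.
    # Try int, then float, then bool, then None; otherwise keep the string.
    try:
        return int(val)
    except ValueError:
        pass
    try:
        return float(val)
    except ValueError:
        pass
    if val.lower() in ('true', 'false'):
        return val.lower() == 'true'
    if val.lower() in ('none', 'null'):
        return None  # a real None, not the string 'None'
    return val


assert parse_scalar('None') is None
assert parse_scalar('true') is True
assert parse_scalar('0.01') == 0.01
assert parse_scalar('work_dirs/latest.pth') == 'work_dirs/latest.pth'
```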
[ { "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport ast\nimport copy\nimport os\nimport os.path as osp\nimport platform\nimport shutil\nimport sys\nimport tempfile\nimport types\nimport uuid\nimport warnings\nfrom argparse import Action, ArgumentParser\nfrom collections import abc\nfrom importlib import import_module\n\nfrom addict import Dict\nfrom yapf.yapflib.yapf_api import FormatCode\n\nfrom .misc import import_modules_from_strings\nfrom .path import check_file_exist\n\nif platform.system() == 'Windows':\n import regex as re\nelse:\n import re\n\nBASE_KEY = '_base_'\nDELETE_KEY = '_delete_'\nDEPRECATION_KEY = '_deprecation_'\nRESERVED_KEYS = ['filename', 'text', 'pretty_text']\n\n\nclass ConfigDict(Dict):\n\n def __missing__(self, name):\n raise KeyError(name)\n\n def __getattr__(self, name):\n try:\n value = super(ConfigDict, self).__getattr__(name)\n except KeyError:\n ex = AttributeError(f\"'{self.__class__.__name__}' object has no \"\n f\"attribute '{name}'\")\n except Exception as e:\n ex = e\n else:\n return value\n raise ex\n\n\ndef add_args(parser, cfg, prefix=''):\n for k, v in cfg.items():\n if isinstance(v, str):\n parser.add_argument('--' + prefix + k)\n elif isinstance(v, int):\n parser.add_argument('--' + prefix + k, type=int)\n elif isinstance(v, float):\n parser.add_argument('--' + prefix + k, type=float)\n elif isinstance(v, bool):\n parser.add_argument('--' + prefix + k, action='store_true')\n elif isinstance(v, dict):\n add_args(parser, v, prefix + k + '.')\n elif isinstance(v, abc.Iterable):\n parser.add_argument('--' + prefix + k, type=type(v[0]), nargs='+')\n else:\n print(f'cannot parse key {prefix + k} of type {type(v)}')\n return parser\n\n\nclass Config:\n \"\"\"A facility for config and config files.\n\n It supports common file formats as configs: python/json/yaml. 
The interface\n is the same as a dict object and also allows access config values as\n attributes.\n\n Example:\n >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))\n >>> cfg.a\n 1\n >>> cfg.b\n {'b1': [0, 1]}\n >>> cfg.b.b1\n [0, 1]\n >>> cfg = Config.fromfile('tests/data/config/a.py')\n >>> cfg.filename\n \"/home/kchen/projects/mmcv/tests/data/config/a.py\"\n >>> cfg.item4\n 'test'\n >>> cfg\n \"Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: \"\n \"{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}\"\n \"\"\"\n\n @staticmethod\n def _validate_py_syntax(filename):\n with open(filename, 'r', encoding='utf-8') as f:\n # Setting encoding explicitly to resolve coding issue on windows\n content = f.read()\n try:\n ast.parse(content)\n except SyntaxError as e:\n raise SyntaxError('There are syntax errors in config '\n f'file {filename}: {e}')\n\n @staticmethod\n def _substitute_predefined_vars(filename, temp_config_name):\n file_dirname = osp.dirname(filename)\n file_basename = osp.basename(filename)\n file_basename_no_extension = osp.splitext(file_basename)[0]\n file_extname = osp.splitext(filename)[1]\n support_templates = dict(\n fileDirname=file_dirname,\n fileBasename=file_basename,\n fileBasenameNoExtension=file_basename_no_extension,\n fileExtname=file_extname)\n with open(filename, 'r', encoding='utf-8') as f:\n # Setting encoding explicitly to resolve coding issue on windows\n config_file = f.read()\n for key, value in support_templates.items():\n regexp = r'\\{\\{\\s*' + str(key) + r'\\s*\\}\\}'\n value = value.replace('\\\\', '/')\n config_file = re.sub(regexp, value, config_file)\n with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:\n tmp_config_file.write(config_file)\n\n @staticmethod\n def _pre_substitute_base_vars(filename, temp_config_name):\n \"\"\"Substitute base variable placehoders to string, so that parsing\n would work.\"\"\"\n with open(filename, 'r', encoding='utf-8') as f:\n # Setting encoding explicitly to resolve coding issue on windows\n config_file = f.read()\n base_var_dict = {}\n regexp = r'\\{\\{\\s*' + BASE_KEY + r'\\.([\\w\\.]+)\\s*\\}\\}'\n base_vars = set(re.findall(regexp, config_file))\n for base_var in base_vars:\n randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}'\n base_var_dict[randstr] = base_var\n regexp = r'\\{\\{\\s*' + BASE_KEY + r'\\.' 
+ base_var + r'\\s*\\}\\}'\n config_file = re.sub(regexp, f'\"{randstr}\"', config_file)\n with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:\n tmp_config_file.write(config_file)\n return base_var_dict\n\n @staticmethod\n def _substitute_base_vars(cfg, base_var_dict, base_cfg):\n \"\"\"Substitute variable strings to their actual values.\"\"\"\n cfg = copy.deepcopy(cfg)\n\n if isinstance(cfg, dict):\n for k, v in cfg.items():\n if isinstance(v, str) and v in base_var_dict:\n new_v = base_cfg\n for new_k in base_var_dict[v].split('.'):\n new_v = new_v[new_k]\n cfg[k] = new_v\n elif isinstance(v, (list, tuple, dict)):\n cfg[k] = Config._substitute_base_vars(\n v, base_var_dict, base_cfg)\n elif isinstance(cfg, tuple):\n cfg = tuple(\n Config._substitute_base_vars(c, base_var_dict, base_cfg)\n for c in cfg)\n elif isinstance(cfg, list):\n cfg = [\n Config._substitute_base_vars(c, base_var_dict, base_cfg)\n for c in cfg\n ]\n elif isinstance(cfg, str) and cfg in base_var_dict:\n new_v = base_cfg\n for new_k in base_var_dict[cfg].split('.'):\n new_v = new_v[new_k]\n cfg = new_v\n\n return cfg\n\n @staticmethod\n def _file2dict(filename, use_predefined_variables=True):\n filename = osp.abspath(osp.expanduser(filename))\n check_file_exist(filename)\n fileExtname = osp.splitext(filename)[1]\n if fileExtname not in ['.py', '.json', '.yaml', '.yml']:\n raise IOError('Only py/yml/yaml/json type are supported now!')\n\n with tempfile.TemporaryDirectory() as temp_config_dir:\n temp_config_file = tempfile.NamedTemporaryFile(\n dir=temp_config_dir, suffix=fileExtname)\n if platform.system() == 'Windows':\n temp_config_file.close()\n temp_config_name = osp.basename(temp_config_file.name)\n # Substitute predefined variables\n if use_predefined_variables:\n Config._substitute_predefined_vars(filename,\n temp_config_file.name)\n else:\n shutil.copyfile(filename, temp_config_file.name)\n # Substitute base variables from placeholders to strings\n base_var_dict = Config._pre_substitute_base_vars(\n temp_config_file.name, temp_config_file.name)\n\n if filename.endswith('.py'):\n temp_module_name = osp.splitext(temp_config_name)[0]\n sys.path.insert(0, temp_config_dir)\n Config._validate_py_syntax(filename)\n mod = import_module(temp_module_name)\n sys.path.pop(0)\n cfg_dict = {\n name: value\n for name, value in mod.__dict__.items()\n if not name.startswith('__')\n and not isinstance(value, types.ModuleType)\n and not isinstance(value, types.FunctionType)\n }\n # delete imported module\n del sys.modules[temp_module_name]\n elif filename.endswith(('.yml', '.yaml', '.json')):\n import mmcv\n cfg_dict = mmcv.load(temp_config_file.name)\n # close temp file\n temp_config_file.close()\n\n # check deprecation information\n if DEPRECATION_KEY in cfg_dict:\n deprecation_info = cfg_dict.pop(DEPRECATION_KEY)\n warning_msg = f'The config file {filename} will be deprecated ' \\\n 'in the future.'\n if 'expected' in deprecation_info:\n warning_msg += f' Please use {deprecation_info[\"expected\"]} ' \\\n 'instead.'\n if 'reference' in deprecation_info:\n warning_msg += ' More information can be found at ' \\\n f'{deprecation_info[\"reference\"]}'\n warnings.warn(warning_msg, DeprecationWarning)\n\n cfg_text = filename + '\\n'\n with open(filename, 'r', encoding='utf-8') as f:\n # Setting encoding explicitly to resolve coding issue on windows\n cfg_text += f.read()\n\n if BASE_KEY in cfg_dict:\n cfg_dir = osp.dirname(filename)\n base_filename = cfg_dict.pop(BASE_KEY)\n base_filename = base_filename if 
isinstance(\n base_filename, list) else [base_filename]\n\n cfg_dict_list = list()\n cfg_text_list = list()\n for f in base_filename:\n _cfg_dict, _cfg_text = Config._file2dict(osp.join(cfg_dir, f))\n cfg_dict_list.append(_cfg_dict)\n cfg_text_list.append(_cfg_text)\n\n base_cfg_dict = dict()\n for c in cfg_dict_list:\n duplicate_keys = base_cfg_dict.keys() & c.keys()\n if len(duplicate_keys) > 0:\n raise KeyError('Duplicate key is not allowed among bases. '\n f'Duplicate keys: {duplicate_keys}')\n base_cfg_dict.update(c)\n\n # Substitute base variables from strings to their actual values\n cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict,\n base_cfg_dict)\n\n base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)\n cfg_dict = base_cfg_dict\n\n # merge cfg_text\n cfg_text_list.append(cfg_text)\n cfg_text = '\\n'.join(cfg_text_list)\n\n return cfg_dict, cfg_text\n\n @staticmethod\n def _merge_a_into_b(a, b, allow_list_keys=False):\n \"\"\"merge dict ``a`` into dict ``b`` (non-inplace).\n\n Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid\n in-place modifications.\n\n Args:\n a (dict): The source dict to be merged into ``b``.\n b (dict): The origin dict to be fetch keys from ``a``.\n allow_list_keys (bool): If True, int string keys (e.g. '0', '1')\n are allowed in source ``a`` and will replace the element of the\n corresponding index in b if b is a list. Default: False.\n\n Returns:\n dict: The modified dict of ``b`` using ``a``.\n\n Examples:\n # Normally merge a into b.\n >>> Config._merge_a_into_b(\n ... dict(obj=dict(a=2)), dict(obj=dict(a=1)))\n {'obj': {'a': 2}}\n\n # Delete b first and merge a into b.\n >>> Config._merge_a_into_b(\n ... dict(obj=dict(_delete_=True, a=2)), dict(obj=dict(a=1)))\n {'obj': {'a': 2}}\n\n # b is a list\n >>> Config._merge_a_into_b(\n ... {'0': dict(a=2)}, [dict(a=1), dict(b=2)], True)\n [{'a': 2}, {'b': 2}]\n \"\"\"\n b = b.copy()\n for k, v in a.items():\n if allow_list_keys and k.isdigit() and isinstance(b, list):\n k = int(k)\n if len(b) <= k:\n raise KeyError(f'Index {k} exceeds the length of list {b}')\n b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)\n elif isinstance(v, dict):\n if k in b and not v.pop(DELETE_KEY, False):\n allowed_types = (dict, list) if allow_list_keys else dict\n if not isinstance(b[k], allowed_types):\n raise TypeError(\n f'{k}={v} in child config cannot inherit from '\n f'base because {k} is a dict in the child config '\n f'but is of type {type(b[k])} in base config. '\n f'You may set `{DELETE_KEY}=True` to ignore the '\n f'base config.')\n b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)\n else:\n b[k] = ConfigDict(v)\n else:\n b[k] = v\n return b\n\n @staticmethod\n def fromfile(filename,\n use_predefined_variables=True,\n import_custom_modules=True):\n cfg_dict, cfg_text = Config._file2dict(filename,\n use_predefined_variables)\n if import_custom_modules and cfg_dict.get('custom_imports', None):\n import_modules_from_strings(**cfg_dict['custom_imports'])\n return Config(cfg_dict, cfg_text=cfg_text, filename=filename)\n\n @staticmethod\n def fromstring(cfg_str, file_format):\n \"\"\"Generate config from config str.\n\n Args:\n cfg_str (str): Config str.\n file_format (str): Config file format corresponding to the\n config str. 
Only py/yml/yaml/json type are supported now!\n\n Returns:\n :obj:`Config`: Config obj.\n \"\"\"\n if file_format not in ['.py', '.json', '.yaml', '.yml']:\n raise IOError('Only py/yml/yaml/json type are supported now!')\n if file_format != '.py' and 'dict(' in cfg_str:\n # check if users specify a wrong suffix for python\n warnings.warn(\n 'Please check \"file_format\", the file format may be .py')\n with tempfile.NamedTemporaryFile(\n 'w', encoding='utf-8', suffix=file_format,\n delete=False) as temp_file:\n temp_file.write(cfg_str)\n # on windows, previous implementation cause error\n # see PR 1077 for details\n cfg = Config.fromfile(temp_file.name)\n os.remove(temp_file.name)\n return cfg\n\n @staticmethod\n def auto_argparser(description=None):\n \"\"\"Generate argparser from config file automatically (experimental)\"\"\"\n partial_parser = ArgumentParser(description=description)\n partial_parser.add_argument('config', help='config file path')\n cfg_file = partial_parser.parse_known_args()[0].config\n cfg = Config.fromfile(cfg_file)\n parser = ArgumentParser(description=description)\n parser.add_argument('config', help='config file path')\n add_args(parser, cfg)\n return parser, cfg\n\n def __init__(self, cfg_dict=None, cfg_text=None, filename=None):\n if cfg_dict is None:\n cfg_dict = dict()\n elif not isinstance(cfg_dict, dict):\n raise TypeError('cfg_dict must be a dict, but '\n f'got {type(cfg_dict)}')\n for key in cfg_dict:\n if key in RESERVED_KEYS:\n raise KeyError(f'{key} is reserved for config file')\n\n super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))\n super(Config, self).__setattr__('_filename', filename)\n if cfg_text:\n text = cfg_text\n elif filename:\n with open(filename, 'r') as f:\n text = f.read()\n else:\n text = ''\n super(Config, self).__setattr__('_text', text)\n\n @property\n def filename(self):\n return self._filename\n\n @property\n def text(self):\n return self._text\n\n @property\n def pretty_text(self):\n\n indent = 4\n\n def _indent(s_, num_spaces):\n s = s_.split('\\n')\n if len(s) == 1:\n return s_\n first = s.pop(0)\n s = [(num_spaces * ' ') + line for line in s]\n s = '\\n'.join(s)\n s = first + '\\n' + s\n return s\n\n def _format_basic_types(k, v, use_mapping=False):\n if isinstance(v, str):\n v_str = f\"'{v}'\"\n else:\n v_str = str(v)\n\n if use_mapping:\n k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n attr_str = f'{k_str}: {v_str}'\n else:\n attr_str = f'{str(k)}={v_str}'\n attr_str = _indent(attr_str, indent)\n\n return attr_str\n\n def _format_list(k, v, use_mapping=False):\n # check if all items in the list are dict\n if all(isinstance(_, dict) for _ in v):\n v_str = '[\\n'\n v_str += '\\n'.join(\n f'dict({_indent(_format_dict(v_), indent)}),'\n for v_ in v).rstrip(',')\n if use_mapping:\n k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n attr_str = f'{k_str}: {v_str}'\n else:\n attr_str = f'{str(k)}={v_str}'\n attr_str = _indent(attr_str, indent) + ']'\n else:\n attr_str = _format_basic_types(k, v, use_mapping)\n return attr_str\n\n def _contain_invalid_identifier(dict_str):\n contain_invalid_identifier = False\n for key_name in dict_str:\n contain_invalid_identifier |= \\\n (not str(key_name).isidentifier())\n return contain_invalid_identifier\n\n def _format_dict(input_dict, outest_level=False):\n r = ''\n s = []\n\n use_mapping = _contain_invalid_identifier(input_dict)\n if use_mapping:\n r += '{'\n for idx, (k, v) in enumerate(input_dict.items()):\n is_last = idx >= len(input_dict) - 1\n end = '' if 
outest_level or is_last else ','\n if isinstance(v, dict):\n v_str = '\\n' + _format_dict(v)\n if use_mapping:\n k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n attr_str = f'{k_str}: dict({v_str}'\n else:\n attr_str = f'{str(k)}=dict({v_str}'\n attr_str = _indent(attr_str, indent) + ')' + end\n elif isinstance(v, list):\n attr_str = _format_list(k, v, use_mapping) + end\n else:\n attr_str = _format_basic_types(k, v, use_mapping) + end\n\n s.append(attr_str)\n r += '\\n'.join(s)\n if use_mapping:\n r += '}'\n return r\n\n cfg_dict = self._cfg_dict.to_dict()\n text = _format_dict(cfg_dict, outest_level=True)\n # copied from setup.cfg\n yapf_style = dict(\n based_on_style='pep8',\n blank_line_before_nested_class_or_def=True,\n split_before_expression_after_opening_paren=True)\n text, _ = FormatCode(text, style_config=yapf_style, verify=True)\n\n return text\n\n def __repr__(self):\n return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}'\n\n def __len__(self):\n return len(self._cfg_dict)\n\n def __getattr__(self, name):\n return getattr(self._cfg_dict, name)\n\n def __getitem__(self, name):\n return self._cfg_dict.__getitem__(name)\n\n def __setattr__(self, name, value):\n if isinstance(value, dict):\n value = ConfigDict(value)\n self._cfg_dict.__setattr__(name, value)\n\n def __setitem__(self, name, value):\n if isinstance(value, dict):\n value = ConfigDict(value)\n self._cfg_dict.__setitem__(name, value)\n\n def __iter__(self):\n return iter(self._cfg_dict)\n\n def __getstate__(self):\n return (self._cfg_dict, self._filename, self._text)\n\n def __copy__(self):\n cls = self.__class__\n other = cls.__new__(cls)\n other.__dict__.update(self.__dict__)\n\n return other\n\n def __deepcopy__(self, memo):\n cls = self.__class__\n other = cls.__new__(cls)\n memo[id(self)] = other\n\n for key, value in self.__dict__.items():\n super(Config, other).__setattr__(key, copy.deepcopy(value, memo))\n\n return other\n\n def __setstate__(self, state):\n _cfg_dict, _filename, _text = state\n super(Config, self).__setattr__('_cfg_dict', _cfg_dict)\n super(Config, self).__setattr__('_filename', _filename)\n super(Config, self).__setattr__('_text', _text)\n\n def dump(self, file=None):\n cfg_dict = super(Config, self).__getattribute__('_cfg_dict').to_dict()\n if self.filename.endswith('.py'):\n if file is None:\n return self.pretty_text\n else:\n with open(file, 'w', encoding='utf-8') as f:\n f.write(self.pretty_text)\n else:\n import mmcv\n if file is None:\n file_format = self.filename.split('.')[-1]\n return mmcv.dump(cfg_dict, file_format=file_format)\n else:\n mmcv.dump(cfg_dict, file)\n\n def merge_from_dict(self, options, allow_list_keys=True):\n \"\"\"Merge list into cfg_dict.\n\n Merge the dict parsed by MultipleKVAction into this cfg.\n\n Examples:\n >>> options = {'model.backbone.depth': 50,\n ... 'model.backbone.with_cp':True}\n >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))\n >>> cfg.merge_from_dict(options)\n >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')\n >>> assert cfg_dict == dict(\n ... model=dict(backbone=dict(depth=50, with_cp=True)))\n\n >>> # Merge list element\n >>> cfg = Config(dict(pipeline=[\n ... dict(type='LoadImage'), dict(type='LoadAnnotations')]))\n >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')})\n >>> cfg.merge_from_dict(options, allow_list_keys=True)\n >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')\n >>> assert cfg_dict == dict(pipeline=[\n ... 
dict(type='SelfLoadImage'), dict(type='LoadAnnotations')])\n\n Args:\n options (dict): dict of configs to merge from.\n allow_list_keys (bool): If True, int string keys (e.g. '0', '1')\n are allowed in ``options`` and will replace the element of the\n corresponding index in the config if the config is a list.\n Default: True.\n \"\"\"\n option_cfg_dict = {}\n for full_key, v in options.items():\n d = option_cfg_dict\n key_list = full_key.split('.')\n for subkey in key_list[:-1]:\n d.setdefault(subkey, ConfigDict())\n d = d[subkey]\n subkey = key_list[-1]\n d[subkey] = v\n\n cfg_dict = super(Config, self).__getattribute__('_cfg_dict')\n super(Config, self).__setattr__(\n '_cfg_dict',\n Config._merge_a_into_b(\n option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys))\n\n\nclass DictAction(Action):\n \"\"\"\n argparse action to split an argument into KEY=VALUE form\n on the first = and append to a dictionary. List options can\n be passed as comma separated values, i.e 'KEY=V1,V2,V3', or with explicit\n brackets, i.e. 'KEY=[V1,V2,V3]'. It also support nested brackets to build\n list/tuple values. e.g. 'KEY=[(V1,V2),(V3,V4)]'\n \"\"\"\n\n @staticmethod\n def _parse_int_float_bool(val):\n try:\n return int(val)\n except ValueError:\n pass\n try:\n return float(val)\n except ValueError:\n pass\n if val.lower() in ['true', 'false']:\n return True if val.lower() == 'true' else False\n return val\n\n @staticmethod\n def _parse_iterable(val):\n \"\"\"Parse iterable values in the string.\n\n All elements inside '()' or '[]' are treated as iterable values.\n\n Args:\n val (str): Value string.\n\n Returns:\n list | tuple: The expanded list or tuple from the string.\n\n Examples:\n >>> DictAction._parse_iterable('1,2,3')\n [1, 2, 3]\n >>> DictAction._parse_iterable('[a, b, c]')\n ['a', 'b', 'c']\n >>> DictAction._parse_iterable('[(1, 2, 3), [a, b], c]')\n [(1, 2, 3), ['a', 'b'], 'c']\n \"\"\"\n\n def find_next_comma(string):\n \"\"\"Find the position of next comma in the string.\n\n If no ',' is found in the string, return the string length. All\n chars inside '()' and '[]' are treated as one element and thus ','\n inside these brackets are ignored.\n \"\"\"\n assert (string.count('(') == string.count(')')) and (\n string.count('[') == string.count(']')), \\\n f'Imbalanced brackets exist in {string}'\n end = len(string)\n for idx, char in enumerate(string):\n pre = string[:idx]\n # The string before this ',' is balanced\n if ((char == ',') and (pre.count('(') == pre.count(')'))\n and (pre.count('[') == pre.count(']'))):\n end = idx\n break\n return end\n\n # Strip ' and \" characters and replace whitespace.\n val = val.strip('\\'\\\"').replace(' ', '')\n is_tuple = False\n if val.startswith('(') and val.endswith(')'):\n is_tuple = True\n val = val[1:-1]\n elif val.startswith('[') and val.endswith(']'):\n val = val[1:-1]\n elif ',' not in val:\n # val is a single value\n return DictAction._parse_int_float_bool(val)\n\n values = []\n while len(val) > 0:\n comma_idx = find_next_comma(val)\n element = DictAction._parse_iterable(val[:comma_idx])\n values.append(element)\n val = val[comma_idx + 1:]\n if is_tuple:\n values = tuple(values)\n return values\n\n def __call__(self, parser, namespace, values, option_string=None):\n options = {}\n for kv in values:\n key, val = kv.split('=', maxsplit=1)\n options[key] = self._parse_iterable(val)\n setattr(namespace, self.dest, options)\n", "path": "mmcv/utils/config.py" } ]
[ { "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport ast\nimport copy\nimport os\nimport os.path as osp\nimport platform\nimport shutil\nimport sys\nimport tempfile\nimport types\nimport uuid\nimport warnings\nfrom argparse import Action, ArgumentParser\nfrom collections import abc\nfrom importlib import import_module\n\nfrom addict import Dict\nfrom yapf.yapflib.yapf_api import FormatCode\n\nfrom .misc import import_modules_from_strings\nfrom .path import check_file_exist\n\nif platform.system() == 'Windows':\n import regex as re\nelse:\n import re\n\nBASE_KEY = '_base_'\nDELETE_KEY = '_delete_'\nDEPRECATION_KEY = '_deprecation_'\nRESERVED_KEYS = ['filename', 'text', 'pretty_text']\n\n\nclass ConfigDict(Dict):\n\n def __missing__(self, name):\n raise KeyError(name)\n\n def __getattr__(self, name):\n try:\n value = super(ConfigDict, self).__getattr__(name)\n except KeyError:\n ex = AttributeError(f\"'{self.__class__.__name__}' object has no \"\n f\"attribute '{name}'\")\n except Exception as e:\n ex = e\n else:\n return value\n raise ex\n\n\ndef add_args(parser, cfg, prefix=''):\n for k, v in cfg.items():\n if isinstance(v, str):\n parser.add_argument('--' + prefix + k)\n elif isinstance(v, int):\n parser.add_argument('--' + prefix + k, type=int)\n elif isinstance(v, float):\n parser.add_argument('--' + prefix + k, type=float)\n elif isinstance(v, bool):\n parser.add_argument('--' + prefix + k, action='store_true')\n elif isinstance(v, dict):\n add_args(parser, v, prefix + k + '.')\n elif isinstance(v, abc.Iterable):\n parser.add_argument('--' + prefix + k, type=type(v[0]), nargs='+')\n else:\n print(f'cannot parse key {prefix + k} of type {type(v)}')\n return parser\n\n\nclass Config:\n \"\"\"A facility for config and config files.\n\n It supports common file formats as configs: python/json/yaml. 
The interface\n is the same as a dict object and also allows access config values as\n attributes.\n\n Example:\n >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))\n >>> cfg.a\n 1\n >>> cfg.b\n {'b1': [0, 1]}\n >>> cfg.b.b1\n [0, 1]\n >>> cfg = Config.fromfile('tests/data/config/a.py')\n >>> cfg.filename\n \"/home/kchen/projects/mmcv/tests/data/config/a.py\"\n >>> cfg.item4\n 'test'\n >>> cfg\n \"Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: \"\n \"{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}\"\n \"\"\"\n\n @staticmethod\n def _validate_py_syntax(filename):\n with open(filename, 'r', encoding='utf-8') as f:\n # Setting encoding explicitly to resolve coding issue on windows\n content = f.read()\n try:\n ast.parse(content)\n except SyntaxError as e:\n raise SyntaxError('There are syntax errors in config '\n f'file {filename}: {e}')\n\n @staticmethod\n def _substitute_predefined_vars(filename, temp_config_name):\n file_dirname = osp.dirname(filename)\n file_basename = osp.basename(filename)\n file_basename_no_extension = osp.splitext(file_basename)[0]\n file_extname = osp.splitext(filename)[1]\n support_templates = dict(\n fileDirname=file_dirname,\n fileBasename=file_basename,\n fileBasenameNoExtension=file_basename_no_extension,\n fileExtname=file_extname)\n with open(filename, 'r', encoding='utf-8') as f:\n # Setting encoding explicitly to resolve coding issue on windows\n config_file = f.read()\n for key, value in support_templates.items():\n regexp = r'\\{\\{\\s*' + str(key) + r'\\s*\\}\\}'\n value = value.replace('\\\\', '/')\n config_file = re.sub(regexp, value, config_file)\n with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:\n tmp_config_file.write(config_file)\n\n @staticmethod\n def _pre_substitute_base_vars(filename, temp_config_name):\n \"\"\"Substitute base variable placehoders to string, so that parsing\n would work.\"\"\"\n with open(filename, 'r', encoding='utf-8') as f:\n # Setting encoding explicitly to resolve coding issue on windows\n config_file = f.read()\n base_var_dict = {}\n regexp = r'\\{\\{\\s*' + BASE_KEY + r'\\.([\\w\\.]+)\\s*\\}\\}'\n base_vars = set(re.findall(regexp, config_file))\n for base_var in base_vars:\n randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}'\n base_var_dict[randstr] = base_var\n regexp = r'\\{\\{\\s*' + BASE_KEY + r'\\.' 
+ base_var + r'\\s*\\}\\}'\n config_file = re.sub(regexp, f'\"{randstr}\"', config_file)\n with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:\n tmp_config_file.write(config_file)\n return base_var_dict\n\n @staticmethod\n def _substitute_base_vars(cfg, base_var_dict, base_cfg):\n \"\"\"Substitute variable strings to their actual values.\"\"\"\n cfg = copy.deepcopy(cfg)\n\n if isinstance(cfg, dict):\n for k, v in cfg.items():\n if isinstance(v, str) and v in base_var_dict:\n new_v = base_cfg\n for new_k in base_var_dict[v].split('.'):\n new_v = new_v[new_k]\n cfg[k] = new_v\n elif isinstance(v, (list, tuple, dict)):\n cfg[k] = Config._substitute_base_vars(\n v, base_var_dict, base_cfg)\n elif isinstance(cfg, tuple):\n cfg = tuple(\n Config._substitute_base_vars(c, base_var_dict, base_cfg)\n for c in cfg)\n elif isinstance(cfg, list):\n cfg = [\n Config._substitute_base_vars(c, base_var_dict, base_cfg)\n for c in cfg\n ]\n elif isinstance(cfg, str) and cfg in base_var_dict:\n new_v = base_cfg\n for new_k in base_var_dict[cfg].split('.'):\n new_v = new_v[new_k]\n cfg = new_v\n\n return cfg\n\n @staticmethod\n def _file2dict(filename, use_predefined_variables=True):\n filename = osp.abspath(osp.expanduser(filename))\n check_file_exist(filename)\n fileExtname = osp.splitext(filename)[1]\n if fileExtname not in ['.py', '.json', '.yaml', '.yml']:\n raise IOError('Only py/yml/yaml/json type are supported now!')\n\n with tempfile.TemporaryDirectory() as temp_config_dir:\n temp_config_file = tempfile.NamedTemporaryFile(\n dir=temp_config_dir, suffix=fileExtname)\n if platform.system() == 'Windows':\n temp_config_file.close()\n temp_config_name = osp.basename(temp_config_file.name)\n # Substitute predefined variables\n if use_predefined_variables:\n Config._substitute_predefined_vars(filename,\n temp_config_file.name)\n else:\n shutil.copyfile(filename, temp_config_file.name)\n # Substitute base variables from placeholders to strings\n base_var_dict = Config._pre_substitute_base_vars(\n temp_config_file.name, temp_config_file.name)\n\n if filename.endswith('.py'):\n temp_module_name = osp.splitext(temp_config_name)[0]\n sys.path.insert(0, temp_config_dir)\n Config._validate_py_syntax(filename)\n mod = import_module(temp_module_name)\n sys.path.pop(0)\n cfg_dict = {\n name: value\n for name, value in mod.__dict__.items()\n if not name.startswith('__')\n and not isinstance(value, types.ModuleType)\n and not isinstance(value, types.FunctionType)\n }\n # delete imported module\n del sys.modules[temp_module_name]\n elif filename.endswith(('.yml', '.yaml', '.json')):\n import mmcv\n cfg_dict = mmcv.load(temp_config_file.name)\n # close temp file\n temp_config_file.close()\n\n # check deprecation information\n if DEPRECATION_KEY in cfg_dict:\n deprecation_info = cfg_dict.pop(DEPRECATION_KEY)\n warning_msg = f'The config file {filename} will be deprecated ' \\\n 'in the future.'\n if 'expected' in deprecation_info:\n warning_msg += f' Please use {deprecation_info[\"expected\"]} ' \\\n 'instead.'\n if 'reference' in deprecation_info:\n warning_msg += ' More information can be found at ' \\\n f'{deprecation_info[\"reference\"]}'\n warnings.warn(warning_msg, DeprecationWarning)\n\n cfg_text = filename + '\\n'\n with open(filename, 'r', encoding='utf-8') as f:\n # Setting encoding explicitly to resolve coding issue on windows\n cfg_text += f.read()\n\n if BASE_KEY in cfg_dict:\n cfg_dir = osp.dirname(filename)\n base_filename = cfg_dict.pop(BASE_KEY)\n base_filename = base_filename if 
isinstance(\n base_filename, list) else [base_filename]\n\n cfg_dict_list = list()\n cfg_text_list = list()\n for f in base_filename:\n _cfg_dict, _cfg_text = Config._file2dict(osp.join(cfg_dir, f))\n cfg_dict_list.append(_cfg_dict)\n cfg_text_list.append(_cfg_text)\n\n base_cfg_dict = dict()\n for c in cfg_dict_list:\n duplicate_keys = base_cfg_dict.keys() & c.keys()\n if len(duplicate_keys) > 0:\n raise KeyError('Duplicate key is not allowed among bases. '\n f'Duplicate keys: {duplicate_keys}')\n base_cfg_dict.update(c)\n\n # Substitute base variables from strings to their actual values\n cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict,\n base_cfg_dict)\n\n base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)\n cfg_dict = base_cfg_dict\n\n # merge cfg_text\n cfg_text_list.append(cfg_text)\n cfg_text = '\\n'.join(cfg_text_list)\n\n return cfg_dict, cfg_text\n\n @staticmethod\n def _merge_a_into_b(a, b, allow_list_keys=False):\n \"\"\"merge dict ``a`` into dict ``b`` (non-inplace).\n\n Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid\n in-place modifications.\n\n Args:\n a (dict): The source dict to be merged into ``b``.\n b (dict): The origin dict to be fetch keys from ``a``.\n allow_list_keys (bool): If True, int string keys (e.g. '0', '1')\n are allowed in source ``a`` and will replace the element of the\n corresponding index in b if b is a list. Default: False.\n\n Returns:\n dict: The modified dict of ``b`` using ``a``.\n\n Examples:\n # Normally merge a into b.\n >>> Config._merge_a_into_b(\n ... dict(obj=dict(a=2)), dict(obj=dict(a=1)))\n {'obj': {'a': 2}}\n\n # Delete b first and merge a into b.\n >>> Config._merge_a_into_b(\n ... dict(obj=dict(_delete_=True, a=2)), dict(obj=dict(a=1)))\n {'obj': {'a': 2}}\n\n # b is a list\n >>> Config._merge_a_into_b(\n ... {'0': dict(a=2)}, [dict(a=1), dict(b=2)], True)\n [{'a': 2}, {'b': 2}]\n \"\"\"\n b = b.copy()\n for k, v in a.items():\n if allow_list_keys and k.isdigit() and isinstance(b, list):\n k = int(k)\n if len(b) <= k:\n raise KeyError(f'Index {k} exceeds the length of list {b}')\n b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)\n elif isinstance(v, dict):\n if k in b and not v.pop(DELETE_KEY, False):\n allowed_types = (dict, list) if allow_list_keys else dict\n if not isinstance(b[k], allowed_types):\n raise TypeError(\n f'{k}={v} in child config cannot inherit from '\n f'base because {k} is a dict in the child config '\n f'but is of type {type(b[k])} in base config. '\n f'You may set `{DELETE_KEY}=True` to ignore the '\n f'base config.')\n b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)\n else:\n b[k] = ConfigDict(v)\n else:\n b[k] = v\n return b\n\n @staticmethod\n def fromfile(filename,\n use_predefined_variables=True,\n import_custom_modules=True):\n cfg_dict, cfg_text = Config._file2dict(filename,\n use_predefined_variables)\n if import_custom_modules and cfg_dict.get('custom_imports', None):\n import_modules_from_strings(**cfg_dict['custom_imports'])\n return Config(cfg_dict, cfg_text=cfg_text, filename=filename)\n\n @staticmethod\n def fromstring(cfg_str, file_format):\n \"\"\"Generate config from config str.\n\n Args:\n cfg_str (str): Config str.\n file_format (str): Config file format corresponding to the\n config str. 
Only py/yml/yaml/json type are supported now!\n\n Returns:\n :obj:`Config`: Config obj.\n \"\"\"\n if file_format not in ['.py', '.json', '.yaml', '.yml']:\n raise IOError('Only py/yml/yaml/json type are supported now!')\n if file_format != '.py' and 'dict(' in cfg_str:\n # check if users specify a wrong suffix for python\n warnings.warn(\n 'Please check \"file_format\", the file format may be .py')\n with tempfile.NamedTemporaryFile(\n 'w', encoding='utf-8', suffix=file_format,\n delete=False) as temp_file:\n temp_file.write(cfg_str)\n # on windows, previous implementation cause error\n # see PR 1077 for details\n cfg = Config.fromfile(temp_file.name)\n os.remove(temp_file.name)\n return cfg\n\n @staticmethod\n def auto_argparser(description=None):\n \"\"\"Generate argparser from config file automatically (experimental)\"\"\"\n partial_parser = ArgumentParser(description=description)\n partial_parser.add_argument('config', help='config file path')\n cfg_file = partial_parser.parse_known_args()[0].config\n cfg = Config.fromfile(cfg_file)\n parser = ArgumentParser(description=description)\n parser.add_argument('config', help='config file path')\n add_args(parser, cfg)\n return parser, cfg\n\n def __init__(self, cfg_dict=None, cfg_text=None, filename=None):\n if cfg_dict is None:\n cfg_dict = dict()\n elif not isinstance(cfg_dict, dict):\n raise TypeError('cfg_dict must be a dict, but '\n f'got {type(cfg_dict)}')\n for key in cfg_dict:\n if key in RESERVED_KEYS:\n raise KeyError(f'{key} is reserved for config file')\n\n super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))\n super(Config, self).__setattr__('_filename', filename)\n if cfg_text:\n text = cfg_text\n elif filename:\n with open(filename, 'r') as f:\n text = f.read()\n else:\n text = ''\n super(Config, self).__setattr__('_text', text)\n\n @property\n def filename(self):\n return self._filename\n\n @property\n def text(self):\n return self._text\n\n @property\n def pretty_text(self):\n\n indent = 4\n\n def _indent(s_, num_spaces):\n s = s_.split('\\n')\n if len(s) == 1:\n return s_\n first = s.pop(0)\n s = [(num_spaces * ' ') + line for line in s]\n s = '\\n'.join(s)\n s = first + '\\n' + s\n return s\n\n def _format_basic_types(k, v, use_mapping=False):\n if isinstance(v, str):\n v_str = f\"'{v}'\"\n else:\n v_str = str(v)\n\n if use_mapping:\n k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n attr_str = f'{k_str}: {v_str}'\n else:\n attr_str = f'{str(k)}={v_str}'\n attr_str = _indent(attr_str, indent)\n\n return attr_str\n\n def _format_list(k, v, use_mapping=False):\n # check if all items in the list are dict\n if all(isinstance(_, dict) for _ in v):\n v_str = '[\\n'\n v_str += '\\n'.join(\n f'dict({_indent(_format_dict(v_), indent)}),'\n for v_ in v).rstrip(',')\n if use_mapping:\n k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n attr_str = f'{k_str}: {v_str}'\n else:\n attr_str = f'{str(k)}={v_str}'\n attr_str = _indent(attr_str, indent) + ']'\n else:\n attr_str = _format_basic_types(k, v, use_mapping)\n return attr_str\n\n def _contain_invalid_identifier(dict_str):\n contain_invalid_identifier = False\n for key_name in dict_str:\n contain_invalid_identifier |= \\\n (not str(key_name).isidentifier())\n return contain_invalid_identifier\n\n def _format_dict(input_dict, outest_level=False):\n r = ''\n s = []\n\n use_mapping = _contain_invalid_identifier(input_dict)\n if use_mapping:\n r += '{'\n for idx, (k, v) in enumerate(input_dict.items()):\n is_last = idx >= len(input_dict) - 1\n end = '' if 
outest_level or is_last else ','\n if isinstance(v, dict):\n v_str = '\\n' + _format_dict(v)\n if use_mapping:\n k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n attr_str = f'{k_str}: dict({v_str}'\n else:\n attr_str = f'{str(k)}=dict({v_str}'\n attr_str = _indent(attr_str, indent) + ')' + end\n elif isinstance(v, list):\n attr_str = _format_list(k, v, use_mapping) + end\n else:\n attr_str = _format_basic_types(k, v, use_mapping) + end\n\n s.append(attr_str)\n r += '\\n'.join(s)\n if use_mapping:\n r += '}'\n return r\n\n cfg_dict = self._cfg_dict.to_dict()\n text = _format_dict(cfg_dict, outest_level=True)\n # copied from setup.cfg\n yapf_style = dict(\n based_on_style='pep8',\n blank_line_before_nested_class_or_def=True,\n split_before_expression_after_opening_paren=True)\n text, _ = FormatCode(text, style_config=yapf_style, verify=True)\n\n return text\n\n def __repr__(self):\n return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}'\n\n def __len__(self):\n return len(self._cfg_dict)\n\n def __getattr__(self, name):\n return getattr(self._cfg_dict, name)\n\n def __getitem__(self, name):\n return self._cfg_dict.__getitem__(name)\n\n def __setattr__(self, name, value):\n if isinstance(value, dict):\n value = ConfigDict(value)\n self._cfg_dict.__setattr__(name, value)\n\n def __setitem__(self, name, value):\n if isinstance(value, dict):\n value = ConfigDict(value)\n self._cfg_dict.__setitem__(name, value)\n\n def __iter__(self):\n return iter(self._cfg_dict)\n\n def __getstate__(self):\n return (self._cfg_dict, self._filename, self._text)\n\n def __copy__(self):\n cls = self.__class__\n other = cls.__new__(cls)\n other.__dict__.update(self.__dict__)\n\n return other\n\n def __deepcopy__(self, memo):\n cls = self.__class__\n other = cls.__new__(cls)\n memo[id(self)] = other\n\n for key, value in self.__dict__.items():\n super(Config, other).__setattr__(key, copy.deepcopy(value, memo))\n\n return other\n\n def __setstate__(self, state):\n _cfg_dict, _filename, _text = state\n super(Config, self).__setattr__('_cfg_dict', _cfg_dict)\n super(Config, self).__setattr__('_filename', _filename)\n super(Config, self).__setattr__('_text', _text)\n\n def dump(self, file=None):\n cfg_dict = super(Config, self).__getattribute__('_cfg_dict').to_dict()\n if self.filename.endswith('.py'):\n if file is None:\n return self.pretty_text\n else:\n with open(file, 'w', encoding='utf-8') as f:\n f.write(self.pretty_text)\n else:\n import mmcv\n if file is None:\n file_format = self.filename.split('.')[-1]\n return mmcv.dump(cfg_dict, file_format=file_format)\n else:\n mmcv.dump(cfg_dict, file)\n\n def merge_from_dict(self, options, allow_list_keys=True):\n \"\"\"Merge list into cfg_dict.\n\n Merge the dict parsed by MultipleKVAction into this cfg.\n\n Examples:\n >>> options = {'model.backbone.depth': 50,\n ... 'model.backbone.with_cp':True}\n >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))\n >>> cfg.merge_from_dict(options)\n >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')\n >>> assert cfg_dict == dict(\n ... model=dict(backbone=dict(depth=50, with_cp=True)))\n\n >>> # Merge list element\n >>> cfg = Config(dict(pipeline=[\n ... dict(type='LoadImage'), dict(type='LoadAnnotations')]))\n >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')})\n >>> cfg.merge_from_dict(options, allow_list_keys=True)\n >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')\n >>> assert cfg_dict == dict(pipeline=[\n ... 
dict(type='SelfLoadImage'), dict(type='LoadAnnotations')])\n\n Args:\n options (dict): dict of configs to merge from.\n allow_list_keys (bool): If True, int string keys (e.g. '0', '1')\n are allowed in ``options`` and will replace the element of the\n corresponding index in the config if the config is a list.\n Default: True.\n \"\"\"\n option_cfg_dict = {}\n for full_key, v in options.items():\n d = option_cfg_dict\n key_list = full_key.split('.')\n for subkey in key_list[:-1]:\n d.setdefault(subkey, ConfigDict())\n d = d[subkey]\n subkey = key_list[-1]\n d[subkey] = v\n\n cfg_dict = super(Config, self).__getattribute__('_cfg_dict')\n super(Config, self).__setattr__(\n '_cfg_dict',\n Config._merge_a_into_b(\n option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys))\n\n\nclass DictAction(Action):\n \"\"\"\n argparse action to split an argument into KEY=VALUE form\n on the first = and append to a dictionary. List options can\n be passed as comma separated values, i.e 'KEY=V1,V2,V3', or with explicit\n brackets, i.e. 'KEY=[V1,V2,V3]'. It also support nested brackets to build\n list/tuple values. e.g. 'KEY=[(V1,V2),(V3,V4)]'\n \"\"\"\n\n @staticmethod\n def _parse_int_float_bool(val):\n try:\n return int(val)\n except ValueError:\n pass\n try:\n return float(val)\n except ValueError:\n pass\n if val.lower() in ['true', 'false']:\n return True if val.lower() == 'true' else False\n if val == 'None':\n return None\n return val\n\n @staticmethod\n def _parse_iterable(val):\n \"\"\"Parse iterable values in the string.\n\n All elements inside '()' or '[]' are treated as iterable values.\n\n Args:\n val (str): Value string.\n\n Returns:\n list | tuple: The expanded list or tuple from the string.\n\n Examples:\n >>> DictAction._parse_iterable('1,2,3')\n [1, 2, 3]\n >>> DictAction._parse_iterable('[a, b, c]')\n ['a', 'b', 'c']\n >>> DictAction._parse_iterable('[(1, 2, 3), [a, b], c]')\n [(1, 2, 3), ['a', 'b'], 'c']\n \"\"\"\n\n def find_next_comma(string):\n \"\"\"Find the position of next comma in the string.\n\n If no ',' is found in the string, return the string length. All\n chars inside '()' and '[]' are treated as one element and thus ','\n inside these brackets are ignored.\n \"\"\"\n assert (string.count('(') == string.count(')')) and (\n string.count('[') == string.count(']')), \\\n f'Imbalanced brackets exist in {string}'\n end = len(string)\n for idx, char in enumerate(string):\n pre = string[:idx]\n # The string before this ',' is balanced\n if ((char == ',') and (pre.count('(') == pre.count(')'))\n and (pre.count('[') == pre.count(']'))):\n end = idx\n break\n return end\n\n # Strip ' and \" characters and replace whitespace.\n val = val.strip('\\'\\\"').replace(' ', '')\n is_tuple = False\n if val.startswith('(') and val.endswith(')'):\n is_tuple = True\n val = val[1:-1]\n elif val.startswith('[') and val.endswith(']'):\n val = val[1:-1]\n elif ',' not in val:\n # val is a single value\n return DictAction._parse_int_float_bool(val)\n\n values = []\n while len(val) > 0:\n comma_idx = find_next_comma(val)\n element = DictAction._parse_iterable(val[:comma_idx])\n values.append(element)\n val = val[comma_idx + 1:]\n if is_tuple:\n values = tuple(values)\n return values\n\n def __call__(self, parser, namespace, values, option_string=None):\n options = {}\n for kv in values:\n key, val = kv.split('=', maxsplit=1)\n options[key] = self._parse_iterable(val)\n setattr(namespace, self.dest, options)\n", "path": "mmcv/utils/config.py" } ]
diff --git a/mmcv/utils/config.py b/mmcv/utils/config.py index 4f96372ae4..473bd1e165 100644 --- a/mmcv/utils/config.py +++ b/mmcv/utils/config.py @@ -638,6 +638,8 @@ def _parse_int_float_bool(val): pass if val.lower() in ['true', 'false']: return True if val.lower() == 'true' else False + if val == 'None': + return None return val @staticmethod diff --git a/tests/test_utils/test_config.py b/tests/test_utils/test_config.py index 6f9f95726c..368016e572 100644 --- a/tests/test_utils/test_config.py +++ b/tests/test_utils/test_config.py @@ -470,9 +470,18 @@ def test_dict_action(): with pytest.raises(AssertionError): parser.parse_args(['--options', 'item2.a=[(a,b), [1,2], false']) # Normal values - args = parser.parse_args( - ['--options', 'item2.a=1', 'item2.b=0.1', 'item2.c=x', 'item3=false']) - out_dict = {'item2.a': 1, 'item2.b': 0.1, 'item2.c': 'x', 'item3': False} + args = parser.parse_args([ + '--options', 'item2.a=1', 'item2.b=0.1', 'item2.c=x', 'item3=false', + 'item4=none', 'item5=None' + ]) + out_dict = { + 'item2.a': 1, + 'item2.b': 0.1, + 'item2.c': 'x', + 'item3': False, + 'item4': 'none', + 'item5': None, + } assert args.options == out_dict cfg_file = osp.join(data_path, 'config/a.py') cfg = Config.fromfile(cfg_file)
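The diff above teaches `DictAction._parse_int_float_bool` to coerce the literal command-line token `'None'` to Python `None`, while any other unrecognised token, including lower-case `'none'`, still falls through as a plain string. A minimal standalone sketch of that coercion rule (not the mmcv implementation itself, just the same precedence of int, float, bool and `None`):

```python
def parse_int_float_bool(val):
    """Standalone sketch of the value coercion the diff above adds to DictAction."""
    try:
        return int(val)
    except ValueError:
        pass
    try:
        return float(val)
    except ValueError:
        pass
    if val.lower() in ['true', 'false']:
        return val.lower() == 'true'
    if val == 'None':   # new branch: only the exact literal 'None' maps to None
        return None
    return val          # anything else, e.g. 'none' or 'x', stays a string


assert parse_int_float_bool('1') == 1
assert parse_int_float_bool('0.1') == 0.1
assert parse_int_float_bool('false') is False
assert parse_int_float_bool('None') is None
assert parse_int_float_bool('none') == 'none'
```

This matches the updated test expectations, where `item5=None` parses to `None` but `item4=none` stays the string `'none'`.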
django-helpdesk__django-helpdesk-645
system_settings url should only be available to staff users

Currently, the template starts to render before it fails if the user is not logged in.
[ { "content": "\"\"\"\ndjango-helpdesk - A Django powered ticket tracker for small enterprise.\n\n(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.\n\nurls.py - Mapping of URL's to our various views. Note we always used NAMED\n views for simplicity in linking later on.\n\"\"\"\n\nfrom django.conf.urls import url\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import views as auth_views\nfrom django.views.generic import TemplateView\n\nfrom helpdesk.decorators import helpdesk_staff_member_required, protect_view\nfrom helpdesk import settings as helpdesk_settings\nfrom helpdesk.views import feeds, staff, public, kb\n\n\nclass DirectTemplateView(TemplateView):\n extra_context = None\n\n def get_context_data(self, **kwargs):\n context = super(self.__class__, self).get_context_data(**kwargs)\n if self.extra_context is not None:\n for key, value in self.extra_context.items():\n if callable(value):\n context[key] = value()\n else:\n context[key] = value\n return context\n\n\napp_name = 'helpdesk'\n\nurlpatterns = [\n url(r'^dashboard/$',\n staff.dashboard,\n name='dashboard'),\n\n url(r'^tickets/$',\n staff.ticket_list,\n name='list'),\n\n url(r'^tickets/update/$',\n staff.mass_update,\n name='mass_update'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/$',\n staff.view_ticket,\n name='view'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/followup_edit/(?P<followup_id>[0-9]+)/$',\n staff.followup_edit,\n name='followup_edit'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/followup_delete/(?P<followup_id>[0-9]+)/$',\n staff.followup_delete,\n name='followup_delete'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/edit/$',\n staff.edit_ticket,\n name='edit'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/update/$',\n staff.update_ticket,\n name='update'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/delete/$',\n staff.delete_ticket,\n name='delete'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/hold/$',\n staff.hold_ticket,\n name='hold'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/unhold/$',\n staff.unhold_ticket,\n name='unhold'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/cc/$',\n staff.ticket_cc,\n name='ticket_cc'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/cc/add/$',\n staff.ticket_cc_add,\n name='ticket_cc_add'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/cc/delete/(?P<cc_id>[0-9]+)/$',\n staff.ticket_cc_del,\n name='ticket_cc_del'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/dependency/add/$',\n staff.ticket_dependency_add,\n name='ticket_dependency_add'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/dependency/delete/(?P<dependency_id>[0-9]+)/$',\n staff.ticket_dependency_del,\n name='ticket_dependency_del'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/attachment_delete/(?P<attachment_id>[0-9]+)/$',\n staff.attachment_del,\n name='attachment_del'),\n\n url(r'^raw/(?P<type>\\w+)/$',\n staff.raw_details,\n name='raw'),\n\n url(r'^rss/$',\n staff.rss_list,\n name='rss_index'),\n\n url(r'^reports/$',\n staff.report_index,\n name='report_index'),\n\n url(r'^reports/(?P<report>\\w+)/$',\n staff.run_report,\n name='run_report'),\n\n url(r'^save_query/$',\n staff.save_query,\n name='savequery'),\n\n url(r'^delete_query/(?P<id>[0-9]+)/$',\n staff.delete_saved_query,\n name='delete_query'),\n\n url(r'^settings/$',\n staff.user_settings,\n name='user_settings'),\n\n url(r'^ignore/$',\n staff.email_ignore,\n name='email_ignore'),\n\n url(r'^ignore/add/$',\n staff.email_ignore_add,\n name='email_ignore_add'),\n\n url(r'^ignore/delete/(?P<id>[0-9]+)/$',\n staff.email_ignore_del,\n 
name='email_ignore_del'),\n]\n\nurlpatterns += [\n url(r'^$',\n protect_view(public.Homepage.as_view()),\n name='home'),\n\n url(r'^tickets/submit/$',\n public.create_ticket,\n name='submit'),\n\n url(r'^view/$',\n public.view_ticket,\n name='public_view'),\n\n url(r'^change_language/$',\n public.change_language,\n name='public_change_language'),\n]\n\nurlpatterns += [\n url(r'^rss/user/(?P<user_name>[^/]+)/$',\n helpdesk_staff_member_required(feeds.OpenTicketsByUser()),\n name='rss_user'),\n\n url(r'^rss/user/(?P<user_name>[^/]+)/(?P<queue_slug>[A-Za-z0-9_-]+)/$',\n helpdesk_staff_member_required(feeds.OpenTicketsByUser()),\n name='rss_user_queue'),\n\n url(r'^rss/queue/(?P<queue_slug>[A-Za-z0-9_-]+)/$',\n helpdesk_staff_member_required(feeds.OpenTicketsByQueue()),\n name='rss_queue'),\n\n url(r'^rss/unassigned/$',\n helpdesk_staff_member_required(feeds.UnassignedTickets()),\n name='rss_unassigned'),\n\n url(r'^rss/recent_activity/$',\n helpdesk_staff_member_required(feeds.RecentFollowUps()),\n name='rss_activity'),\n]\n\n\nurlpatterns += [\n url(r'^login/$',\n auth_views.LoginView.as_view(\n template_name='helpdesk/registration/login.html'),\n name='login'),\n\n url(r'^logout/$',\n auth_views.LogoutView.as_view(\n template_name='helpdesk/registration/login.html',\n next_page='../'),\n name='logout'),\n]\n\nif helpdesk_settings.HELPDESK_KB_ENABLED:\n urlpatterns += [\n url(r'^kb/$',\n kb.index,\n name='kb_index'),\n\n url(r'^kb/(?P<item>[0-9]+)/$',\n kb.item,\n name='kb_item'),\n\n url(r'^kb/(?P<item>[0-9]+)/vote/$',\n kb.vote,\n name='kb_vote'),\n\n url(r'^kb/(?P<slug>[A-Za-z0-9_-]+)/$',\n kb.category,\n name='kb_category'),\n ]\n\nurlpatterns += [\n url(r'^help/context/$',\n TemplateView.as_view(template_name='helpdesk/help_context.html'),\n name='help_context'),\n\n url(r'^system_settings/$',\n DirectTemplateView.as_view(template_name='helpdesk/system_settings.html'),\n name='system_settings'),\n]\n", "path": "helpdesk/urls.py" } ]
[ { "content": "\"\"\"\ndjango-helpdesk - A Django powered ticket tracker for small enterprise.\n\n(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.\n\nurls.py - Mapping of URL's to our various views. Note we always used NAMED\n views for simplicity in linking later on.\n\"\"\"\n\nfrom django.conf.urls import url\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import views as auth_views\nfrom django.views.generic import TemplateView\n\nfrom helpdesk.decorators import helpdesk_staff_member_required, protect_view\nfrom helpdesk import settings as helpdesk_settings\nfrom helpdesk.views import feeds, staff, public, kb\n\n\nclass DirectTemplateView(TemplateView):\n extra_context = None\n\n def get_context_data(self, **kwargs):\n context = super(self.__class__, self).get_context_data(**kwargs)\n if self.extra_context is not None:\n for key, value in self.extra_context.items():\n if callable(value):\n context[key] = value()\n else:\n context[key] = value\n return context\n\n\napp_name = 'helpdesk'\n\nurlpatterns = [\n url(r'^dashboard/$',\n staff.dashboard,\n name='dashboard'),\n\n url(r'^tickets/$',\n staff.ticket_list,\n name='list'),\n\n url(r'^tickets/update/$',\n staff.mass_update,\n name='mass_update'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/$',\n staff.view_ticket,\n name='view'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/followup_edit/(?P<followup_id>[0-9]+)/$',\n staff.followup_edit,\n name='followup_edit'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/followup_delete/(?P<followup_id>[0-9]+)/$',\n staff.followup_delete,\n name='followup_delete'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/edit/$',\n staff.edit_ticket,\n name='edit'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/update/$',\n staff.update_ticket,\n name='update'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/delete/$',\n staff.delete_ticket,\n name='delete'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/hold/$',\n staff.hold_ticket,\n name='hold'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/unhold/$',\n staff.unhold_ticket,\n name='unhold'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/cc/$',\n staff.ticket_cc,\n name='ticket_cc'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/cc/add/$',\n staff.ticket_cc_add,\n name='ticket_cc_add'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/cc/delete/(?P<cc_id>[0-9]+)/$',\n staff.ticket_cc_del,\n name='ticket_cc_del'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/dependency/add/$',\n staff.ticket_dependency_add,\n name='ticket_dependency_add'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/dependency/delete/(?P<dependency_id>[0-9]+)/$',\n staff.ticket_dependency_del,\n name='ticket_dependency_del'),\n\n url(r'^tickets/(?P<ticket_id>[0-9]+)/attachment_delete/(?P<attachment_id>[0-9]+)/$',\n staff.attachment_del,\n name='attachment_del'),\n\n url(r'^raw/(?P<type>\\w+)/$',\n staff.raw_details,\n name='raw'),\n\n url(r'^rss/$',\n staff.rss_list,\n name='rss_index'),\n\n url(r'^reports/$',\n staff.report_index,\n name='report_index'),\n\n url(r'^reports/(?P<report>\\w+)/$',\n staff.run_report,\n name='run_report'),\n\n url(r'^save_query/$',\n staff.save_query,\n name='savequery'),\n\n url(r'^delete_query/(?P<id>[0-9]+)/$',\n staff.delete_saved_query,\n name='delete_query'),\n\n url(r'^settings/$',\n staff.user_settings,\n name='user_settings'),\n\n url(r'^ignore/$',\n staff.email_ignore,\n name='email_ignore'),\n\n url(r'^ignore/add/$',\n staff.email_ignore_add,\n name='email_ignore_add'),\n\n url(r'^ignore/delete/(?P<id>[0-9]+)/$',\n staff.email_ignore_del,\n 
name='email_ignore_del'),\n]\n\nurlpatterns += [\n url(r'^$',\n protect_view(public.Homepage.as_view()),\n name='home'),\n\n url(r'^tickets/submit/$',\n public.create_ticket,\n name='submit'),\n\n url(r'^view/$',\n public.view_ticket,\n name='public_view'),\n\n url(r'^change_language/$',\n public.change_language,\n name='public_change_language'),\n]\n\nurlpatterns += [\n url(r'^rss/user/(?P<user_name>[^/]+)/$',\n helpdesk_staff_member_required(feeds.OpenTicketsByUser()),\n name='rss_user'),\n\n url(r'^rss/user/(?P<user_name>[^/]+)/(?P<queue_slug>[A-Za-z0-9_-]+)/$',\n helpdesk_staff_member_required(feeds.OpenTicketsByUser()),\n name='rss_user_queue'),\n\n url(r'^rss/queue/(?P<queue_slug>[A-Za-z0-9_-]+)/$',\n helpdesk_staff_member_required(feeds.OpenTicketsByQueue()),\n name='rss_queue'),\n\n url(r'^rss/unassigned/$',\n helpdesk_staff_member_required(feeds.UnassignedTickets()),\n name='rss_unassigned'),\n\n url(r'^rss/recent_activity/$',\n helpdesk_staff_member_required(feeds.RecentFollowUps()),\n name='rss_activity'),\n]\n\n\nurlpatterns += [\n url(r'^login/$',\n auth_views.LoginView.as_view(\n template_name='helpdesk/registration/login.html'),\n name='login'),\n\n url(r'^logout/$',\n auth_views.LogoutView.as_view(\n template_name='helpdesk/registration/login.html',\n next_page='../'),\n name='logout'),\n]\n\nif helpdesk_settings.HELPDESK_KB_ENABLED:\n urlpatterns += [\n url(r'^kb/$',\n kb.index,\n name='kb_index'),\n\n url(r'^kb/(?P<item>[0-9]+)/$',\n kb.item,\n name='kb_item'),\n\n url(r'^kb/(?P<item>[0-9]+)/vote/$',\n kb.vote,\n name='kb_vote'),\n\n url(r'^kb/(?P<slug>[A-Za-z0-9_-]+)/$',\n kb.category,\n name='kb_category'),\n ]\n\nurlpatterns += [\n url(r'^help/context/$',\n TemplateView.as_view(template_name='helpdesk/help_context.html'),\n name='help_context'),\n\n url(r'^system_settings/$',\n login_required(DirectTemplateView.as_view(template_name='helpdesk/system_settings.html')),\n name='system_settings'),\n]\n", "path": "helpdesk/urls.py" } ]
diff --git a/helpdesk/urls.py b/helpdesk/urls.py index 969d8dd1d..02d1ff1d0 100644 --- a/helpdesk/urls.py +++ b/helpdesk/urls.py @@ -222,6 +222,6 @@ def get_context_data(self, **kwargs): name='help_context'), url(r'^system_settings/$', - DirectTemplateView.as_view(template_name='helpdesk/system_settings.html'), + login_required(DirectTemplateView.as_view(template_name='helpdesk/system_settings.html')), name='system_settings'), ]
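The patch above gates the `system_settings` page behind Django's `login_required`, so an anonymous request is redirected to the login view instead of the template starting to render and then failing. A minimal sketch of the same URL-level wrapping (a standalone `urlpatterns`, not the full `helpdesk/urls.py`); since the report asks for staff-only access, the `helpdesk_staff_member_required` decorator already imported in that file could be swapped in for `login_required`:

```python
# Sketch of gating a TemplateView at the URL level, using the same old-style
# django.conf.urls routing as helpdesk/urls.py above; not the full URL configuration.
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView

urlpatterns = [
    url(r'^system_settings/$',
        login_required(
            TemplateView.as_view(template_name='helpdesk/system_settings.html')),
        name='system_settings'),
]
```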
pre-commit__pre-commit-67
TypeError while instantiating LoggingHandler (2.6) I assume this is new-style vs old-style classes being grumpy? ``` >>> from pre_commit.logging_handler import LoggingHandler >>> LoggingHandler(True) Traceback (most recent call last): File "<stdin>", line 1, in <module> File ".../py_env/lib/python2.6/site-packages/pre_commit/logging_handler.py", line 19, in __init__ super(LoggingHandler, self).__init__() TypeError: super() argument 1 must be type, not classobj ```
[ { "content": "\nfrom __future__ import print_function\n\nimport logging\n\nfrom pre_commit import color\n\n\nLOG_LEVEL_COLORS = {\n 'DEBUG': '',\n 'INFO': '',\n 'WARNING': color.YELLOW,\n 'ERROR': color.RED,\n}\n\n\nclass LoggingHandler(logging.Handler):\n def __init__(self, use_color):\n super(LoggingHandler, self).__init__()\n self.use_color = use_color\n\n def emit(self, record):\n print(\n u'{0}{1}'.format(\n color.format_color(\n '[{0}]'.format(record.levelname),\n LOG_LEVEL_COLORS[record.levelname],\n self.use_color,\n ) + ' ' if record.levelno >= logging.WARNING else '',\n record.getMessage(),\n )\n )\n", "path": "pre_commit/logging_handler.py" } ]
[ { "content": "\nfrom __future__ import print_function\n\nimport logging\n\nfrom pre_commit import color\n\n\nLOG_LEVEL_COLORS = {\n 'DEBUG': '',\n 'INFO': '',\n 'WARNING': color.YELLOW,\n 'ERROR': color.RED,\n}\n\n\nclass LoggingHandler(logging.Handler):\n def __init__(self, use_color):\n logging.Handler.__init__(self)\n self.use_color = use_color\n\n def emit(self, record):\n print(\n u'{0}{1}'.format(\n color.format_color(\n '[{0}]'.format(record.levelname),\n LOG_LEVEL_COLORS[record.levelname],\n self.use_color,\n ) + ' ' if record.levelno >= logging.WARNING else '',\n record.getMessage(),\n )\n )\n", "path": "pre_commit/logging_handler.py" } ]
diff --git a/pre_commit/logging_handler.py b/pre_commit/logging_handler.py index 11736d1b5..70b61e730 100644 --- a/pre_commit/logging_handler.py +++ b/pre_commit/logging_handler.py @@ -16,7 +16,7 @@ class LoggingHandler(logging.Handler): def __init__(self, use_color): - super(LoggingHandler, self).__init__() + logging.Handler.__init__(self) self.use_color = use_color def emit(self, record): diff --git a/tests/logging_handler_test.py b/tests/logging_handler_test.py new file mode 100644 index 000000000..d2fed4189 --- /dev/null +++ b/tests/logging_handler_test.py @@ -0,0 +1,38 @@ +import __builtin__ +import mock +import pytest + +from pre_commit import color +from pre_commit.logging_handler import LoggingHandler + + [email protected]_fixture +def print_mock(): + with mock.patch.object(__builtin__, 'print', autospec=True) as print_mock: + yield print_mock + + +class FakeLogRecord(object): + def __init__(self, message, levelname, levelno): + self.message = message + self.levelname = levelname + self.levelno = levelno + + def getMessage(self): + return self.message + + +def test_logging_handler_color(print_mock): + handler = LoggingHandler(True) + handler.emit(FakeLogRecord('hi', 'WARNING', 30)) + print_mock.assert_called_once_with( + color.YELLOW + '[WARNING]' + color.NORMAL + ' hi', + ) + + +def test_logging_handler_no_color(print_mock): + handler = LoggingHandler(False) + handler.emit(FakeLogRecord('hi', 'WARNING', 30)) + print_mock.assert_called_once_with( + '[WARNING] hi', + )
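The `must be type, not classobj` message in the traceback confirms the reporter's guess: on Python 2.6, `logging.Handler` is still an old-style class, so `super(LoggingHandler, self).__init__()` cannot work, and the patch above simply calls the base initializer directly. A standalone sketch of the pitfall and the workaround, with made-up class names (the old-style behaviour only exists under Python 2; on Python 3 every class is new-style, so the snippet runs either way):

```python
class LegacyHandler:                     # no `object` base -> old-style class on Python 2
    def __init__(self):
        self.initialised = True


class ColourHandler(LegacyHandler):
    def __init__(self, use_color):
        # super(ColourHandler, self).__init__()   # Python 2: TypeError: super() argument 1
        #                                         # must be type, not classobj
        LegacyHandler.__init__(self)              # workaround used in the patch above
        self.use_color = use_color


handler = ColourHandler(True)
assert handler.initialised and handler.use_color
```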
opendatacube__datacube-core-747
Dataset `__eq__` fails when other object is not a Dataset ### Expected behaviour ```python ds, *_ = dc.find_datasets(..) assert (ds == "33") is False ``` ### Actual behaviour Error is raised inside `__eq__` operator. ``` --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-17-a8ddf445deb8> in <module> 1 ds, *_ = dc.find_datasets(product=product, limit=10) 2 ----> 3 assert (ds == "33") is False ~/wk/datacube-core/datacube/model/__init__.py in __eq__(self, other) 286 287 def __eq__(self, other) -> bool: --> 288 return self.id == other.id 289 290 def __hash__(self): AttributeError: 'str' object has no attribute 'id' ```
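The traceback points at `Dataset.__eq__` reading `other.id` unconditionally; the patched `datacube/model/__init__.py` in this record adds an `isinstance` check and treats any non-`Dataset` operand as simply not equal. A standalone sketch of that defensive pattern with a toy class (not the real datacube `Dataset`):

```python
class Record:                     # toy stand-in for an object identified by an id
    def __init__(self, id_):
        self.id = id_

    def __eq__(self, other):
        if isinstance(other, Record):
            return self.id == other.id
        return False              # comparing against anything else is just "not equal"

    def __hash__(self):           # keep hashing consistent with __eq__, as Dataset does
        return hash(self.id)


assert (Record("33") == "33") is False
assert Record("33") == Record("33")
```

Returning `NotImplemented` instead of `False` would let Python try the other operand's `__eq__` as a fallback; the merged change opts for a plain `False`, which is enough to make `ds == "33"` evaluate to `False` as the expected behaviour above asks.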
[ { "content": "# coding=utf-8\n\"\"\"\nCore classes used across modules.\n\"\"\"\nimport logging\nimport math\nimport warnings\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom pathlib import Path\nfrom uuid import UUID\n\nfrom affine import Affine\nfrom typing import Optional, List, Mapping, Any, Dict, Tuple, Iterator\n\nfrom urllib.parse import urlparse\nfrom datacube.utils import geometry, without_lineage_sources, parse_time, cached_property, uri_to_local_path, \\\n schema_validated, DocReader\nfrom .fields import Field\nfrom ._base import Range\n\n_LOG = logging.getLogger(__name__)\n\nDEFAULT_SPATIAL_DIMS = ('y', 'x') # Used when product lacks grid_spec\n\nSCHEMA_PATH = Path(__file__).parent / 'schema'\n\n\nclass Dataset(object):\n \"\"\"\n A Dataset. A container of metadata, and refers typically to a multi-band raster on disk.\n\n Most important parts are the metadata_doc and uri.\n\n :param metadata_doc: the document (typically a parsed json/yaml)\n :param uris: All active uris for the dataset\n \"\"\"\n\n def __init__(self,\n type_: 'DatasetType',\n metadata_doc: dict,\n local_uri: Optional[str] = None,\n uris: Optional[List[str]] = None,\n sources: Optional[Mapping[str, 'Dataset']] = None,\n indexed_by: Optional[str] = None,\n indexed_time: Optional[datetime] = None,\n archived_time: Optional[datetime] = None):\n assert isinstance(type_, DatasetType)\n\n self.type = type_\n\n #: The document describing the dataset as a dictionary. It is often serialised as YAML on disk\n #: or inside a NetCDF file, and as JSON-B inside the database index.\n self.metadata_doc = metadata_doc\n\n if local_uri:\n warnings.warn(\n \"Dataset.local_uri has been replaced with list Dataset.uris\",\n DeprecationWarning\n )\n if not uris:\n uris = []\n\n uris.append(local_uri)\n\n #: Active URIs in order from newest to oldest\n self.uris = uris\n\n #: The datasets that this dataset is derived from (if requested on load).\n self.sources = sources\n\n if self.sources is not None:\n assert set(self.metadata.sources.keys()) == set(self.sources.keys())\n\n #: The User who indexed this dataset\n self.indexed_by = indexed_by\n self.indexed_time = indexed_time\n # When the dataset was archived. 
Null it not archived.\n self.archived_time = archived_time\n\n @property\n def metadata_type(self) -> Optional['MetadataType']:\n return self.type.metadata_type if self.type else None\n\n @property\n def local_uri(self) -> Optional[str]:\n \"\"\"\n The latest local file uri, if any.\n \"\"\"\n if self.uris is None:\n return None\n\n local_uris = [uri for uri in self.uris if uri.startswith('file:')]\n if local_uris:\n return local_uris[0]\n\n return None\n\n @property\n def local_path(self) -> Optional[Path]:\n \"\"\"\n A path to this dataset on the local filesystem (if available).\n \"\"\"\n return uri_to_local_path(self.local_uri)\n\n @property\n def id(self) -> UUID:\n \"\"\" UUID of a dataset\n \"\"\"\n # This is a string in a raw document.\n return UUID(self.metadata.id)\n\n @property\n def managed(self) -> bool:\n return self.type.managed\n\n @property\n def format(self) -> str:\n return self.metadata.format\n\n @property\n def uri_scheme(self) -> str:\n if self.uris is None or len(self.uris) == 0:\n return ''\n\n url = urlparse(self.uris[0])\n if url.scheme == '':\n return 'file'\n return url.scheme\n\n @property\n def measurements(self) -> Dict[str, Any]:\n # It's an optional field in documents.\n # Dictionary of key -> measurement descriptor\n if not hasattr(self.metadata, 'measurements'):\n return {}\n return self.metadata.measurements\n\n @cached_property\n def center_time(self) -> Optional[datetime]:\n \"\"\" mid-point of time range\n \"\"\"\n time = self.time\n if time is None:\n return None\n return time.begin + (time.end - time.begin) // 2\n\n @property\n def time(self) -> Optional[Range]:\n try:\n time = self.metadata.time\n return Range(parse_time(time.begin), parse_time(time.end))\n except AttributeError:\n return None\n\n @cached_property\n def key_time(self):\n \"\"\"\n :rtype: datetime.datetime\n \"\"\"\n if 'key_time' in self.metadata.fields:\n return self.metadata.key_time\n\n # Existing datasets are already using the computed \"center_time\" for their storage index key\n # if 'center_time' in self.metadata.fields:\n # return self.metadata.center_time\n\n return self.center_time\n\n @property\n def bounds(self) -> Optional[geometry.BoundingBox]:\n \"\"\" :returns: bounding box of the dataset in the native crs\n \"\"\"\n gs = self._gs\n if gs is None:\n return None\n\n bounds = gs['geo_ref_points']\n return geometry.BoundingBox(left=min(bounds['ur']['x'], bounds['ll']['x']),\n right=max(bounds['ur']['x'], bounds['ll']['x']),\n top=max(bounds['ur']['y'], bounds['ll']['y']),\n bottom=min(bounds['ur']['y'], bounds['ll']['y']))\n\n @property\n def transform(self) -> Optional[Affine]:\n geo = self._gs\n if geo is None:\n return None\n\n bounds = geo.get('geo_ref_points')\n if bounds is None:\n return None\n\n return Affine(bounds['lr']['x'] - bounds['ul']['x'], 0, bounds['ul']['x'],\n 0, bounds['lr']['y'] - bounds['ul']['y'], bounds['ul']['y'])\n\n @property\n def is_archived(self) -> bool:\n \"\"\"\n Is this dataset archived?\n\n (an archived dataset is one that is not intended to be used by users anymore: eg. it has been\n replaced by another dataset. It will not show up in search results, but still exists in the\n system via provenance chains or through id lookup.)\n\n \"\"\"\n return self.archived_time is not None\n\n @property\n def is_active(self) -> bool:\n \"\"\"\n Is this dataset active?\n\n (ie. 
dataset hasn't been archived)\n\n \"\"\"\n return not self.is_archived\n\n @property\n def _gs(self) -> Optional[Dict[str, Any]]:\n try:\n return self.metadata.grid_spatial\n except AttributeError:\n return None\n\n @property\n def crs(self) -> Optional[geometry.CRS]:\n \"\"\" Return CRS if available\n \"\"\"\n projection = self._gs\n\n if not projection:\n return None\n\n crs = projection.get('spatial_reference', None)\n if crs:\n return geometry.CRS(str(crs))\n\n # Try to infer CRS\n zone_ = projection.get('zone')\n datum_ = projection.get('datum')\n if zone_ and datum_:\n try:\n # TODO: really need CRS specified properly in agdc-metadata.yaml\n if datum_ == 'GDA94':\n return geometry.CRS('EPSG:283' + str(abs(zone_)))\n if datum_ == 'WGS84':\n if zone_[-1] == 'S':\n return geometry.CRS('EPSG:327' + str(abs(int(zone_[:-1]))))\n else:\n return geometry.CRS('EPSG:326' + str(abs(int(zone_[:-1]))))\n except geometry.InvalidCRSError:\n # We still return None, as they didn't specify a CRS explicitly...\n _LOG.warning(\n \"Can't figure out projection: possibly invalid zone (%r) for datum (%r).\", zone_, datum_\n )\n\n return None\n\n @cached_property\n def extent(self) -> Optional[geometry.Geometry]:\n \"\"\" :returns: valid extent of the dataset or None\n \"\"\"\n\n def xytuple(obj):\n return obj['x'], obj['y']\n\n # If no projection or crs, they have no extent.\n projection = self._gs\n if not projection:\n return None\n crs = self.crs\n if not crs:\n _LOG.debug(\"No CRS, assuming no extent (dataset %s)\", self.id)\n return None\n\n valid_data = projection.get('valid_data')\n geo_ref_points = projection.get('geo_ref_points')\n if valid_data:\n return geometry.Geometry(valid_data, crs=crs)\n elif geo_ref_points:\n return geometry.polygon([xytuple(geo_ref_points[key]) for key in ('ll', 'ul', 'ur', 'lr', 'll')],\n crs=crs)\n\n return None\n\n def __eq__(self, other) -> bool:\n return self.id == other.id\n\n def __hash__(self):\n return hash(self.id)\n\n def __str__(self):\n str_loc = 'not available' if not self.uris else self.uris[0]\n return \"Dataset <id={id} type={type} location={loc}>\".format(id=self.id,\n type=self.type.name,\n loc=str_loc)\n\n def __repr__(self) -> str:\n return self.__str__()\n\n @property\n def metadata(self) -> DocReader:\n if self.metadata_type is None:\n raise ValueError('Can not interpret dataset without metadata type set')\n return self.metadata_type.dataset_reader(self.metadata_doc)\n\n def metadata_doc_without_lineage(self) -> Dict[str, Any]:\n \"\"\" Return metadata document without nested lineage datasets\n \"\"\"\n return without_lineage_sources(self.metadata_doc, self.metadata_type)\n\n\nclass Measurement(dict):\n \"\"\"\n Describes a single data variable of a Product or Dataset.\n\n Must include, which can be used when loading and interpreting data:\n\n - name\n - dtype - eg: int8, int16, float32\n - nodata - What value represent No Data\n - units\n\n Attributes can be accessed using ``dict []`` syntax.\n\n Can also include attributes like alternative names 'aliases', and spectral and bit flags\n definitions to aid with interpreting the data.\n\n \"\"\"\n REQUIRED_KEYS = ('name', 'dtype', 'nodata', 'units')\n OPTIONAL_KEYS = ('aliases', 'spectral_definition', 'flags_definition')\n ATTR_BLACKLIST = set(['name', 'dtype', 'aliases', 'resampling_method', 'fuser'])\n\n def __init__(self, **kwargs):\n missing_keys = set(self.REQUIRED_KEYS) - set(kwargs)\n if missing_keys:\n raise ValueError(\"Measurement required keys missing: {}\".format(missing_keys))\n\n 
super().__init__(**kwargs)\n\n def __getattr__(self, key: str) -> Any:\n \"\"\" Allow access to items as attributes. \"\"\"\n v = self.get(key, self)\n if v is self:\n raise AttributeError(\"'Measurement' object has no attribute '{}'\".format(key))\n return v\n\n def __repr__(self) -> str:\n return \"Measurement({})\".format(super(Measurement, self).__repr__())\n\n def copy(self) -> 'Measurement':\n \"\"\"Required as the super class `dict` method returns a `dict`\n and does not preserve Measurement class\"\"\"\n return Measurement(**self)\n\n def dataarray_attrs(self) -> Dict[str, Any]:\n \"\"\"This returns attributes filtered for display in a dataarray.\"\"\"\n return {key: value for key, value in self.items() if key not in self.ATTR_BLACKLIST}\n\n\n@schema_validated(SCHEMA_PATH / 'metadata-type-schema.yaml')\nclass MetadataType(object):\n \"\"\"Metadata Type definition\"\"\"\n\n def __init__(self,\n definition: Mapping[str, Any],\n dataset_search_fields: Mapping[str, Field],\n id_: Optional[int] = None):\n self.definition = definition\n self.dataset_fields = dataset_search_fields\n self.id = id_\n\n @property\n def name(self) -> str:\n return self.definition['name']\n\n @property\n def description(self) -> str:\n return self.definition['description']\n\n def dataset_reader(self, dataset_doc: Mapping[str, Field]) -> DocReader:\n return DocReader(self.definition['dataset'], self.dataset_fields, dataset_doc)\n\n def __str__(self) -> str:\n return \"MetadataType(name={name!r}, id_={id!r})\".format(id=self.id, name=self.name)\n\n def __repr__(self) -> str:\n return str(self)\n\n\n@schema_validated(SCHEMA_PATH / 'dataset-type-schema.yaml')\nclass DatasetType(object):\n \"\"\"\n Product definition\n\n :param MetadataType metadata_type:\n :param dict definition:\n \"\"\"\n\n def __init__(self,\n metadata_type: MetadataType,\n definition: Mapping[str, Any],\n id_: Optional[int] = None):\n assert isinstance(metadata_type, MetadataType)\n self.id = id_\n self.metadata_type = metadata_type\n #: product definition document\n self.definition = definition\n\n @property\n def name(self) -> str:\n return self.definition['name']\n\n @property\n def managed(self) -> bool:\n return self.definition.get('managed', False)\n\n @property\n def metadata_doc(self) -> Mapping[str, Any]:\n return self.definition['metadata']\n\n @property\n def metadata(self) -> DocReader:\n return self.metadata_type.dataset_reader(self.metadata_doc)\n\n @property\n def fields(self):\n return self.metadata_type.dataset_reader(self.metadata_doc).fields\n\n @property\n def measurements(self) -> Mapping[str, Measurement]:\n \"\"\"\n Dictionary of measurements in this product\n \"\"\"\n return OrderedDict((m['name'], Measurement(**m)) for m in self.definition.get('measurements', []))\n\n @property\n def dimensions(self) -> Tuple[str, str]:\n \"\"\"\n List of dimension labels for data in this product\n \"\"\"\n assert self.metadata_type.name == 'eo'\n if self.grid_spec is not None:\n spatial_dims = self.grid_spec.dimensions\n else:\n spatial_dims = DEFAULT_SPATIAL_DIMS\n\n return ('time',) + spatial_dims\n\n @cached_property\n def grid_spec(self) -> Optional['GridSpec']:\n \"\"\"\n Grid specification for this product\n \"\"\"\n storage = self.definition.get('storage')\n if storage is None:\n return None\n\n crs = storage.get('crs')\n if crs is None:\n return None\n\n crs = geometry.CRS(str(crs).strip())\n\n def extract_point(name):\n xx = storage.get(name, None)\n return None if xx is None else tuple(xx[dim] for dim in crs.dimensions)\n\n 
gs_params = {name: extract_point(name)\n for name in ('tile_size', 'resolution', 'origin')}\n\n return GridSpec(crs=crs, **gs_params)\n\n def canonical_measurement(self, measurement: str) -> str:\n \"\"\" resolve measurement alias into canonical name\n \"\"\"\n for m in self.measurements:\n if measurement == m:\n return measurement\n elif measurement in self.measurements[m].get('aliases', []):\n return m\n raise KeyError(measurement)\n\n def lookup_measurements(self, measurements: Optional[List[str]] = None) -> Mapping[str, Measurement]:\n \"\"\"\n Find measurements by name\n\n :param measurements: list of measurement names\n \"\"\"\n my_measurements = self.measurements\n if measurements is None:\n return my_measurements\n canonical = [self.canonical_measurement(measurement) for measurement in measurements]\n return OrderedDict((measurement, my_measurements[measurement]) for measurement in canonical)\n\n def dataset_reader(self, dataset_doc):\n return self.metadata_type.dataset_reader(dataset_doc)\n\n def to_dict(self) -> Mapping[str, Any]:\n \"\"\"\n Convert to a dictionary representation of the available fields\n \"\"\"\n row = {\n 'id': self.id,\n 'name': self.name,\n 'description': self.definition['description'],\n }\n row.update(self.fields)\n if self.grid_spec is not None:\n row.update({\n 'crs': str(self.grid_spec.crs),\n 'spatial_dimensions': self.grid_spec.dimensions,\n 'tile_size': self.grid_spec.tile_size,\n 'resolution': self.grid_spec.resolution,\n })\n return row\n\n def __str__(self) -> str:\n return \"DatasetType(name={name!r}, id_={id!r})\".format(id=self.id, name=self.name)\n\n def __repr__(self) -> str:\n return self.__str__()\n\n # Types are uniquely identifiable by name:\n\n def __eq__(self, other) -> bool:\n if self is other:\n return True\n\n if self.__class__ != other.__class__:\n return False\n\n return self.name == other.name\n\n def __hash__(self):\n return hash(self.name)\n\n\nclass GridSpec(object):\n \"\"\"\n Definition for a regular spatial grid\n\n >>> gs = GridSpec(crs=geometry.CRS('EPSG:4326'), tile_size=(1, 1), resolution=(-0.1, 0.1), origin=(-50.05, 139.95))\n >>> gs.tile_resolution\n (10, 10)\n >>> list(gs.tiles(geometry.BoundingBox(140, -50, 141.5, -48.5)))\n [((0, 0), GeoBox(10, 10, Affine(0.1, 0.0, 139.95,\n 0.0, -0.1, -49.05), EPSG:4326)), ((1, 0), GeoBox(10, 10, Affine(0.1, 0.0, 140.95,\n 0.0, -0.1, -49.05), EPSG:4326)), ((0, 1), GeoBox(10, 10, Affine(0.1, 0.0, 139.95,\n 0.0, -0.1, -48.05), EPSG:4326)), ((1, 1), GeoBox(10, 10, Affine(0.1, 0.0, 140.95,\n 0.0, -0.1, -48.05), EPSG:4326))]\n\n :param geometry.CRS crs: Coordinate System used to define the grid\n :param [float,float] tile_size: (Y, X) size of each tile, in CRS units\n :param [float,float] resolution: (Y, X) size of each data point in the grid, in CRS units. Y will\n usually be negative.\n :param [float,float] origin: (Y, X) coordinates of a corner of the (0,0) tile in CRS units. 
default is (0.0, 0.0)\n \"\"\"\n\n def __init__(self,\n crs: geometry.CRS,\n tile_size: Tuple[float, float],\n resolution: Tuple[float, float],\n origin: Optional[Tuple[float, float]] = None):\n self.crs = crs\n self.tile_size = tile_size\n self.resolution = resolution\n self.origin = origin or (0.0, 0.0)\n\n def __eq__(self, other):\n if not isinstance(other, GridSpec):\n return False\n\n return (self.crs == other.crs\n and self.tile_size == other.tile_size\n and self.resolution == other.resolution\n and self.origin == other.origin)\n\n @property\n def dimensions(self) -> Tuple[str, str]:\n \"\"\"\n List of dimension names of the grid spec\n\n \"\"\"\n return self.crs.dimensions\n\n @property\n def alignment(self) -> Tuple[float, float]:\n \"\"\"\n Pixel boundary alignment\n \"\"\"\n y, x = (orig % abs(res) for orig, res in zip(self.origin, self.resolution))\n return (y, x)\n\n @property\n def tile_resolution(self) -> Tuple[float, float]:\n \"\"\"\n Tile size in pixels in CRS dimension order (Usually y,x or lat,lon)\n \"\"\"\n y, x = (int(abs(ts / res)) for ts, res in zip(self.tile_size, self.resolution))\n return (y, x)\n\n def tile_coords(self, tile_index: Tuple[int, int]) -> Tuple[float, float]:\n \"\"\"\n Tile coordinates in (Y,X) order\n\n :param tile_index: in X,Y order\n \"\"\"\n\n def coord(index: int,\n resolution: float,\n size: float,\n origin: float) -> float:\n return (index + (1 if resolution < 0 < size else 0)) * size + origin\n\n y, x = (coord(index, res, size, origin)\n for index, res, size, origin in zip(tile_index[::-1], self.resolution, self.tile_size, self.origin))\n return (y, x)\n\n def tile_geobox(self, tile_index: Tuple[int, int]) -> geometry.GeoBox:\n \"\"\"\n Tile geobox.\n\n :param (int,int) tile_index:\n \"\"\"\n res_y, res_x = self.resolution\n y, x = self.tile_coords(tile_index)\n h, w = self.tile_resolution\n geobox = geometry.GeoBox(crs=self.crs, affine=Affine(res_x, 0.0, x, 0.0, res_y, y), width=w, height=h)\n return geobox\n\n def tiles(self, bounds: geometry.BoundingBox,\n geobox_cache: Optional[dict] = None) -> Iterator[Tuple[Tuple[int, int],\n geometry.GeoBox]]:\n \"\"\"\n Returns an iterator of tile_index, :py:class:`GeoBox` tuples across\n the grid and overlapping with the specified `bounds` rectangle.\n\n .. 
note::\n\n Grid cells are referenced by coordinates `(x, y)`, which is the opposite to the usual CRS\n dimension order.\n\n :param BoundingBox bounds: Boundary coordinates of the required grid\n :param dict geobox_cache: Optional cache to re-use geoboxes instead of creating new one each time\n :return: iterator of grid cells with :py:class:`GeoBox` tiles\n \"\"\"\n def geobox(tile_index):\n if geobox_cache is None:\n return self.tile_geobox(tile_index)\n\n gbox = geobox_cache.get(tile_index)\n if gbox is None:\n gbox = self.tile_geobox(tile_index)\n geobox_cache[tile_index] = gbox\n return gbox\n\n tile_size_y, tile_size_x = self.tile_size\n tile_origin_y, tile_origin_x = self.origin\n for y in GridSpec.grid_range(bounds.bottom - tile_origin_y, bounds.top - tile_origin_y, tile_size_y):\n for x in GridSpec.grid_range(bounds.left - tile_origin_x, bounds.right - tile_origin_x, tile_size_x):\n tile_index = (x, y)\n yield tile_index, geobox(tile_index)\n\n def tiles_from_geopolygon(self, geopolygon: geometry.Geometry,\n tile_buffer: Optional[Tuple[float, float]] = None,\n geobox_cache: Optional[dict] = None) -> Iterator[Tuple[Tuple[int, int],\n geometry.GeoBox]]:\n \"\"\"\n Returns an iterator of tile_index, :py:class:`GeoBox` tuples across\n the grid and overlapping with the specified `geopolygon`.\n\n .. note::\n\n Grid cells are referenced by coordinates `(x, y)`, which is the opposite to the usual CRS\n dimension order.\n\n :param geometry.Geometry geopolygon: Polygon to tile\n :param tile_buffer: Optional <float,float> tuple, (extra padding for the query\n in native units of this GridSpec)\n :param dict geobox_cache: Optional cache to re-use geoboxes instead of creating new one each time\n :return: iterator of grid cells with :py:class:`GeoBox` tiles\n \"\"\"\n geopolygon = geopolygon.to_crs(self.crs)\n bbox = geopolygon.boundingbox\n bbox = bbox.buffered(*tile_buffer) if tile_buffer else bbox\n\n for tile_index, tile_geobox in self.tiles(bbox, geobox_cache):\n tile_geobox = tile_geobox.buffered(*tile_buffer) if tile_buffer else tile_geobox\n\n if geometry.intersects(tile_geobox.extent, geopolygon):\n yield (tile_index, tile_geobox)\n\n @staticmethod\n def grid_range(lower: float, upper: float, step: float) -> range:\n \"\"\"\n Returns the indices along a 1D scale.\n\n Used for producing 2D grid indices.\n\n >>> list(GridSpec.grid_range(-4.0, -1.0, 3.0))\n [-2, -1]\n >>> list(GridSpec.grid_range(1.0, 4.0, -3.0))\n [-2, -1]\n >>> list(GridSpec.grid_range(-3.0, 0.0, 3.0))\n [-1]\n >>> list(GridSpec.grid_range(-2.0, 1.0, 3.0))\n [-1, 0]\n >>> list(GridSpec.grid_range(-1.0, 2.0, 3.0))\n [-1, 0]\n >>> list(GridSpec.grid_range(0.0, 3.0, 3.0))\n [0]\n >>> list(GridSpec.grid_range(1.0, 4.0, 3.0))\n [0, 1]\n \"\"\"\n if step < 0.0:\n lower, upper, step = -upper, -lower, -step\n assert step > 0.0\n return range(int(math.floor(lower / step)), int(math.ceil(upper / step)))\n\n def __str__(self) -> str:\n return \"GridSpec(crs=%s, tile_size=%s, resolution=%s)\" % (\n self.crs, self.tile_size, self.resolution)\n\n def __repr__(self) -> str:\n return self.__str__()\n\n\ndef metadata_from_doc(doc: Mapping[str, Any]) -> MetadataType:\n \"\"\"Construct MetadataType that is not tied to any particular db index. This is\n useful when there is a need to interpret dataset metadata documents\n according to metadata spec.\n \"\"\"\n from .fields import get_dataset_fields\n MetadataType.validate(doc) # type: ignore\n return MetadataType(doc, get_dataset_fields(doc))\n", "path": "datacube/model/__init__.py" } ]
[ { "content": "# coding=utf-8\n\"\"\"\nCore classes used across modules.\n\"\"\"\nimport logging\nimport math\nimport warnings\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom pathlib import Path\nfrom uuid import UUID\n\nfrom affine import Affine\nfrom typing import Optional, List, Mapping, Any, Dict, Tuple, Iterator\n\nfrom urllib.parse import urlparse\nfrom datacube.utils import geometry, without_lineage_sources, parse_time, cached_property, uri_to_local_path, \\\n schema_validated, DocReader\nfrom .fields import Field\nfrom ._base import Range\n\n_LOG = logging.getLogger(__name__)\n\nDEFAULT_SPATIAL_DIMS = ('y', 'x') # Used when product lacks grid_spec\n\nSCHEMA_PATH = Path(__file__).parent / 'schema'\n\n\nclass Dataset(object):\n \"\"\"\n A Dataset. A container of metadata, and refers typically to a multi-band raster on disk.\n\n Most important parts are the metadata_doc and uri.\n\n :param metadata_doc: the document (typically a parsed json/yaml)\n :param uris: All active uris for the dataset\n \"\"\"\n\n def __init__(self,\n type_: 'DatasetType',\n metadata_doc: dict,\n local_uri: Optional[str] = None,\n uris: Optional[List[str]] = None,\n sources: Optional[Mapping[str, 'Dataset']] = None,\n indexed_by: Optional[str] = None,\n indexed_time: Optional[datetime] = None,\n archived_time: Optional[datetime] = None):\n assert isinstance(type_, DatasetType)\n\n self.type = type_\n\n #: The document describing the dataset as a dictionary. It is often serialised as YAML on disk\n #: or inside a NetCDF file, and as JSON-B inside the database index.\n self.metadata_doc = metadata_doc\n\n if local_uri:\n warnings.warn(\n \"Dataset.local_uri has been replaced with list Dataset.uris\",\n DeprecationWarning\n )\n if not uris:\n uris = []\n\n uris.append(local_uri)\n\n #: Active URIs in order from newest to oldest\n self.uris = uris\n\n #: The datasets that this dataset is derived from (if requested on load).\n self.sources = sources\n\n if self.sources is not None:\n assert set(self.metadata.sources.keys()) == set(self.sources.keys())\n\n #: The User who indexed this dataset\n self.indexed_by = indexed_by\n self.indexed_time = indexed_time\n # When the dataset was archived. 
Null it not archived.\n self.archived_time = archived_time\n\n @property\n def metadata_type(self) -> Optional['MetadataType']:\n return self.type.metadata_type if self.type else None\n\n @property\n def local_uri(self) -> Optional[str]:\n \"\"\"\n The latest local file uri, if any.\n \"\"\"\n if self.uris is None:\n return None\n\n local_uris = [uri for uri in self.uris if uri.startswith('file:')]\n if local_uris:\n return local_uris[0]\n\n return None\n\n @property\n def local_path(self) -> Optional[Path]:\n \"\"\"\n A path to this dataset on the local filesystem (if available).\n \"\"\"\n return uri_to_local_path(self.local_uri)\n\n @property\n def id(self) -> UUID:\n \"\"\" UUID of a dataset\n \"\"\"\n # This is a string in a raw document.\n return UUID(self.metadata.id)\n\n @property\n def managed(self) -> bool:\n return self.type.managed\n\n @property\n def format(self) -> str:\n return self.metadata.format\n\n @property\n def uri_scheme(self) -> str:\n if self.uris is None or len(self.uris) == 0:\n return ''\n\n url = urlparse(self.uris[0])\n if url.scheme == '':\n return 'file'\n return url.scheme\n\n @property\n def measurements(self) -> Dict[str, Any]:\n # It's an optional field in documents.\n # Dictionary of key -> measurement descriptor\n if not hasattr(self.metadata, 'measurements'):\n return {}\n return self.metadata.measurements\n\n @cached_property\n def center_time(self) -> Optional[datetime]:\n \"\"\" mid-point of time range\n \"\"\"\n time = self.time\n if time is None:\n return None\n return time.begin + (time.end - time.begin) // 2\n\n @property\n def time(self) -> Optional[Range]:\n try:\n time = self.metadata.time\n return Range(parse_time(time.begin), parse_time(time.end))\n except AttributeError:\n return None\n\n @cached_property\n def key_time(self):\n \"\"\"\n :rtype: datetime.datetime\n \"\"\"\n if 'key_time' in self.metadata.fields:\n return self.metadata.key_time\n\n # Existing datasets are already using the computed \"center_time\" for their storage index key\n # if 'center_time' in self.metadata.fields:\n # return self.metadata.center_time\n\n return self.center_time\n\n @property\n def bounds(self) -> Optional[geometry.BoundingBox]:\n \"\"\" :returns: bounding box of the dataset in the native crs\n \"\"\"\n gs = self._gs\n if gs is None:\n return None\n\n bounds = gs['geo_ref_points']\n return geometry.BoundingBox(left=min(bounds['ur']['x'], bounds['ll']['x']),\n right=max(bounds['ur']['x'], bounds['ll']['x']),\n top=max(bounds['ur']['y'], bounds['ll']['y']),\n bottom=min(bounds['ur']['y'], bounds['ll']['y']))\n\n @property\n def transform(self) -> Optional[Affine]:\n geo = self._gs\n if geo is None:\n return None\n\n bounds = geo.get('geo_ref_points')\n if bounds is None:\n return None\n\n return Affine(bounds['lr']['x'] - bounds['ul']['x'], 0, bounds['ul']['x'],\n 0, bounds['lr']['y'] - bounds['ul']['y'], bounds['ul']['y'])\n\n @property\n def is_archived(self) -> bool:\n \"\"\"\n Is this dataset archived?\n\n (an archived dataset is one that is not intended to be used by users anymore: eg. it has been\n replaced by another dataset. It will not show up in search results, but still exists in the\n system via provenance chains or through id lookup.)\n\n \"\"\"\n return self.archived_time is not None\n\n @property\n def is_active(self) -> bool:\n \"\"\"\n Is this dataset active?\n\n (ie. 
dataset hasn't been archived)\n\n \"\"\"\n return not self.is_archived\n\n @property\n def _gs(self) -> Optional[Dict[str, Any]]:\n try:\n return self.metadata.grid_spatial\n except AttributeError:\n return None\n\n @property\n def crs(self) -> Optional[geometry.CRS]:\n \"\"\" Return CRS if available\n \"\"\"\n projection = self._gs\n\n if not projection:\n return None\n\n crs = projection.get('spatial_reference', None)\n if crs:\n return geometry.CRS(str(crs))\n\n # Try to infer CRS\n zone_ = projection.get('zone')\n datum_ = projection.get('datum')\n if zone_ and datum_:\n try:\n # TODO: really need CRS specified properly in agdc-metadata.yaml\n if datum_ == 'GDA94':\n return geometry.CRS('EPSG:283' + str(abs(zone_)))\n if datum_ == 'WGS84':\n if zone_[-1] == 'S':\n return geometry.CRS('EPSG:327' + str(abs(int(zone_[:-1]))))\n else:\n return geometry.CRS('EPSG:326' + str(abs(int(zone_[:-1]))))\n except geometry.InvalidCRSError:\n # We still return None, as they didn't specify a CRS explicitly...\n _LOG.warning(\n \"Can't figure out projection: possibly invalid zone (%r) for datum (%r).\", zone_, datum_\n )\n\n return None\n\n @cached_property\n def extent(self) -> Optional[geometry.Geometry]:\n \"\"\" :returns: valid extent of the dataset or None\n \"\"\"\n\n def xytuple(obj):\n return obj['x'], obj['y']\n\n # If no projection or crs, they have no extent.\n projection = self._gs\n if not projection:\n return None\n crs = self.crs\n if not crs:\n _LOG.debug(\"No CRS, assuming no extent (dataset %s)\", self.id)\n return None\n\n valid_data = projection.get('valid_data')\n geo_ref_points = projection.get('geo_ref_points')\n if valid_data:\n return geometry.Geometry(valid_data, crs=crs)\n elif geo_ref_points:\n return geometry.polygon([xytuple(geo_ref_points[key]) for key in ('ll', 'ul', 'ur', 'lr', 'll')],\n crs=crs)\n\n return None\n\n def __eq__(self, other) -> bool:\n if isinstance(other, Dataset):\n return self.id == other.id\n return False\n\n def __hash__(self):\n return hash(self.id)\n\n def __str__(self):\n str_loc = 'not available' if not self.uris else self.uris[0]\n return \"Dataset <id={id} type={type} location={loc}>\".format(id=self.id,\n type=self.type.name,\n loc=str_loc)\n\n def __repr__(self) -> str:\n return self.__str__()\n\n @property\n def metadata(self) -> DocReader:\n if self.metadata_type is None:\n raise ValueError('Can not interpret dataset without metadata type set')\n return self.metadata_type.dataset_reader(self.metadata_doc)\n\n def metadata_doc_without_lineage(self) -> Dict[str, Any]:\n \"\"\" Return metadata document without nested lineage datasets\n \"\"\"\n return without_lineage_sources(self.metadata_doc, self.metadata_type)\n\n\nclass Measurement(dict):\n \"\"\"\n Describes a single data variable of a Product or Dataset.\n\n Must include, which can be used when loading and interpreting data:\n\n - name\n - dtype - eg: int8, int16, float32\n - nodata - What value represent No Data\n - units\n\n Attributes can be accessed using ``dict []`` syntax.\n\n Can also include attributes like alternative names 'aliases', and spectral and bit flags\n definitions to aid with interpreting the data.\n\n \"\"\"\n REQUIRED_KEYS = ('name', 'dtype', 'nodata', 'units')\n OPTIONAL_KEYS = ('aliases', 'spectral_definition', 'flags_definition')\n ATTR_BLACKLIST = set(['name', 'dtype', 'aliases', 'resampling_method', 'fuser'])\n\n def __init__(self, **kwargs):\n missing_keys = set(self.REQUIRED_KEYS) - set(kwargs)\n if missing_keys:\n raise ValueError(\"Measurement required keys 
missing: {}\".format(missing_keys))\n\n super().__init__(**kwargs)\n\n def __getattr__(self, key: str) -> Any:\n \"\"\" Allow access to items as attributes. \"\"\"\n v = self.get(key, self)\n if v is self:\n raise AttributeError(\"'Measurement' object has no attribute '{}'\".format(key))\n return v\n\n def __repr__(self) -> str:\n return \"Measurement({})\".format(super(Measurement, self).__repr__())\n\n def copy(self) -> 'Measurement':\n \"\"\"Required as the super class `dict` method returns a `dict`\n and does not preserve Measurement class\"\"\"\n return Measurement(**self)\n\n def dataarray_attrs(self) -> Dict[str, Any]:\n \"\"\"This returns attributes filtered for display in a dataarray.\"\"\"\n return {key: value for key, value in self.items() if key not in self.ATTR_BLACKLIST}\n\n\n@schema_validated(SCHEMA_PATH / 'metadata-type-schema.yaml')\nclass MetadataType(object):\n \"\"\"Metadata Type definition\"\"\"\n\n def __init__(self,\n definition: Mapping[str, Any],\n dataset_search_fields: Mapping[str, Field],\n id_: Optional[int] = None):\n self.definition = definition\n self.dataset_fields = dataset_search_fields\n self.id = id_\n\n @property\n def name(self) -> str:\n return self.definition['name']\n\n @property\n def description(self) -> str:\n return self.definition['description']\n\n def dataset_reader(self, dataset_doc: Mapping[str, Field]) -> DocReader:\n return DocReader(self.definition['dataset'], self.dataset_fields, dataset_doc)\n\n def __str__(self) -> str:\n return \"MetadataType(name={name!r}, id_={id!r})\".format(id=self.id, name=self.name)\n\n def __repr__(self) -> str:\n return str(self)\n\n\n@schema_validated(SCHEMA_PATH / 'dataset-type-schema.yaml')\nclass DatasetType(object):\n \"\"\"\n Product definition\n\n :param MetadataType metadata_type:\n :param dict definition:\n \"\"\"\n\n def __init__(self,\n metadata_type: MetadataType,\n definition: Mapping[str, Any],\n id_: Optional[int] = None):\n assert isinstance(metadata_type, MetadataType)\n self.id = id_\n self.metadata_type = metadata_type\n #: product definition document\n self.definition = definition\n\n @property\n def name(self) -> str:\n return self.definition['name']\n\n @property\n def managed(self) -> bool:\n return self.definition.get('managed', False)\n\n @property\n def metadata_doc(self) -> Mapping[str, Any]:\n return self.definition['metadata']\n\n @property\n def metadata(self) -> DocReader:\n return self.metadata_type.dataset_reader(self.metadata_doc)\n\n @property\n def fields(self):\n return self.metadata_type.dataset_reader(self.metadata_doc).fields\n\n @property\n def measurements(self) -> Mapping[str, Measurement]:\n \"\"\"\n Dictionary of measurements in this product\n \"\"\"\n return OrderedDict((m['name'], Measurement(**m)) for m in self.definition.get('measurements', []))\n\n @property\n def dimensions(self) -> Tuple[str, str]:\n \"\"\"\n List of dimension labels for data in this product\n \"\"\"\n assert self.metadata_type.name == 'eo'\n if self.grid_spec is not None:\n spatial_dims = self.grid_spec.dimensions\n else:\n spatial_dims = DEFAULT_SPATIAL_DIMS\n\n return ('time',) + spatial_dims\n\n @cached_property\n def grid_spec(self) -> Optional['GridSpec']:\n \"\"\"\n Grid specification for this product\n \"\"\"\n storage = self.definition.get('storage')\n if storage is None:\n return None\n\n crs = storage.get('crs')\n if crs is None:\n return None\n\n crs = geometry.CRS(str(crs).strip())\n\n def extract_point(name):\n xx = storage.get(name, None)\n return None if xx is None else 
tuple(xx[dim] for dim in crs.dimensions)\n\n gs_params = {name: extract_point(name)\n for name in ('tile_size', 'resolution', 'origin')}\n\n return GridSpec(crs=crs, **gs_params)\n\n def canonical_measurement(self, measurement: str) -> str:\n \"\"\" resolve measurement alias into canonical name\n \"\"\"\n for m in self.measurements:\n if measurement == m:\n return measurement\n elif measurement in self.measurements[m].get('aliases', []):\n return m\n raise KeyError(measurement)\n\n def lookup_measurements(self, measurements: Optional[List[str]] = None) -> Mapping[str, Measurement]:\n \"\"\"\n Find measurements by name\n\n :param measurements: list of measurement names\n \"\"\"\n my_measurements = self.measurements\n if measurements is None:\n return my_measurements\n canonical = [self.canonical_measurement(measurement) for measurement in measurements]\n return OrderedDict((measurement, my_measurements[measurement]) for measurement in canonical)\n\n def dataset_reader(self, dataset_doc):\n return self.metadata_type.dataset_reader(dataset_doc)\n\n def to_dict(self) -> Mapping[str, Any]:\n \"\"\"\n Convert to a dictionary representation of the available fields\n \"\"\"\n row = {\n 'id': self.id,\n 'name': self.name,\n 'description': self.definition['description'],\n }\n row.update(self.fields)\n if self.grid_spec is not None:\n row.update({\n 'crs': str(self.grid_spec.crs),\n 'spatial_dimensions': self.grid_spec.dimensions,\n 'tile_size': self.grid_spec.tile_size,\n 'resolution': self.grid_spec.resolution,\n })\n return row\n\n def __str__(self) -> str:\n return \"DatasetType(name={name!r}, id_={id!r})\".format(id=self.id, name=self.name)\n\n def __repr__(self) -> str:\n return self.__str__()\n\n # Types are uniquely identifiable by name:\n\n def __eq__(self, other) -> bool:\n if self is other:\n return True\n\n if self.__class__ != other.__class__:\n return False\n\n return self.name == other.name\n\n def __hash__(self):\n return hash(self.name)\n\n\nclass GridSpec(object):\n \"\"\"\n Definition for a regular spatial grid\n\n >>> gs = GridSpec(crs=geometry.CRS('EPSG:4326'), tile_size=(1, 1), resolution=(-0.1, 0.1), origin=(-50.05, 139.95))\n >>> gs.tile_resolution\n (10, 10)\n >>> list(gs.tiles(geometry.BoundingBox(140, -50, 141.5, -48.5)))\n [((0, 0), GeoBox(10, 10, Affine(0.1, 0.0, 139.95,\n 0.0, -0.1, -49.05), EPSG:4326)), ((1, 0), GeoBox(10, 10, Affine(0.1, 0.0, 140.95,\n 0.0, -0.1, -49.05), EPSG:4326)), ((0, 1), GeoBox(10, 10, Affine(0.1, 0.0, 139.95,\n 0.0, -0.1, -48.05), EPSG:4326)), ((1, 1), GeoBox(10, 10, Affine(0.1, 0.0, 140.95,\n 0.0, -0.1, -48.05), EPSG:4326))]\n\n :param geometry.CRS crs: Coordinate System used to define the grid\n :param [float,float] tile_size: (Y, X) size of each tile, in CRS units\n :param [float,float] resolution: (Y, X) size of each data point in the grid, in CRS units. Y will\n usually be negative.\n :param [float,float] origin: (Y, X) coordinates of a corner of the (0,0) tile in CRS units. 
default is (0.0, 0.0)\n \"\"\"\n\n def __init__(self,\n crs: geometry.CRS,\n tile_size: Tuple[float, float],\n resolution: Tuple[float, float],\n origin: Optional[Tuple[float, float]] = None):\n self.crs = crs\n self.tile_size = tile_size\n self.resolution = resolution\n self.origin = origin or (0.0, 0.0)\n\n def __eq__(self, other):\n if not isinstance(other, GridSpec):\n return False\n\n return (self.crs == other.crs\n and self.tile_size == other.tile_size\n and self.resolution == other.resolution\n and self.origin == other.origin)\n\n @property\n def dimensions(self) -> Tuple[str, str]:\n \"\"\"\n List of dimension names of the grid spec\n\n \"\"\"\n return self.crs.dimensions\n\n @property\n def alignment(self) -> Tuple[float, float]:\n \"\"\"\n Pixel boundary alignment\n \"\"\"\n y, x = (orig % abs(res) for orig, res in zip(self.origin, self.resolution))\n return (y, x)\n\n @property\n def tile_resolution(self) -> Tuple[float, float]:\n \"\"\"\n Tile size in pixels in CRS dimension order (Usually y,x or lat,lon)\n \"\"\"\n y, x = (int(abs(ts / res)) for ts, res in zip(self.tile_size, self.resolution))\n return (y, x)\n\n def tile_coords(self, tile_index: Tuple[int, int]) -> Tuple[float, float]:\n \"\"\"\n Tile coordinates in (Y,X) order\n\n :param tile_index: in X,Y order\n \"\"\"\n\n def coord(index: int,\n resolution: float,\n size: float,\n origin: float) -> float:\n return (index + (1 if resolution < 0 < size else 0)) * size + origin\n\n y, x = (coord(index, res, size, origin)\n for index, res, size, origin in zip(tile_index[::-1], self.resolution, self.tile_size, self.origin))\n return (y, x)\n\n def tile_geobox(self, tile_index: Tuple[int, int]) -> geometry.GeoBox:\n \"\"\"\n Tile geobox.\n\n :param (int,int) tile_index:\n \"\"\"\n res_y, res_x = self.resolution\n y, x = self.tile_coords(tile_index)\n h, w = self.tile_resolution\n geobox = geometry.GeoBox(crs=self.crs, affine=Affine(res_x, 0.0, x, 0.0, res_y, y), width=w, height=h)\n return geobox\n\n def tiles(self, bounds: geometry.BoundingBox,\n geobox_cache: Optional[dict] = None) -> Iterator[Tuple[Tuple[int, int],\n geometry.GeoBox]]:\n \"\"\"\n Returns an iterator of tile_index, :py:class:`GeoBox` tuples across\n the grid and overlapping with the specified `bounds` rectangle.\n\n .. 
note::\n\n Grid cells are referenced by coordinates `(x, y)`, which is the opposite to the usual CRS\n dimension order.\n\n :param BoundingBox bounds: Boundary coordinates of the required grid\n :param dict geobox_cache: Optional cache to re-use geoboxes instead of creating new one each time\n :return: iterator of grid cells with :py:class:`GeoBox` tiles\n \"\"\"\n def geobox(tile_index):\n if geobox_cache is None:\n return self.tile_geobox(tile_index)\n\n gbox = geobox_cache.get(tile_index)\n if gbox is None:\n gbox = self.tile_geobox(tile_index)\n geobox_cache[tile_index] = gbox\n return gbox\n\n tile_size_y, tile_size_x = self.tile_size\n tile_origin_y, tile_origin_x = self.origin\n for y in GridSpec.grid_range(bounds.bottom - tile_origin_y, bounds.top - tile_origin_y, tile_size_y):\n for x in GridSpec.grid_range(bounds.left - tile_origin_x, bounds.right - tile_origin_x, tile_size_x):\n tile_index = (x, y)\n yield tile_index, geobox(tile_index)\n\n def tiles_from_geopolygon(self, geopolygon: geometry.Geometry,\n tile_buffer: Optional[Tuple[float, float]] = None,\n geobox_cache: Optional[dict] = None) -> Iterator[Tuple[Tuple[int, int],\n geometry.GeoBox]]:\n \"\"\"\n Returns an iterator of tile_index, :py:class:`GeoBox` tuples across\n the grid and overlapping with the specified `geopolygon`.\n\n .. note::\n\n Grid cells are referenced by coordinates `(x, y)`, which is the opposite to the usual CRS\n dimension order.\n\n :param geometry.Geometry geopolygon: Polygon to tile\n :param tile_buffer: Optional <float,float> tuple, (extra padding for the query\n in native units of this GridSpec)\n :param dict geobox_cache: Optional cache to re-use geoboxes instead of creating new one each time\n :return: iterator of grid cells with :py:class:`GeoBox` tiles\n \"\"\"\n geopolygon = geopolygon.to_crs(self.crs)\n bbox = geopolygon.boundingbox\n bbox = bbox.buffered(*tile_buffer) if tile_buffer else bbox\n\n for tile_index, tile_geobox in self.tiles(bbox, geobox_cache):\n tile_geobox = tile_geobox.buffered(*tile_buffer) if tile_buffer else tile_geobox\n\n if geometry.intersects(tile_geobox.extent, geopolygon):\n yield (tile_index, tile_geobox)\n\n @staticmethod\n def grid_range(lower: float, upper: float, step: float) -> range:\n \"\"\"\n Returns the indices along a 1D scale.\n\n Used for producing 2D grid indices.\n\n >>> list(GridSpec.grid_range(-4.0, -1.0, 3.0))\n [-2, -1]\n >>> list(GridSpec.grid_range(1.0, 4.0, -3.0))\n [-2, -1]\n >>> list(GridSpec.grid_range(-3.0, 0.0, 3.0))\n [-1]\n >>> list(GridSpec.grid_range(-2.0, 1.0, 3.0))\n [-1, 0]\n >>> list(GridSpec.grid_range(-1.0, 2.0, 3.0))\n [-1, 0]\n >>> list(GridSpec.grid_range(0.0, 3.0, 3.0))\n [0]\n >>> list(GridSpec.grid_range(1.0, 4.0, 3.0))\n [0, 1]\n \"\"\"\n if step < 0.0:\n lower, upper, step = -upper, -lower, -step\n assert step > 0.0\n return range(int(math.floor(lower / step)), int(math.ceil(upper / step)))\n\n def __str__(self) -> str:\n return \"GridSpec(crs=%s, tile_size=%s, resolution=%s)\" % (\n self.crs, self.tile_size, self.resolution)\n\n def __repr__(self) -> str:\n return self.__str__()\n\n\ndef metadata_from_doc(doc: Mapping[str, Any]) -> MetadataType:\n \"\"\"Construct MetadataType that is not tied to any particular db index. This is\n useful when there is a need to interpret dataset metadata documents\n according to metadata spec.\n \"\"\"\n from .fields import get_dataset_fields\n MetadataType.validate(doc) # type: ignore\n return MetadataType(doc, get_dataset_fields(doc))\n", "path": "datacube/model/__init__.py" } ]
diff --git a/datacube/model/__init__.py b/datacube/model/__init__.py index a9c93b2971..ef7c588308 100644 --- a/datacube/model/__init__.py +++ b/datacube/model/__init__.py @@ -285,7 +285,9 @@ def xytuple(obj): return None def __eq__(self, other) -> bool: - return self.id == other.id + if isinstance(other, Dataset): + return self.id == other.id + return False def __hash__(self): return hash(self.id) diff --git a/tests/test_model.py b/tests/test_model.py index 4406f28844..521eb8c1c9 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -45,6 +45,14 @@ def test_gridspec_upperleft(): assert cells[(30, 15)].extent.boundingbox == tile_bbox +def test_dataset_basics(): + ds = mk_sample_dataset([dict(name='a')]) + assert ds == ds + assert ds != "33" + assert (ds == "33") is False + assert str(ds) == repr(ds) + + def test_dataset_measurement_paths(): format = 'GeoTiff'
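The diff above hardens `Dataset.__eq__` against comparison with arbitrary objects while keeping `__hash__` consistent with it. A minimal standalone sketch of the same pattern (the `Record` class below is hypothetical, not part of datacube):

```python
import uuid


class Record:
    """Hypothetical stand-in for datacube's Dataset: equality is identity-based."""

    def __init__(self, id_=None):
        self.id = id_ or uuid.uuid4()

    def __eq__(self, other) -> bool:
        # Compare ids only when `other` is also a Record; returning False
        # (instead of raising AttributeError) keeps `record == "33"` well defined.
        if isinstance(other, Record):
            return self.id == other.id
        return False

    def __hash__(self):
        # Hash the same field __eq__ uses, so equal objects hash equally
        # and Records stay usable as dict keys / set members.
        return hash(self.id)


r = Record()
assert r == r
assert (r == "33") is False
assert len({r, Record(r.id)}) == 1  # same id -> same set slot
```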
joke2k__faker-318
Access to the Generator.random It would be nice if one could gain access to the Generator.random variable so that one could save/set the state. I realize I can pass in the seed, but there is currently no way of gathering what the seed/state is when using the automatically generated seed. I don't want to use a fixed seed, but I do want to log/print the seed used _if_ the tests fail. That is, I'd like to be able to do something like `faker.generator.getstate()` (which gets the random state without exposing random) or `faker.generator.random.getstate()` (which gives access to the random variable). For now, the workaround appears to be to create a Faker object with your own Generator.
[ { "content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport re\nimport random\n\n\n_re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\nrandom = random.Random()\n\n\nclass Generator(object):\n\n __config = {}\n\n def __init__(self, **config):\n self.providers = []\n self.__config = dict(\n list(self.__config.items()) + list(config.items()))\n\n def add_provider(self, provider):\n\n if type(provider) is type:\n provider = provider(self)\n\n self.providers.insert(0, provider)\n\n for method_name in dir(provider):\n # skip 'private' method\n if method_name.startswith('_'):\n continue\n\n faker_function = getattr(provider, method_name)\n\n if hasattr(faker_function, '__call__') or \\\n isinstance(faker_function, (classmethod, staticmethod)):\n # add all faker method to generator\n self.set_formatter(method_name, faker_function)\n\n def provider(self, name):\n try:\n lst = [p for p in self.get_providers()\n if p.__provider__ == name.lower()]\n return lst[0]\n except IndexError:\n return None\n\n def get_providers(self):\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n\n def seed(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n random.seed(seed)\n\n def format(self, formatter, *args, **kwargs):\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n # TODO: data export?\n return self.get_formatter(formatter)(*args, **kwargs)\n\n def get_formatter(self, formatter):\n try:\n return getattr(self, formatter)\n except AttributeError:\n raise AttributeError('Unknown formatter \"{0}\"'.format(formatter))\n\n def set_formatter(self, name, method):\n \"\"\"\n This method adds a provider method to generator.\n Override this method to add some decoration or logging stuff.\n \"\"\"\n setattr(self, name, method)\n\n def parse(self, text):\n \"\"\"\n Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')\n with the result from the token method call.\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n\n def __format_token(self, matches):\n formatter = list(matches.groups())\n formatter[1] = self.format(formatter[1])\n return ''.join(formatter)\n", "path": "faker/generator.py" } ]
[ { "content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport re\nimport random\n\n\n_re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\nrandom = random.Random()\n\n\nclass Generator(object):\n\n __config = {}\n\n def __init__(self, **config):\n self.providers = []\n self.__config = dict(\n list(self.__config.items()) + list(config.items()))\n\n def add_provider(self, provider):\n\n if type(provider) is type:\n provider = provider(self)\n\n self.providers.insert(0, provider)\n\n for method_name in dir(provider):\n # skip 'private' method\n if method_name.startswith('_'):\n continue\n\n faker_function = getattr(provider, method_name)\n\n if hasattr(faker_function, '__call__') or \\\n isinstance(faker_function, (classmethod, staticmethod)):\n # add all faker method to generator\n self.set_formatter(method_name, faker_function)\n\n def provider(self, name):\n try:\n lst = [p for p in self.get_providers()\n if p.__provider__ == name.lower()]\n return lst[0]\n except IndexError:\n return None\n\n def get_providers(self):\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n\n @property\n def random(self):\n return random\n\n def seed(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n random.seed(seed)\n\n def format(self, formatter, *args, **kwargs):\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n # TODO: data export?\n return self.get_formatter(formatter)(*args, **kwargs)\n\n def get_formatter(self, formatter):\n try:\n return getattr(self, formatter)\n except AttributeError:\n raise AttributeError('Unknown formatter \"{0}\"'.format(formatter))\n\n def set_formatter(self, name, method):\n \"\"\"\n This method adds a provider method to generator.\n Override this method to add some decoration or logging stuff.\n \"\"\"\n setattr(self, name, method)\n\n def parse(self, text):\n \"\"\"\n Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')\n with the result from the token method call.\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n\n def __format_token(self, matches):\n formatter = list(matches.groups())\n formatter[1] = self.format(formatter[1])\n return ''.join(formatter)\n", "path": "faker/generator.py" } ]
diff --git a/README.rst b/README.rst index 4dc04a86fb..0941dbdaa7 100644 --- a/README.rst +++ b/README.rst @@ -263,13 +263,26 @@ How to use with factory-boy title = factory.LazyAttribute(lambda x: faker.sentence(nb_words=4)) author_name = factory.LazyAttribute(lambda x: faker.name()) +Accessing the `random` instance +------------------------------- + +The ``.random`` property on the generator returns the instance of ``random.Random`` +used to generate the values: + +__ code:: python + + from faker import Faker + fake = Faker() + fake.random + fake.random.getstate() + Seeding the Generator --------------------- When using Faker for unit testing, you will often want to generate the same -data set. The generator offers a ``seed()`` method, which seeds the random -number generator. Calling the same script twice with the same seed produces the -same results. +data set. For convenience, the generator also provide a ``seed()`` method, which +seeds the random number generator. Calling the same script twice with the same +seed produces the same results. .. code:: python @@ -280,8 +293,20 @@ same results. print fake.name() > Margaret Boehm +The code above is equivalent to the following: + +.. code:: python + + from faker import Faker + fake = Faker() + faker.random.seed(4321) + + print fake.name() + > Margaret Boehm + Tests ----- + Installing dependencies: .. code:: bash diff --git a/docs/index.rst b/docs/index.rst index 601d474d0d..7e96203cac 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -264,13 +264,26 @@ How to use with factory-boy title = factory.LazyAttribute(lambda x: faker.sentence(nb_words=4)) author_name = factory.LazyAttribute(lambda x: faker.name()) +Accessing the `random` instance +------------------------------- + +The ``.random`` property on the generator returns the instance of ``random.Random`` +used to generate the values: + +__ code:: python + + from faker import Faker + fake = Faker() + fake.random + fake.random.getstate() + Seeding the Generator --------------------- When using Faker for unit testing, you will often want to generate the same -data set. The generator offers a ``seed()`` method, which seeds the random -number generator. Calling the same script twice with the same seed produces the -same results. +data set. For convenience, the generator also provide a ``seed()`` method, which +seeds the random number generator. Calling the same script twice with the same +seed produces the same results. .. code:: python @@ -281,6 +294,17 @@ same results. print fake.name() > Margaret Boehm +The code above is equivalent to the following: + +.. 
code:: python + + from faker import Faker + fake = Faker() + faker.random.seed(4321) + + print fake.name() + > Margaret Boehm + Tests ----- diff --git a/faker/generator.py b/faker/generator.py index 95dfac2a73..74034cb440 100644 --- a/faker/generator.py +++ b/faker/generator.py @@ -50,6 +50,10 @@ def get_providers(self): """Returns added providers.""" return self.providers + @property + def random(self): + return random + def seed(self, seed=None): """Calls random.seed""" random.seed(seed) diff --git a/faker/tests/__init__.py b/faker/tests/__init__.py index 6502a4489d..5dc2252823 100644 --- a/faker/tests/__init__.py +++ b/faker/tests/__init__.py @@ -518,6 +518,12 @@ class GeneratorTestCase(unittest.TestCase): def setUp(self): self.generator = Generator() + @patch('random.getstate') + def test_get_random(self, mock_system_random): + random_instance = self.generator.random + random_instance.getstate() + self.assertFalse(mock_system_random.called) + @patch('random.seed') def test_random_seed_doesnt_seed_system_random(self, mock_system_random): self.generator.seed(0)
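Following the README additions in this diff, the new `random` property can be used to capture and later restore the generator's state, which is the use case described in the issue. A short usage sketch (assuming a faker version that includes this change):

```python
from faker import Faker

fake = Faker()

# Snapshot the state of the underlying random.Random instance...
state = fake.random.getstate()
first_name = fake.name()

# ...and restore it later, e.g. to reproduce the data set of a failing test.
fake.random.setstate(state)
assert fake.name() == first_name
```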
scikit-image__scikit-image-1430
measure.label is documented under morphology.label In the [measure API reference](http://scikit-image.org/docs/stable/api/skimage.measure.html) `label` is not documented, but it is [documented under the morphology module](http://scikit-image.org/docs/stable/api/skimage.morphology.html#label) (which is deprecated).
[ { "content": "from ._find_contours import find_contours\nfrom ._marching_cubes import (marching_cubes, mesh_surface_area,\n correct_mesh_orientation)\nfrom ._regionprops import regionprops, perimeter\nfrom ._structural_similarity import structural_similarity\nfrom ._polygon import approximate_polygon, subdivide_polygon\nfrom ._pnpoly import points_in_poly, grid_points_in_poly\nfrom ._moments import moments, moments_central, moments_normalized, moments_hu\nfrom .profile import profile_line\nfrom .fit import LineModel, CircleModel, EllipseModel, ransac\nfrom .block import block_reduce\nfrom ._ccomp import label\n\n\n__all__ = ['find_contours',\n 'regionprops',\n 'perimeter',\n 'structural_similarity',\n 'approximate_polygon',\n 'subdivide_polygon',\n 'LineModel',\n 'CircleModel',\n 'EllipseModel',\n 'ransac',\n 'block_reduce',\n 'moments',\n 'moments_central',\n 'moments_normalized',\n 'moments_hu',\n 'marching_cubes',\n 'mesh_surface_area',\n 'correct_mesh_orientation',\n 'profile_line',\n 'label',\n 'points_in_poly',\n 'grid_points_in_poly']\n", "path": "skimage/measure/__init__.py" } ]
[ { "content": "from ._find_contours import find_contours\nfrom ._marching_cubes import (marching_cubes, mesh_surface_area,\n correct_mesh_orientation)\nfrom ._regionprops import regionprops, perimeter\nfrom ._structural_similarity import structural_similarity\nfrom ._polygon import approximate_polygon, subdivide_polygon\nfrom ._pnpoly import points_in_poly, grid_points_in_poly\nfrom ._moments import moments, moments_central, moments_normalized, moments_hu\nfrom .profile import profile_line\nfrom .fit import LineModel, CircleModel, EllipseModel, ransac\nfrom .block import block_reduce\nfrom ._label import label\n\n\n__all__ = ['find_contours',\n 'regionprops',\n 'perimeter',\n 'structural_similarity',\n 'approximate_polygon',\n 'subdivide_polygon',\n 'LineModel',\n 'CircleModel',\n 'EllipseModel',\n 'ransac',\n 'block_reduce',\n 'moments',\n 'moments_central',\n 'moments_normalized',\n 'moments_hu',\n 'marching_cubes',\n 'mesh_surface_area',\n 'correct_mesh_orientation',\n 'profile_line',\n 'label',\n 'points_in_poly',\n 'grid_points_in_poly']\n", "path": "skimage/measure/__init__.py" } ]
diff --git a/skimage/measure/__init__.py b/skimage/measure/__init__.py index e2a4a51aef4..9731d6da6fc 100755 --- a/skimage/measure/__init__.py +++ b/skimage/measure/__init__.py @@ -9,7 +9,7 @@ from .profile import profile_line from .fit import LineModel, CircleModel, EllipseModel, ransac from .block import block_reduce -from ._ccomp import label +from ._label import label __all__ = ['find_contours',
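With the import switched to `._label`, `label` stays available as `skimage.measure.label`, which is where the API docs should now pick it up. A brief usage sketch (argument names assumed from the scikit-image API of that era):

```python
import numpy as np
from skimage import measure

# A binary image with two separate foreground blobs.
image = np.array([[1, 1, 0, 0],
                  [1, 0, 0, 0],
                  [0, 0, 0, 1],
                  [0, 0, 1, 1]])

labels = measure.label(image, background=0)
print(np.unique(labels))  # two connected components (exact label values vary by version)
```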
opendatacube__datacube-core-652
`unary_union` does not detect failures In this function: https://github.com/opendatacube/datacube-core/blob/0ab135ef9986bee0ea476ced76fad7b9c2f47328/datacube/utils/geometry/_base.py#L692-L695 https://github.com/opendatacube/datacube-core/blob/0ab135ef9986bee0ea476ced76fad7b9c2f47328/datacube/utils/geometry/_base.py#L711-L712 `ogr.UnionCascaded()` can return `None` on failure; this `None` is then passed on to the `Geometry` constructor (its `._geom` property), but the `Geometry` class assumes that `_geom` is not `None`. So there is no clean way to check whether the union succeeded in the first place (without accessing private members); `unary_union` should probably just return `None` on failure to mimic the behaviour of the underlying function.
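As shown in the before/after files below, the fix makes `_make_geom_from_ogr` (and therefore `unary_union`) return `None` when the underlying OGR call fails, so callers can detect failure explicitly. A hedged sketch of the resulting calling pattern (assuming `unary_union` is importable from `datacube.utils.geometry`; `safe_union` is a hypothetical helper):

```python
from datacube.utils import geometry


def safe_union(geoms):
    """Union polygons, surfacing OGR failures instead of wrapping a None geometry."""
    result = geometry.unary_union(geoms)
    if result is None:
        # With the fix, a failed ogr.UnionCascaded() propagates as None here.
        raise RuntimeError("unary_union failed for the supplied geometries")
    return result
```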
[ { "content": "import functools\nimport math\nfrom collections import namedtuple, OrderedDict\nfrom typing import Tuple, Callable\n\nimport cachetools\nimport numpy\nfrom affine import Affine\nfrom osgeo import ogr, osr\n\nfrom .tools import roi_normalise, roi_shape\n\nCoordinate = namedtuple('Coordinate', ('values', 'units'))\n_BoundingBox = namedtuple('BoundingBox', ('left', 'bottom', 'right', 'top'))\n\n\nclass BoundingBox(_BoundingBox):\n \"\"\"Bounding box, defining extent in cartesian coordinates.\n \"\"\"\n\n def buffered(self, ybuff, xbuff):\n \"\"\"\n Return a new BoundingBox, buffered in the x and y dimensions.\n\n :param ybuff: Y dimension buffering amount\n :param xbuff: X dimension buffering amount\n :return: new BoundingBox\n \"\"\"\n return BoundingBox(left=self.left - xbuff, right=self.right + xbuff,\n top=self.top + ybuff, bottom=self.bottom - ybuff)\n\n @property\n def width(self):\n return self.right - self.left\n\n @property\n def height(self):\n return self.top - self.bottom\n\n\nclass CRSProjProxy(object):\n def __init__(self, crs):\n self._crs = crs\n\n def __getattr__(self, item):\n return self._crs.GetProjParm(item)\n\n\nclass InvalidCRSError(ValueError):\n pass\n\n\[email protected]({})\ndef _make_crs(crs_str):\n crs = osr.SpatialReference()\n\n # We don't bother checking the return code for errors, as the below ExportToProj4 does a more thorough job.\n crs.SetFromUserInput(crs_str)\n\n # Some will \"validly\" be parsed above, but return OGRERR_CORRUPT_DATA error when used here.\n # see the PROJCS[\"unnamed... doctest below for an example.\n if not crs.ExportToProj4():\n raise InvalidCRSError(\"Not a valid CRS: %r\" % crs_str)\n\n if crs.IsGeographic() == crs.IsProjected():\n raise InvalidCRSError('CRS must be geographic or projected: %r' % crs_str)\n\n return crs\n\n\nclass CRS(object):\n \"\"\"\n Wrapper around `osr.SpatialReference` providing a more pythonic interface\n\n \"\"\"\n\n def __init__(self, crs_str):\n \"\"\"\n\n :param crs_str: string representation of a CRS, often an EPSG code like 'EPSG:4326'\n :raises: InvalidCRSError\n \"\"\"\n if isinstance(crs_str, CRS):\n crs_str = crs_str.crs_str\n self.crs_str = crs_str\n self._crs = _make_crs(crs_str)\n\n def __getitem__(self, item):\n return self._crs.GetAttrValue(item)\n\n def __getstate__(self):\n return {'crs_str': self.crs_str}\n\n def __setstate__(self, state):\n self.__init__(state['crs_str'])\n\n @property\n def wkt(self):\n \"\"\"\n WKT representation of the CRS\n\n :type: str\n \"\"\"\n return self._crs.ExportToWkt()\n\n @property\n def epsg(self):\n \"\"\"\n EPSG Code of the CRS or None\n\n :type: int | None\n \"\"\"\n code = None\n if self.projected:\n code = self._crs.GetAuthorityCode('PROJCS')\n elif self.geographic:\n code = self._crs.GetAuthorityCode('GEOGCS')\n\n return None if code is None else int(code)\n\n @property\n def proj(self):\n return CRSProjProxy(self._crs)\n\n @property\n def semi_major_axis(self):\n return self._crs.GetSemiMajor()\n\n @property\n def semi_minor_axis(self):\n return self._crs.GetSemiMinor()\n\n @property\n def inverse_flattening(self):\n return self._crs.GetInvFlattening()\n\n @property\n def geographic(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._crs.IsGeographic() == 1\n\n @property\n def projected(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._crs.IsProjected() == 1\n\n @property\n def dimensions(self):\n \"\"\"\n List of dimension names of the CRS\n\n :type: (str,str)\n \"\"\"\n if self.geographic:\n return 'latitude', 'longitude'\n\n if 
self.projected:\n return 'y', 'x'\n\n raise ValueError('Neither projected nor geographic')\n\n @property\n def units(self):\n \"\"\"\n List of dimension units of the CRS\n\n :type: (str,str)\n \"\"\"\n if self.geographic:\n return 'degrees_north', 'degrees_east'\n\n if self.projected:\n return self['UNIT'], self['UNIT']\n\n raise ValueError('Neither projected nor geographic')\n\n def __str__(self):\n return self.crs_str\n\n def __repr__(self):\n return \"CRS('%s')\" % self.crs_str\n\n def __eq__(self, other):\n if isinstance(other, str):\n other = CRS(other)\n gdal_thinks_issame = self._crs.IsSame(other._crs) == 1 # pylint: disable=protected-access\n if gdal_thinks_issame:\n return True\n\n def to_canonincal_proj4(crs):\n return set(crs.ExportToProj4().split() + ['+wktext'])\n # pylint: disable=protected-access\n proj4_repr_is_same = to_canonincal_proj4(self._crs) == to_canonincal_proj4(other._crs)\n return proj4_repr_is_same\n\n def __ne__(self, other):\n if isinstance(other, str):\n other = CRS(other)\n assert isinstance(other, self.__class__)\n return self._crs.IsSame(other._crs) != 1 # pylint: disable=protected-access\n\n\ndef mk_osr_point_transform(src_crs, dst_crs):\n return osr.CoordinateTransformation(src_crs._crs, dst_crs._crs) # pylint: disable=protected-access\n\n\ndef mk_point_transformer(src_crs: CRS, dst_crs: CRS) -> Callable[\n [numpy.ndarray, numpy.ndarray],\n Tuple[numpy.ndarray, numpy.ndarray]]:\n \"\"\"\n\n :returns: Function that maps X,Y -> X',Y' where X,Y are coordinates in\n src_crs stored in ndarray of any shape and X',Y' are same shape\n but in dst CRS.\n \"\"\"\n\n tr = mk_osr_point_transform(src_crs, dst_crs)\n\n def transform(x: numpy.ndarray, y: numpy.ndarray) -> Tuple[numpy.ndarray, numpy.ndarray]:\n assert x.shape == y.shape\n\n xy = numpy.vstack([x.ravel(), y.ravel()])\n xy = numpy.vstack(tr.TransformPoints(xy.T)).T[:2]\n\n x_ = xy[0].reshape(x.shape)\n y_ = xy[1].reshape(y.shape)\n\n # ogr doesn't seem to deal with NaNs properly\n missing = numpy.isnan(x) + numpy.isnan(y)\n x_[missing] = numpy.nan\n y_[missing] = numpy.nan\n\n return (x_, y_)\n\n return transform\n\n###################################################\n# Helper methods to build ogr.Geometry from geojson\n###################################################\n\n\ndef _make_point(pt):\n geom = ogr.Geometry(ogr.wkbPoint)\n # Ignore the third dimension\n geom.AddPoint_2D(*pt[0:2])\n return geom\n\n\ndef _make_multi(type_, maker, coords):\n geom = ogr.Geometry(type_)\n for coord in coords:\n geom.AddGeometryDirectly(maker(coord))\n return geom\n\n\ndef _make_linear(type_, coordinates):\n geom = ogr.Geometry(type_)\n for pt in coordinates:\n # Ignore the third dimension\n geom.AddPoint_2D(*pt[0:2])\n return geom\n\n\ndef _make_multipoint(coordinates):\n return _make_multi(ogr.wkbMultiPoint, _make_point, coordinates)\n\n\ndef _make_line(coordinates):\n return _make_linear(ogr.wkbLineString, coordinates)\n\n\ndef _make_multiline(coordinates):\n return _make_multi(ogr.wkbMultiLineString, _make_line, coordinates)\n\n\ndef _make_polygon(coordinates):\n return _make_multi(ogr.wkbPolygon, functools.partial(_make_linear, ogr.wkbLinearRing), coordinates)\n\n\ndef _make_multipolygon(coordinates):\n return _make_multi(ogr.wkbMultiPolygon, _make_polygon, coordinates)\n\n\n###################################################\n# Helper methods to build ogr.Geometry from geojson\n###################################################\n\n\ndef _get_coordinates(geom):\n \"\"\"\n recursively extract coordinates from 
geometry\n \"\"\"\n if geom.GetGeometryType() == ogr.wkbPoint:\n return geom.GetPoint_2D(0)\n if geom.GetGeometryType() in [ogr.wkbMultiPoint, ogr.wkbLineString, ogr.wkbLinearRing]:\n return geom.GetPoints()\n else:\n return [_get_coordinates(geom.GetGeometryRef(i)) for i in range(geom.GetGeometryCount())]\n\n\ndef _make_geom_from_ogr(geom, crs):\n result = Geometry.__new__(Geometry)\n result._geom = geom # pylint: disable=protected-access\n result.crs = crs\n return result\n\n\n#############################################\n# Helper methods to wrap ogr.Geometry methods\n#############################################\n\n\ndef _wrap_binary_bool(method):\n @functools.wraps(method, assigned=('__doc__', ))\n def wrapped(self, other):\n assert self.crs == other.crs\n return bool(method(self._geom, other._geom)) # pylint: disable=protected-access\n return wrapped\n\n\ndef _wrap_binary_geom(method):\n @functools.wraps(method, assigned=('__doc__', ))\n def wrapped(self, other):\n assert self.crs == other.crs\n return _make_geom_from_ogr(method(self._geom, other._geom), self.crs) # pylint: disable=protected-access\n return wrapped\n\n\nclass Geometry(object):\n \"\"\"\n 2D Geometry with CRS\n\n Instantiate with a GeoJSON structure\n\n If 3D coordinates are supplied, they are converted to 2D by dropping the Z points.\n\n :type _geom: ogr.Geometry\n :type crs: CRS\n \"\"\"\n _geom_makers = {\n 'Point': _make_point,\n 'MultiPoint': _make_multipoint,\n 'LineString': _make_line,\n 'MultiLineString': _make_multiline,\n 'Polygon': _make_polygon,\n 'MultiPolygon': _make_multipolygon,\n }\n\n _geom_types = {\n ogr.wkbPoint: 'Point',\n ogr.wkbMultiPoint: 'MultiPoint',\n ogr.wkbLineString: 'LineString',\n ogr.wkbMultiLineString: 'MultiLineString',\n ogr.wkbPolygon: 'Polygon',\n ogr.wkbMultiPolygon: 'MultiPolygon',\n }\n\n contains = _wrap_binary_bool(ogr.Geometry.Contains)\n crosses = _wrap_binary_bool(ogr.Geometry.Crosses)\n disjoint = _wrap_binary_bool(ogr.Geometry.Disjoint)\n intersects = _wrap_binary_bool(ogr.Geometry.Intersects)\n touches = _wrap_binary_bool(ogr.Geometry.Touches)\n within = _wrap_binary_bool(ogr.Geometry.Within)\n overlaps = _wrap_binary_bool(ogr.Geometry.Overlaps)\n\n difference = _wrap_binary_geom(ogr.Geometry.Difference)\n intersection = _wrap_binary_geom(ogr.Geometry.Intersection)\n symmetric_difference = _wrap_binary_geom(ogr.Geometry.SymDifference)\n union = _wrap_binary_geom(ogr.Geometry.Union)\n\n def __init__(self, geo, crs=None):\n self.crs = crs\n self._geom = Geometry._geom_makers[geo['type']](geo['coordinates'])\n\n @property\n def type(self):\n return Geometry._geom_types[self._geom.GetGeometryType()]\n\n @property\n def is_empty(self):\n return self._geom.IsEmpty()\n\n @property\n def is_valid(self):\n return self._geom.IsValid()\n\n @property\n def boundary(self):\n return _make_geom_from_ogr(self._geom.Boundary(), self.crs)\n\n @property\n def centroid(self):\n return _make_geom_from_ogr(self._geom.Centroid(), self.crs)\n\n @property\n def coords(self):\n return self._geom.GetPoints()\n\n @property\n def points(self):\n return self.coords\n\n @property\n def length(self):\n return self._geom.Length()\n\n @property\n def area(self):\n return self._geom.GetArea()\n\n @property\n def convex_hull(self):\n return _make_geom_from_ogr(self._geom.ConvexHull(), self.crs)\n\n @property\n def envelope(self):\n minx, maxx, miny, maxy = self._geom.GetEnvelope()\n return BoundingBox(left=minx, right=maxx, bottom=miny, top=maxy)\n\n @property\n def boundingbox(self):\n return 
self.envelope\n\n @property\n def wkt(self):\n return getattr(self._geom, 'ExportToIsoWkt', self._geom.ExportToWkt)()\n\n @property\n def json(self):\n return self.__geo_interface__\n\n @property\n def __geo_interface__(self):\n return {\n 'type': self.type,\n 'coordinates': _get_coordinates(self._geom)\n }\n\n def segmented(self, resolution):\n \"\"\"\n Possibly add more points to the geometry so that no edge is longer than `resolution`\n \"\"\"\n clone = self._geom.Clone()\n clone.Segmentize(resolution)\n return _make_geom_from_ogr(clone, self.crs)\n\n def interpolate(self, distance):\n \"\"\"\n Returns a point distance units along the line or None if underlying\n geometry doesn't support this operation.\n \"\"\"\n geom = self._geom.Value(distance)\n if geom is None:\n return None\n return _make_geom_from_ogr(geom, self.crs)\n\n def buffer(self, distance, quadsecs=30):\n return _make_geom_from_ogr(self._geom.Buffer(distance, quadsecs), self.crs)\n\n def simplify(self, tolerance):\n return _make_geom_from_ogr(self._geom.Simplify(tolerance), self.crs)\n\n def to_crs(self, crs, resolution=None, wrapdateline=False):\n \"\"\"\n Convert geometry to a different Coordinate Reference System\n\n :param CRS crs: CRS to convert to\n :param float resolution: Subdivide the geometry such it has no segment longer then the given distance.\n :param bool wrapdateline: Attempt to gracefully handle geometry that intersects the dateline\n when converting to geographic projections.\n Currently only works in few specific cases (source CRS is smooth over the dateline).\n :rtype: Geometry\n \"\"\"\n if self.crs == crs:\n return self\n\n if resolution is None:\n resolution = 1 if self.crs.geographic else 100000\n\n transform = mk_osr_point_transform(self.crs, crs)\n clone = self._geom.Clone()\n\n if wrapdateline and crs.geographic:\n rtransform = mk_osr_point_transform(crs, self.crs)\n clone = _chop_along_antimeridian(clone, transform, rtransform)\n\n clone.Segmentize(resolution)\n clone.Transform(transform)\n\n return _make_geom_from_ogr(clone, crs) # pylint: disable=protected-access\n\n def __iter__(self):\n for i in range(self._geom.GetGeometryCount()):\n yield _make_geom_from_ogr(self._geom.GetGeometryRef(i), self.crs)\n\n def __nonzero__(self):\n return not self.is_empty\n\n def __bool__(self):\n return not self.is_empty\n\n def __eq__(self, other):\n return self.crs == other.crs and self._geom.Equal(other._geom) # pylint: disable=protected-access\n\n def __str__(self):\n return 'Geometry(%s, %r)' % (self.__geo_interface__, self.crs)\n\n def __repr__(self):\n return 'Geometry(%s, %s)' % (self._geom, self.crs)\n\n # Implement pickle/unpickle\n # It does work without these two methods, but gdal/ogr prints 'ERROR 1: Empty geometries cannot be constructed'\n # when unpickling, which is quite unpleasant.\n def __getstate__(self):\n return {'geo': self.json, 'crs': self.crs}\n\n def __setstate__(self, state):\n self.__init__(**state)\n\n\ndef _dist(x, y):\n return x*x + y*y\n\n\ndef _chop_along_antimeridian(geom, transform, rtransform):\n \"\"\"\n attempt to cut the geometry along the dateline\n idea borrowed from TransformBeforeAntimeridianToWGS84 with minor mods...\n \"\"\"\n minx, maxx, miny, maxy = geom.GetEnvelope()\n\n midx, midy = (minx+maxx)/2, (miny+maxy)/2\n mid_lon, mid_lat, _ = transform.TransformPoint(midx, midy)\n\n eps = 1.0e-9\n if not _is_smooth_across_dateline(mid_lat, transform, rtransform, eps):\n return geom\n\n left_of_dt = _make_line([(180 - eps, -90), (180 - eps, 90)])\n 
left_of_dt.Segmentize(1)\n left_of_dt.Transform(rtransform)\n\n if not left_of_dt.Intersects(geom):\n return geom\n\n right_of_dt = _make_line([(-180 + eps, -90), (-180 + eps, 90)])\n right_of_dt.Segmentize(1)\n right_of_dt.Transform(rtransform)\n\n chopper = _make_multipolygon([[[(minx, maxy), (minx, miny)] + left_of_dt.GetPoints() + [(minx, maxy)]],\n [[(maxx, maxy), (maxx, miny)] + right_of_dt.GetPoints() + [(maxx, maxy)]]])\n return geom.Intersection(chopper)\n\n\ndef _is_smooth_across_dateline(mid_lat, transform, rtransform, eps):\n \"\"\"\n test whether the CRS is smooth over the dateline\n idea borrowed from IsAntimeridianProjToWGS84 with minor mods...\n \"\"\"\n left_of_dt_x, left_of_dt_y, _ = rtransform.TransformPoint(180-eps, mid_lat)\n right_of_dt_x, right_of_dt_y, _ = rtransform.TransformPoint(-180+eps, mid_lat)\n\n if _dist(right_of_dt_x-left_of_dt_x, right_of_dt_y-left_of_dt_y) > 1:\n return False\n\n left_of_dt_lon, left_of_dt_lat, _ = transform.TransformPoint(left_of_dt_x, left_of_dt_y)\n right_of_dt_lon, right_of_dt_lat, _ = transform.TransformPoint(right_of_dt_x, right_of_dt_y)\n if (_dist(left_of_dt_lon - 180 + eps, left_of_dt_lat - mid_lat) > 2 * eps or\n _dist(right_of_dt_lon + 180 - eps, right_of_dt_lat - mid_lat) > 2 * eps):\n return False\n\n return True\n\n\n###########################################\n# Helper constructor functions a la shapely\n###########################################\n\n\ndef point(x, y, crs):\n \"\"\"\n Create a 2D Point\n\n >>> point(10, 10, crs=None)\n Geometry(POINT (10 10), None)\n\n :rtype: Geometry\n \"\"\"\n return Geometry({'type': 'Point', 'coordinates': (x, y)}, crs=crs)\n\n\ndef multipoint(coords, crs):\n \"\"\"\n Create a 2D MultiPoint Geometry\n\n >>> multipoint([(10, 10), (20, 20)], None)\n Geometry(MULTIPOINT (10 10,20 20), None)\n\n :param list coords: list of x,y coordinate tuples\n :rtype: Geometry\n \"\"\"\n return Geometry({'type': 'MultiPoint', 'coordinates': coords}, crs=crs)\n\n\ndef line(coords, crs):\n \"\"\"\n Create a 2D LineString (Connected set of lines)\n\n >>> line([(10, 10), (20, 20), (30, 40)], None)\n Geometry(LINESTRING (10 10,20 20,30 40), None)\n\n :param list coords: list of x,y coordinate tuples\n :rtype: Geometry\n \"\"\"\n return Geometry({'type': 'LineString', 'coordinates': coords}, crs=crs)\n\n\ndef multiline(coords, crs):\n \"\"\"\n Create a 2D MultiLineString (Multiple disconnected sets of lines)\n\n >>> multiline([[(10, 10), (20, 20), (30, 40)], [(50, 60), (70, 80), (90, 99)]], None)\n Geometry(MULTILINESTRING ((10 10,20 20,30 40),(50 60,70 80,90 99)), None)\n\n :param list coords: list of lists of x,y coordinate tuples\n :rtype: Geometry\n \"\"\"\n return Geometry({'type': 'MultiLineString', 'coordinates': coords}, crs=crs)\n\n\ndef polygon(outer, crs, *inners):\n \"\"\"\n Create a 2D Polygon\n\n >>> polygon([(10, 10), (20, 20), (20, 10), (10, 10)], None)\n Geometry(POLYGON ((10 10,20 20,20 10,10 10)), None)\n\n :param list coords: list of 2d x,y coordinate tuples\n :rtype: Geometry\n \"\"\"\n return Geometry({'type': 'Polygon', 'coordinates': (outer, )+inners}, crs=crs)\n\n\ndef multipolygon(coords, crs):\n \"\"\"\n Create a 2D MultiPolygon\n\n >>> multipolygon([[[(10, 10), (20, 20), (20, 10), (10, 10)]], [[(40, 10), (50, 20), (50, 10), (40, 10)]]], None)\n Geometry(MULTIPOLYGON (((10 10,20 20,20 10,10 10)),((40 10,50 20,50 10,40 10))), None)\n\n :param list coords: list of lists of x,y coordinate tuples\n :rtype: Geometry\n \"\"\"\n return Geometry({'type': 'MultiPolygon', 'coordinates': 
coords}, crs=crs)\n\n\ndef box(left, bottom, right, top, crs):\n \"\"\"\n Create a 2D Box (Polygon)\n\n >>> box(10, 10, 20, 20, None)\n Geometry(POLYGON ((10 10,10 20,20 20,20 10,10 10)), None)\n \"\"\"\n points = [(left, bottom), (left, top), (right, top), (right, bottom), (left, bottom)]\n return polygon(points, crs=crs)\n\n\ndef polygon_from_transform(width, height, transform, crs):\n \"\"\"\n Create a 2D Polygon from an affine transform\n\n :param float width:\n :param float height:\n :param Affine transform:\n :param crs: CRS\n :rtype: Geometry\n \"\"\"\n points = [(0, 0), (0, height), (width, height), (width, 0), (0, 0)]\n transform.itransform(points)\n return polygon(points, crs=crs)\n\n\n###########################################\n# Multi-geometry operations\n###########################################\n\n\ndef unary_union(geoms):\n \"\"\"\n compute union of multiple (multi)polygons efficiently\n \"\"\"\n # pylint: disable=protected-access\n geom = ogr.Geometry(ogr.wkbMultiPolygon)\n crs = None\n for g in geoms:\n if crs:\n assert crs == g.crs\n else:\n crs = g.crs\n if g._geom.GetGeometryType() == ogr.wkbPolygon:\n geom.AddGeometry(g._geom)\n elif g._geom.GetGeometryType() == ogr.wkbMultiPolygon:\n for poly in g._geom:\n geom.AddGeometry(poly)\n else:\n raise ValueError('\"%s\" is not supported' % g.type)\n union = geom.UnionCascaded()\n return _make_geom_from_ogr(union, crs)\n\n\ndef unary_intersection(geoms):\n \"\"\"\n compute intersection of multiple (multi)polygons\n \"\"\"\n return functools.reduce(Geometry.intersection, geoms)\n\n\ndef _align_pix(left, right, res, off):\n \"\"\"\n >>> \"%.2f %d\" % _align_pix(20, 30, 10, 0)\n '20.00 1'\n >>> \"%.2f %d\" % _align_pix(20, 30.5, 10, 0)\n '20.00 1'\n >>> \"%.2f %d\" % _align_pix(20, 31.5, 10, 0)\n '20.00 2'\n >>> \"%.2f %d\" % _align_pix(20, 30, 10, 3)\n '13.00 2'\n >>> \"%.2f %d\" % _align_pix(20, 30, 10, -3)\n '17.00 2'\n >>> \"%.2f %d\" % _align_pix(20, 30, -10, 0)\n '30.00 1'\n >>> \"%.2f %d\" % _align_pix(19.5, 30, -10, 0)\n '30.00 1'\n >>> \"%.2f %d\" % _align_pix(18.5, 30, -10, 0)\n '30.00 2'\n >>> \"%.2f %d\" % _align_pix(20, 30, -10, 3)\n '33.00 2'\n >>> \"%.2f %d\" % _align_pix(20, 30, -10, -3)\n '37.00 2'\n \"\"\"\n if res < 0:\n res = -res\n val = math.ceil((right - off) / res) * res + off\n width = max(1, int(math.ceil((val - left - 0.1 * res) / res)))\n else:\n val = math.floor((left - off) / res) * res + off\n width = max(1, int(math.ceil((right - val - 0.1 * res) / res)))\n return val, width\n\n\nclass GeoBox(object):\n \"\"\"\n Defines the location and resolution of a rectangular grid of data,\n including it's :py:class:`CRS`.\n\n :param geometry.CRS crs: Coordinate Reference System\n :param affine.Affine affine: Affine transformation defining the location of the geobox\n \"\"\"\n\n def __init__(self, width, height, affine, crs):\n assert height > 0 and width > 0, \"Can't create GeoBox of zero size\"\n #: :type: int\n self.width = width\n #: :type: int\n self.height = height\n #: :rtype: affine.Affine\n self.affine = affine\n #: :rtype: geometry.Geometry\n self.extent = polygon_from_transform(width, height, affine, crs=crs)\n\n @classmethod\n def from_geopolygon(cls, geopolygon, resolution, crs=None, align=None):\n \"\"\"\n :type geopolygon: geometry.Geometry\n :param resolution: (y_resolution, x_resolution)\n :param geometry.CRS crs: CRS to use, if different from the geopolygon\n :param (float,float) align: Align geobox such that point 'align' lies on the pixel boundary.\n :rtype: GeoBox\n \"\"\"\n align = 
align or (0.0, 0.0)\n assert 0.0 <= align[1] <= abs(resolution[1]), \"X align must be in [0, abs(x_resolution)] range\"\n assert 0.0 <= align[0] <= abs(resolution[0]), \"Y align must be in [0, abs(y_resolution)] range\"\n\n if crs is None:\n crs = geopolygon.crs\n else:\n geopolygon = geopolygon.to_crs(crs)\n\n bounding_box = geopolygon.boundingbox\n offx, width = _align_pix(bounding_box.left, bounding_box.right, resolution[1], align[1])\n offy, height = _align_pix(bounding_box.bottom, bounding_box.top, resolution[0], align[0])\n affine = (Affine.translation(offx, offy) * Affine.scale(resolution[1], resolution[0]))\n return GeoBox(crs=crs, affine=affine, width=width, height=height)\n\n def buffered(self, ybuff, xbuff):\n \"\"\"\n Produce a tile buffered by ybuff, xbuff (in CRS units)\n \"\"\"\n by, bx = (_round_to_res(buf, res) for buf, res in zip((ybuff, xbuff), self.resolution))\n affine = self.affine * Affine.translation(-bx, -by)\n\n return GeoBox(width=self.width + 2*bx,\n height=self.height + 2*by,\n affine=affine,\n crs=self.crs)\n\n def __getitem__(self, roi):\n if isinstance(roi, int):\n roi = (slice(roi, roi+1), slice(None, None))\n\n if isinstance(roi, slice):\n roi = (roi, slice(None, None))\n\n if len(roi) > 2:\n raise ValueError('Expect 2d slice')\n\n if not all(s.step is None or s.step == 1 for s in roi):\n raise NotImplementedError('scaling not implemented, yet')\n\n roi = roi_normalise(roi, self.shape)\n ty, tx = [s.start for s in roi]\n h, w = roi_shape(roi)\n\n affine = self.affine * Affine.translation(tx, ty)\n\n return GeoBox(width=w, height=h, affine=affine, crs=self.crs)\n\n @property\n def transform(self):\n return self.affine\n\n @property\n def shape(self):\n \"\"\"\n :type: (int,int)\n \"\"\"\n return self.height, self.width\n\n @property\n def crs(self):\n \"\"\"\n :rtype: CRS\n \"\"\"\n return self.extent.crs\n\n @property\n def dimensions(self):\n \"\"\"\n List of dimension names of the GeoBox\n\n :type: (str,str)\n \"\"\"\n return self.crs.dimensions\n\n @property\n def resolution(self):\n \"\"\"\n Resolution in Y,X dimensions\n\n :type: (float,float)\n \"\"\"\n return self.affine.e, self.affine.a\n\n @property\n def alignment(self):\n \"\"\"\n Alignment of pixel boundaries in Y,X dimensions\n\n :type: (float,float)\n \"\"\"\n return self.affine.yoff % abs(self.affine.e), self.affine.xoff % abs(self.affine.a)\n\n @property\n def coordinates(self):\n \"\"\"\n dict of coordinate labels\n\n :type: dict[str,numpy.array]\n \"\"\"\n xs = numpy.arange(self.width) * self.affine.a + (self.affine.c + self.affine.a / 2)\n ys = numpy.arange(self.height) * self.affine.e + (self.affine.f + self.affine.e / 2)\n\n return OrderedDict((dim, Coordinate(labels, units)) for dim, labels, units in zip(self.crs.dimensions,\n (ys, xs), self.crs.units))\n\n @property\n def geographic_extent(self):\n \"\"\"\n :rtype: geometry.Geometry\n \"\"\"\n if self.crs.geographic:\n return self.extent\n return self.extent.to_crs(CRS('EPSG:4326'))\n\n coords = coordinates\n dims = dimensions\n\n def __str__(self):\n return \"GeoBox({})\".format(self.geographic_extent)\n\n def __repr__(self):\n return \"GeoBox({width}, {height}, {affine!r}, {crs})\".format(\n width=self.width,\n height=self.height,\n affine=self.affine,\n crs=self.extent.crs\n )\n\n def __eq__(self, other):\n if not isinstance(other, GeoBox):\n return False\n\n return (self.shape == other.shape\n and self.transform == other.transform\n and self.crs == other.crs)\n\n\ndef scaled_down_geobox(src_geobox, scaler: int):\n \"\"\"Given a 
source geobox and integer scaler compute geobox of a scaled down image.\n\n Output geobox will be padded when shape is not a multiple of scaler.\n Example: 5x4, scaler=2 -> 3x2\n\n NOTE: here we assume that pixel coordinates are 0,0 at the top-left\n corner of a top-left pixel.\n\n \"\"\"\n assert scaler > 1\n\n H, W = [X//scaler + (1 if X % scaler else 0)\n for X in src_geobox.shape]\n\n # Since 0,0 is at the corner of a pixel, not center, there is no\n # translation between pixel plane coords due to scaling\n A = src_geobox.transform * Affine.scale(scaler, scaler)\n\n return GeoBox(W, H, A, src_geobox.crs)\n\n\ndef _round_to_res(value, res, acc=0.1):\n \"\"\"\n >>> _round_to_res(0.2, 1.0)\n 1\n >>> _round_to_res(0.0, 1.0)\n 0\n >>> _round_to_res(0.05, 1.0)\n 0\n \"\"\"\n res = abs(res)\n return int(math.ceil((value - 0.1 * res) / res))\n\n\ndef intersects(a, b):\n return a.intersects(b) and not a.touches(b)\n", "path": "datacube/utils/geometry/_base.py" } ]
[ { "content": "import functools\nimport math\nfrom collections import namedtuple, OrderedDict\nfrom typing import Tuple, Callable\n\nimport cachetools\nimport numpy\nfrom affine import Affine\nfrom osgeo import ogr, osr\n\nfrom .tools import roi_normalise, roi_shape\n\nCoordinate = namedtuple('Coordinate', ('values', 'units'))\n_BoundingBox = namedtuple('BoundingBox', ('left', 'bottom', 'right', 'top'))\n\n\nclass BoundingBox(_BoundingBox):\n \"\"\"Bounding box, defining extent in cartesian coordinates.\n \"\"\"\n\n def buffered(self, ybuff, xbuff):\n \"\"\"\n Return a new BoundingBox, buffered in the x and y dimensions.\n\n :param ybuff: Y dimension buffering amount\n :param xbuff: X dimension buffering amount\n :return: new BoundingBox\n \"\"\"\n return BoundingBox(left=self.left - xbuff, right=self.right + xbuff,\n top=self.top + ybuff, bottom=self.bottom - ybuff)\n\n @property\n def width(self):\n return self.right - self.left\n\n @property\n def height(self):\n return self.top - self.bottom\n\n\nclass CRSProjProxy(object):\n def __init__(self, crs):\n self._crs = crs\n\n def __getattr__(self, item):\n return self._crs.GetProjParm(item)\n\n\nclass InvalidCRSError(ValueError):\n pass\n\n\[email protected]({})\ndef _make_crs(crs_str):\n crs = osr.SpatialReference()\n\n # We don't bother checking the return code for errors, as the below ExportToProj4 does a more thorough job.\n crs.SetFromUserInput(crs_str)\n\n # Some will \"validly\" be parsed above, but return OGRERR_CORRUPT_DATA error when used here.\n # see the PROJCS[\"unnamed... doctest below for an example.\n if not crs.ExportToProj4():\n raise InvalidCRSError(\"Not a valid CRS: %r\" % crs_str)\n\n if crs.IsGeographic() == crs.IsProjected():\n raise InvalidCRSError('CRS must be geographic or projected: %r' % crs_str)\n\n return crs\n\n\nclass CRS(object):\n \"\"\"\n Wrapper around `osr.SpatialReference` providing a more pythonic interface\n\n \"\"\"\n\n def __init__(self, crs_str):\n \"\"\"\n\n :param crs_str: string representation of a CRS, often an EPSG code like 'EPSG:4326'\n :raises: InvalidCRSError\n \"\"\"\n if isinstance(crs_str, CRS):\n crs_str = crs_str.crs_str\n self.crs_str = crs_str\n self._crs = _make_crs(crs_str)\n\n def __getitem__(self, item):\n return self._crs.GetAttrValue(item)\n\n def __getstate__(self):\n return {'crs_str': self.crs_str}\n\n def __setstate__(self, state):\n self.__init__(state['crs_str'])\n\n @property\n def wkt(self):\n \"\"\"\n WKT representation of the CRS\n\n :type: str\n \"\"\"\n return self._crs.ExportToWkt()\n\n @property\n def epsg(self):\n \"\"\"\n EPSG Code of the CRS or None\n\n :type: int | None\n \"\"\"\n code = None\n if self.projected:\n code = self._crs.GetAuthorityCode('PROJCS')\n elif self.geographic:\n code = self._crs.GetAuthorityCode('GEOGCS')\n\n return None if code is None else int(code)\n\n @property\n def proj(self):\n return CRSProjProxy(self._crs)\n\n @property\n def semi_major_axis(self):\n return self._crs.GetSemiMajor()\n\n @property\n def semi_minor_axis(self):\n return self._crs.GetSemiMinor()\n\n @property\n def inverse_flattening(self):\n return self._crs.GetInvFlattening()\n\n @property\n def geographic(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._crs.IsGeographic() == 1\n\n @property\n def projected(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._crs.IsProjected() == 1\n\n @property\n def dimensions(self):\n \"\"\"\n List of dimension names of the CRS\n\n :type: (str,str)\n \"\"\"\n if self.geographic:\n return 'latitude', 'longitude'\n\n if 
self.projected:\n return 'y', 'x'\n\n raise ValueError('Neither projected nor geographic')\n\n @property\n def units(self):\n \"\"\"\n List of dimension units of the CRS\n\n :type: (str,str)\n \"\"\"\n if self.geographic:\n return 'degrees_north', 'degrees_east'\n\n if self.projected:\n return self['UNIT'], self['UNIT']\n\n raise ValueError('Neither projected nor geographic')\n\n def __str__(self):\n return self.crs_str\n\n def __repr__(self):\n return \"CRS('%s')\" % self.crs_str\n\n def __eq__(self, other):\n if isinstance(other, str):\n other = CRS(other)\n gdal_thinks_issame = self._crs.IsSame(other._crs) == 1 # pylint: disable=protected-access\n if gdal_thinks_issame:\n return True\n\n def to_canonincal_proj4(crs):\n return set(crs.ExportToProj4().split() + ['+wktext'])\n # pylint: disable=protected-access\n proj4_repr_is_same = to_canonincal_proj4(self._crs) == to_canonincal_proj4(other._crs)\n return proj4_repr_is_same\n\n def __ne__(self, other):\n if isinstance(other, str):\n other = CRS(other)\n assert isinstance(other, self.__class__)\n return self._crs.IsSame(other._crs) != 1 # pylint: disable=protected-access\n\n\ndef mk_osr_point_transform(src_crs, dst_crs):\n return osr.CoordinateTransformation(src_crs._crs, dst_crs._crs) # pylint: disable=protected-access\n\n\ndef mk_point_transformer(src_crs: CRS, dst_crs: CRS) -> Callable[\n [numpy.ndarray, numpy.ndarray],\n Tuple[numpy.ndarray, numpy.ndarray]]:\n \"\"\"\n\n :returns: Function that maps X,Y -> X',Y' where X,Y are coordinates in\n src_crs stored in ndarray of any shape and X',Y' are same shape\n but in dst CRS.\n \"\"\"\n\n tr = mk_osr_point_transform(src_crs, dst_crs)\n\n def transform(x: numpy.ndarray, y: numpy.ndarray) -> Tuple[numpy.ndarray, numpy.ndarray]:\n assert x.shape == y.shape\n\n xy = numpy.vstack([x.ravel(), y.ravel()])\n xy = numpy.vstack(tr.TransformPoints(xy.T)).T[:2]\n\n x_ = xy[0].reshape(x.shape)\n y_ = xy[1].reshape(y.shape)\n\n # ogr doesn't seem to deal with NaNs properly\n missing = numpy.isnan(x) + numpy.isnan(y)\n x_[missing] = numpy.nan\n y_[missing] = numpy.nan\n\n return (x_, y_)\n\n return transform\n\n###################################################\n# Helper methods to build ogr.Geometry from geojson\n###################################################\n\n\ndef _make_point(pt):\n geom = ogr.Geometry(ogr.wkbPoint)\n # Ignore the third dimension\n geom.AddPoint_2D(*pt[0:2])\n return geom\n\n\ndef _make_multi(type_, maker, coords):\n geom = ogr.Geometry(type_)\n for coord in coords:\n geom.AddGeometryDirectly(maker(coord))\n return geom\n\n\ndef _make_linear(type_, coordinates):\n geom = ogr.Geometry(type_)\n for pt in coordinates:\n # Ignore the third dimension\n geom.AddPoint_2D(*pt[0:2])\n return geom\n\n\ndef _make_multipoint(coordinates):\n return _make_multi(ogr.wkbMultiPoint, _make_point, coordinates)\n\n\ndef _make_line(coordinates):\n return _make_linear(ogr.wkbLineString, coordinates)\n\n\ndef _make_multiline(coordinates):\n return _make_multi(ogr.wkbMultiLineString, _make_line, coordinates)\n\n\ndef _make_polygon(coordinates):\n return _make_multi(ogr.wkbPolygon, functools.partial(_make_linear, ogr.wkbLinearRing), coordinates)\n\n\ndef _make_multipolygon(coordinates):\n return _make_multi(ogr.wkbMultiPolygon, _make_polygon, coordinates)\n\n\n###################################################\n# Helper methods to build ogr.Geometry from geojson\n###################################################\n\n\ndef _get_coordinates(geom):\n \"\"\"\n recursively extract coordinates from 
geometry\n \"\"\"\n if geom.GetGeometryType() == ogr.wkbPoint:\n return geom.GetPoint_2D(0)\n if geom.GetGeometryType() in [ogr.wkbMultiPoint, ogr.wkbLineString, ogr.wkbLinearRing]:\n return geom.GetPoints()\n else:\n return [_get_coordinates(geom.GetGeometryRef(i)) for i in range(geom.GetGeometryCount())]\n\n\ndef _make_geom_from_ogr(geom, crs):\n if geom is None:\n return None\n result = Geometry.__new__(Geometry)\n result._geom = geom # pylint: disable=protected-access\n result.crs = crs\n return result\n\n\n#############################################\n# Helper methods to wrap ogr.Geometry methods\n#############################################\n\n\ndef _wrap_binary_bool(method):\n @functools.wraps(method, assigned=('__doc__', ))\n def wrapped(self, other):\n assert self.crs == other.crs\n return bool(method(self._geom, other._geom)) # pylint: disable=protected-access\n return wrapped\n\n\ndef _wrap_binary_geom(method):\n @functools.wraps(method, assigned=('__doc__', ))\n def wrapped(self, other):\n assert self.crs == other.crs\n return _make_geom_from_ogr(method(self._geom, other._geom), self.crs) # pylint: disable=protected-access\n return wrapped\n\n\nclass Geometry(object):\n \"\"\"\n 2D Geometry with CRS\n\n Instantiate with a GeoJSON structure\n\n If 3D coordinates are supplied, they are converted to 2D by dropping the Z points.\n\n :type _geom: ogr.Geometry\n :type crs: CRS\n \"\"\"\n _geom_makers = {\n 'Point': _make_point,\n 'MultiPoint': _make_multipoint,\n 'LineString': _make_line,\n 'MultiLineString': _make_multiline,\n 'Polygon': _make_polygon,\n 'MultiPolygon': _make_multipolygon,\n }\n\n _geom_types = {\n ogr.wkbPoint: 'Point',\n ogr.wkbMultiPoint: 'MultiPoint',\n ogr.wkbLineString: 'LineString',\n ogr.wkbMultiLineString: 'MultiLineString',\n ogr.wkbPolygon: 'Polygon',\n ogr.wkbMultiPolygon: 'MultiPolygon',\n }\n\n contains = _wrap_binary_bool(ogr.Geometry.Contains)\n crosses = _wrap_binary_bool(ogr.Geometry.Crosses)\n disjoint = _wrap_binary_bool(ogr.Geometry.Disjoint)\n intersects = _wrap_binary_bool(ogr.Geometry.Intersects)\n touches = _wrap_binary_bool(ogr.Geometry.Touches)\n within = _wrap_binary_bool(ogr.Geometry.Within)\n overlaps = _wrap_binary_bool(ogr.Geometry.Overlaps)\n\n difference = _wrap_binary_geom(ogr.Geometry.Difference)\n intersection = _wrap_binary_geom(ogr.Geometry.Intersection)\n symmetric_difference = _wrap_binary_geom(ogr.Geometry.SymDifference)\n union = _wrap_binary_geom(ogr.Geometry.Union)\n\n def __init__(self, geo, crs=None):\n self.crs = crs\n self._geom = Geometry._geom_makers[geo['type']](geo['coordinates'])\n\n @property\n def type(self):\n return Geometry._geom_types[self._geom.GetGeometryType()]\n\n @property\n def is_empty(self):\n return self._geom.IsEmpty()\n\n @property\n def is_valid(self):\n return self._geom.IsValid()\n\n @property\n def boundary(self):\n return _make_geom_from_ogr(self._geom.Boundary(), self.crs)\n\n @property\n def centroid(self):\n return _make_geom_from_ogr(self._geom.Centroid(), self.crs)\n\n @property\n def coords(self):\n return self._geom.GetPoints()\n\n @property\n def points(self):\n return self.coords\n\n @property\n def length(self):\n return self._geom.Length()\n\n @property\n def area(self):\n return self._geom.GetArea()\n\n @property\n def convex_hull(self):\n return _make_geom_from_ogr(self._geom.ConvexHull(), self.crs)\n\n @property\n def envelope(self):\n minx, maxx, miny, maxy = self._geom.GetEnvelope()\n return BoundingBox(left=minx, right=maxx, bottom=miny, top=maxy)\n\n @property\n def 
boundingbox(self):\n return self.envelope\n\n @property\n def wkt(self):\n return getattr(self._geom, 'ExportToIsoWkt', self._geom.ExportToWkt)()\n\n @property\n def json(self):\n return self.__geo_interface__\n\n @property\n def __geo_interface__(self):\n return {\n 'type': self.type,\n 'coordinates': _get_coordinates(self._geom)\n }\n\n def segmented(self, resolution):\n \"\"\"\n Possibly add more points to the geometry so that no edge is longer than `resolution`\n \"\"\"\n clone = self._geom.Clone()\n clone.Segmentize(resolution)\n return _make_geom_from_ogr(clone, self.crs)\n\n def interpolate(self, distance):\n \"\"\"\n Returns a point distance units along the line or None if underlying\n geometry doesn't support this operation.\n \"\"\"\n geom = self._geom.Value(distance)\n if geom is None:\n return None\n return _make_geom_from_ogr(geom, self.crs)\n\n def buffer(self, distance, quadsecs=30):\n return _make_geom_from_ogr(self._geom.Buffer(distance, quadsecs), self.crs)\n\n def simplify(self, tolerance):\n return _make_geom_from_ogr(self._geom.Simplify(tolerance), self.crs)\n\n def to_crs(self, crs, resolution=None, wrapdateline=False):\n \"\"\"\n Convert geometry to a different Coordinate Reference System\n\n :param CRS crs: CRS to convert to\n :param float resolution: Subdivide the geometry such it has no segment longer then the given distance.\n :param bool wrapdateline: Attempt to gracefully handle geometry that intersects the dateline\n when converting to geographic projections.\n Currently only works in few specific cases (source CRS is smooth over the dateline).\n :rtype: Geometry\n \"\"\"\n if self.crs == crs:\n return self\n\n if resolution is None:\n resolution = 1 if self.crs.geographic else 100000\n\n transform = mk_osr_point_transform(self.crs, crs)\n clone = self._geom.Clone()\n\n if wrapdateline and crs.geographic:\n rtransform = mk_osr_point_transform(crs, self.crs)\n clone = _chop_along_antimeridian(clone, transform, rtransform)\n\n clone.Segmentize(resolution)\n clone.Transform(transform)\n\n return _make_geom_from_ogr(clone, crs) # pylint: disable=protected-access\n\n def __iter__(self):\n for i in range(self._geom.GetGeometryCount()):\n yield _make_geom_from_ogr(self._geom.GetGeometryRef(i), self.crs)\n\n def __nonzero__(self):\n return not self.is_empty\n\n def __bool__(self):\n return not self.is_empty\n\n def __eq__(self, other):\n return self.crs == other.crs and self._geom.Equal(other._geom) # pylint: disable=protected-access\n\n def __str__(self):\n return 'Geometry(%s, %r)' % (self.__geo_interface__, self.crs)\n\n def __repr__(self):\n return 'Geometry(%s, %s)' % (self._geom, self.crs)\n\n # Implement pickle/unpickle\n # It does work without these two methods, but gdal/ogr prints 'ERROR 1: Empty geometries cannot be constructed'\n # when unpickling, which is quite unpleasant.\n def __getstate__(self):\n return {'geo': self.json, 'crs': self.crs}\n\n def __setstate__(self, state):\n self.__init__(**state)\n\n\ndef _dist(x, y):\n return x*x + y*y\n\n\ndef _chop_along_antimeridian(geom, transform, rtransform):\n \"\"\"\n attempt to cut the geometry along the dateline\n idea borrowed from TransformBeforeAntimeridianToWGS84 with minor mods...\n \"\"\"\n minx, maxx, miny, maxy = geom.GetEnvelope()\n\n midx, midy = (minx+maxx)/2, (miny+maxy)/2\n mid_lon, mid_lat, _ = transform.TransformPoint(midx, midy)\n\n eps = 1.0e-9\n if not _is_smooth_across_dateline(mid_lat, transform, rtransform, eps):\n return geom\n\n left_of_dt = _make_line([(180 - eps, -90), (180 - eps, 
90)])\n left_of_dt.Segmentize(1)\n left_of_dt.Transform(rtransform)\n\n if not left_of_dt.Intersects(geom):\n return geom\n\n right_of_dt = _make_line([(-180 + eps, -90), (-180 + eps, 90)])\n right_of_dt.Segmentize(1)\n right_of_dt.Transform(rtransform)\n\n chopper = _make_multipolygon([[[(minx, maxy), (minx, miny)] + left_of_dt.GetPoints() + [(minx, maxy)]],\n [[(maxx, maxy), (maxx, miny)] + right_of_dt.GetPoints() + [(maxx, maxy)]]])\n return geom.Intersection(chopper)\n\n\ndef _is_smooth_across_dateline(mid_lat, transform, rtransform, eps):\n \"\"\"\n test whether the CRS is smooth over the dateline\n idea borrowed from IsAntimeridianProjToWGS84 with minor mods...\n \"\"\"\n left_of_dt_x, left_of_dt_y, _ = rtransform.TransformPoint(180-eps, mid_lat)\n right_of_dt_x, right_of_dt_y, _ = rtransform.TransformPoint(-180+eps, mid_lat)\n\n if _dist(right_of_dt_x-left_of_dt_x, right_of_dt_y-left_of_dt_y) > 1:\n return False\n\n left_of_dt_lon, left_of_dt_lat, _ = transform.TransformPoint(left_of_dt_x, left_of_dt_y)\n right_of_dt_lon, right_of_dt_lat, _ = transform.TransformPoint(right_of_dt_x, right_of_dt_y)\n if (_dist(left_of_dt_lon - 180 + eps, left_of_dt_lat - mid_lat) > 2 * eps or\n _dist(right_of_dt_lon + 180 - eps, right_of_dt_lat - mid_lat) > 2 * eps):\n return False\n\n return True\n\n\n###########################################\n# Helper constructor functions a la shapely\n###########################################\n\n\ndef point(x, y, crs):\n \"\"\"\n Create a 2D Point\n\n >>> point(10, 10, crs=None)\n Geometry(POINT (10 10), None)\n\n :rtype: Geometry\n \"\"\"\n return Geometry({'type': 'Point', 'coordinates': (x, y)}, crs=crs)\n\n\ndef multipoint(coords, crs):\n \"\"\"\n Create a 2D MultiPoint Geometry\n\n >>> multipoint([(10, 10), (20, 20)], None)\n Geometry(MULTIPOINT (10 10,20 20), None)\n\n :param list coords: list of x,y coordinate tuples\n :rtype: Geometry\n \"\"\"\n return Geometry({'type': 'MultiPoint', 'coordinates': coords}, crs=crs)\n\n\ndef line(coords, crs):\n \"\"\"\n Create a 2D LineString (Connected set of lines)\n\n >>> line([(10, 10), (20, 20), (30, 40)], None)\n Geometry(LINESTRING (10 10,20 20,30 40), None)\n\n :param list coords: list of x,y coordinate tuples\n :rtype: Geometry\n \"\"\"\n return Geometry({'type': 'LineString', 'coordinates': coords}, crs=crs)\n\n\ndef multiline(coords, crs):\n \"\"\"\n Create a 2D MultiLineString (Multiple disconnected sets of lines)\n\n >>> multiline([[(10, 10), (20, 20), (30, 40)], [(50, 60), (70, 80), (90, 99)]], None)\n Geometry(MULTILINESTRING ((10 10,20 20,30 40),(50 60,70 80,90 99)), None)\n\n :param list coords: list of lists of x,y coordinate tuples\n :rtype: Geometry\n \"\"\"\n return Geometry({'type': 'MultiLineString', 'coordinates': coords}, crs=crs)\n\n\ndef polygon(outer, crs, *inners):\n \"\"\"\n Create a 2D Polygon\n\n >>> polygon([(10, 10), (20, 20), (20, 10), (10, 10)], None)\n Geometry(POLYGON ((10 10,20 20,20 10,10 10)), None)\n\n :param list coords: list of 2d x,y coordinate tuples\n :rtype: Geometry\n \"\"\"\n return Geometry({'type': 'Polygon', 'coordinates': (outer, )+inners}, crs=crs)\n\n\ndef multipolygon(coords, crs):\n \"\"\"\n Create a 2D MultiPolygon\n\n >>> multipolygon([[[(10, 10), (20, 20), (20, 10), (10, 10)]], [[(40, 10), (50, 20), (50, 10), (40, 10)]]], None)\n Geometry(MULTIPOLYGON (((10 10,20 20,20 10,10 10)),((40 10,50 20,50 10,40 10))), None)\n\n :param list coords: list of lists of x,y coordinate tuples\n :rtype: Geometry\n \"\"\"\n return Geometry({'type': 'MultiPolygon', 
'coordinates': coords}, crs=crs)\n\n\ndef box(left, bottom, right, top, crs):\n \"\"\"\n Create a 2D Box (Polygon)\n\n >>> box(10, 10, 20, 20, None)\n Geometry(POLYGON ((10 10,10 20,20 20,20 10,10 10)), None)\n \"\"\"\n points = [(left, bottom), (left, top), (right, top), (right, bottom), (left, bottom)]\n return polygon(points, crs=crs)\n\n\ndef polygon_from_transform(width, height, transform, crs):\n \"\"\"\n Create a 2D Polygon from an affine transform\n\n :param float width:\n :param float height:\n :param Affine transform:\n :param crs: CRS\n :rtype: Geometry\n \"\"\"\n points = [(0, 0), (0, height), (width, height), (width, 0), (0, 0)]\n transform.itransform(points)\n return polygon(points, crs=crs)\n\n\n###########################################\n# Multi-geometry operations\n###########################################\n\n\ndef unary_union(geoms):\n \"\"\"\n compute union of multiple (multi)polygons efficiently\n \"\"\"\n # pylint: disable=protected-access\n geom = ogr.Geometry(ogr.wkbMultiPolygon)\n crs = None\n for g in geoms:\n if crs:\n assert crs == g.crs\n else:\n crs = g.crs\n if g._geom.GetGeometryType() == ogr.wkbPolygon:\n geom.AddGeometry(g._geom)\n elif g._geom.GetGeometryType() == ogr.wkbMultiPolygon:\n for poly in g._geom:\n geom.AddGeometry(poly)\n else:\n raise ValueError('\"%s\" is not supported' % g.type)\n union = geom.UnionCascaded()\n return _make_geom_from_ogr(union, crs)\n\n\ndef unary_intersection(geoms):\n \"\"\"\n compute intersection of multiple (multi)polygons\n \"\"\"\n return functools.reduce(Geometry.intersection, geoms)\n\n\ndef _align_pix(left, right, res, off):\n \"\"\"\n >>> \"%.2f %d\" % _align_pix(20, 30, 10, 0)\n '20.00 1'\n >>> \"%.2f %d\" % _align_pix(20, 30.5, 10, 0)\n '20.00 1'\n >>> \"%.2f %d\" % _align_pix(20, 31.5, 10, 0)\n '20.00 2'\n >>> \"%.2f %d\" % _align_pix(20, 30, 10, 3)\n '13.00 2'\n >>> \"%.2f %d\" % _align_pix(20, 30, 10, -3)\n '17.00 2'\n >>> \"%.2f %d\" % _align_pix(20, 30, -10, 0)\n '30.00 1'\n >>> \"%.2f %d\" % _align_pix(19.5, 30, -10, 0)\n '30.00 1'\n >>> \"%.2f %d\" % _align_pix(18.5, 30, -10, 0)\n '30.00 2'\n >>> \"%.2f %d\" % _align_pix(20, 30, -10, 3)\n '33.00 2'\n >>> \"%.2f %d\" % _align_pix(20, 30, -10, -3)\n '37.00 2'\n \"\"\"\n if res < 0:\n res = -res\n val = math.ceil((right - off) / res) * res + off\n width = max(1, int(math.ceil((val - left - 0.1 * res) / res)))\n else:\n val = math.floor((left - off) / res) * res + off\n width = max(1, int(math.ceil((right - val - 0.1 * res) / res)))\n return val, width\n\n\nclass GeoBox(object):\n \"\"\"\n Defines the location and resolution of a rectangular grid of data,\n including it's :py:class:`CRS`.\n\n :param geometry.CRS crs: Coordinate Reference System\n :param affine.Affine affine: Affine transformation defining the location of the geobox\n \"\"\"\n\n def __init__(self, width, height, affine, crs):\n assert height > 0 and width > 0, \"Can't create GeoBox of zero size\"\n #: :type: int\n self.width = width\n #: :type: int\n self.height = height\n #: :rtype: affine.Affine\n self.affine = affine\n #: :rtype: geometry.Geometry\n self.extent = polygon_from_transform(width, height, affine, crs=crs)\n\n @classmethod\n def from_geopolygon(cls, geopolygon, resolution, crs=None, align=None):\n \"\"\"\n :type geopolygon: geometry.Geometry\n :param resolution: (y_resolution, x_resolution)\n :param geometry.CRS crs: CRS to use, if different from the geopolygon\n :param (float,float) align: Align geobox such that point 'align' lies on the pixel boundary.\n :rtype: GeoBox\n 
\"\"\"\n align = align or (0.0, 0.0)\n assert 0.0 <= align[1] <= abs(resolution[1]), \"X align must be in [0, abs(x_resolution)] range\"\n assert 0.0 <= align[0] <= abs(resolution[0]), \"Y align must be in [0, abs(y_resolution)] range\"\n\n if crs is None:\n crs = geopolygon.crs\n else:\n geopolygon = geopolygon.to_crs(crs)\n\n bounding_box = geopolygon.boundingbox\n offx, width = _align_pix(bounding_box.left, bounding_box.right, resolution[1], align[1])\n offy, height = _align_pix(bounding_box.bottom, bounding_box.top, resolution[0], align[0])\n affine = (Affine.translation(offx, offy) * Affine.scale(resolution[1], resolution[0]))\n return GeoBox(crs=crs, affine=affine, width=width, height=height)\n\n def buffered(self, ybuff, xbuff):\n \"\"\"\n Produce a tile buffered by ybuff, xbuff (in CRS units)\n \"\"\"\n by, bx = (_round_to_res(buf, res) for buf, res in zip((ybuff, xbuff), self.resolution))\n affine = self.affine * Affine.translation(-bx, -by)\n\n return GeoBox(width=self.width + 2*bx,\n height=self.height + 2*by,\n affine=affine,\n crs=self.crs)\n\n def __getitem__(self, roi):\n if isinstance(roi, int):\n roi = (slice(roi, roi+1), slice(None, None))\n\n if isinstance(roi, slice):\n roi = (roi, slice(None, None))\n\n if len(roi) > 2:\n raise ValueError('Expect 2d slice')\n\n if not all(s.step is None or s.step == 1 for s in roi):\n raise NotImplementedError('scaling not implemented, yet')\n\n roi = roi_normalise(roi, self.shape)\n ty, tx = [s.start for s in roi]\n h, w = roi_shape(roi)\n\n affine = self.affine * Affine.translation(tx, ty)\n\n return GeoBox(width=w, height=h, affine=affine, crs=self.crs)\n\n @property\n def transform(self):\n return self.affine\n\n @property\n def shape(self):\n \"\"\"\n :type: (int,int)\n \"\"\"\n return self.height, self.width\n\n @property\n def crs(self):\n \"\"\"\n :rtype: CRS\n \"\"\"\n return self.extent.crs\n\n @property\n def dimensions(self):\n \"\"\"\n List of dimension names of the GeoBox\n\n :type: (str,str)\n \"\"\"\n return self.crs.dimensions\n\n @property\n def resolution(self):\n \"\"\"\n Resolution in Y,X dimensions\n\n :type: (float,float)\n \"\"\"\n return self.affine.e, self.affine.a\n\n @property\n def alignment(self):\n \"\"\"\n Alignment of pixel boundaries in Y,X dimensions\n\n :type: (float,float)\n \"\"\"\n return self.affine.yoff % abs(self.affine.e), self.affine.xoff % abs(self.affine.a)\n\n @property\n def coordinates(self):\n \"\"\"\n dict of coordinate labels\n\n :type: dict[str,numpy.array]\n \"\"\"\n xs = numpy.arange(self.width) * self.affine.a + (self.affine.c + self.affine.a / 2)\n ys = numpy.arange(self.height) * self.affine.e + (self.affine.f + self.affine.e / 2)\n\n return OrderedDict((dim, Coordinate(labels, units)) for dim, labels, units in zip(self.crs.dimensions,\n (ys, xs), self.crs.units))\n\n @property\n def geographic_extent(self):\n \"\"\"\n :rtype: geometry.Geometry\n \"\"\"\n if self.crs.geographic:\n return self.extent\n return self.extent.to_crs(CRS('EPSG:4326'))\n\n coords = coordinates\n dims = dimensions\n\n def __str__(self):\n return \"GeoBox({})\".format(self.geographic_extent)\n\n def __repr__(self):\n return \"GeoBox({width}, {height}, {affine!r}, {crs})\".format(\n width=self.width,\n height=self.height,\n affine=self.affine,\n crs=self.extent.crs\n )\n\n def __eq__(self, other):\n if not isinstance(other, GeoBox):\n return False\n\n return (self.shape == other.shape\n and self.transform == other.transform\n and self.crs == other.crs)\n\n\ndef scaled_down_geobox(src_geobox, scaler: 
int):\n \"\"\"Given a source geobox and integer scaler compute geobox of a scaled down image.\n\n Output geobox will be padded when shape is not a multiple of scaler.\n Example: 5x4, scaler=2 -> 3x2\n\n NOTE: here we assume that pixel coordinates are 0,0 at the top-left\n corner of a top-left pixel.\n\n \"\"\"\n assert scaler > 1\n\n H, W = [X//scaler + (1 if X % scaler else 0)\n for X in src_geobox.shape]\n\n # Since 0,0 is at the corner of a pixel, not center, there is no\n # translation between pixel plane coords due to scaling\n A = src_geobox.transform * Affine.scale(scaler, scaler)\n\n return GeoBox(W, H, A, src_geobox.crs)\n\n\ndef _round_to_res(value, res, acc=0.1):\n \"\"\"\n >>> _round_to_res(0.2, 1.0)\n 1\n >>> _round_to_res(0.0, 1.0)\n 0\n >>> _round_to_res(0.05, 1.0)\n 0\n \"\"\"\n res = abs(res)\n return int(math.ceil((value - 0.1 * res) / res))\n\n\ndef intersects(a, b):\n return a.intersects(b) and not a.touches(b)\n", "path": "datacube/utils/geometry/_base.py" } ]
diff --git a/datacube/utils/geometry/_base.py b/datacube/utils/geometry/_base.py
index 79611d46cc..b9ec36a6e3 100644
--- a/datacube/utils/geometry/_base.py
+++ b/datacube/utils/geometry/_base.py
@@ -303,6 +303,8 @@ def _get_coordinates(geom):
 
 
 def _make_geom_from_ogr(geom, crs):
+    if geom is None:
+        return None
     result = Geometry.__new__(Geometry)
     result._geom = geom # pylint: disable=protected-access
     result.crs = crs
diff --git a/tests/test_geometry.py b/tests/test_geometry.py
index 5571f43401..6995b6ffc0 100644
--- a/tests/test_geometry.py
+++ b/tests/test_geometry.py
@@ -199,6 +199,8 @@ def test_unary_union():
     assert union4.type == 'Polygon'
     assert union4.area == 2.5 * box1.area
 
+    assert geometry.unary_union([]) is None
+
     with pytest.raises(ValueError):
         pt = geometry.point(6, 7, epsg4326)
         geometry.unary_union([pt, pt])
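For context on the change above: with the added guard, `_make_geom_from_ogr` propagates a missing OGR geometry as `None`, so `unary_union([])` now returns `None` instead of a `Geometry` wrapping a null OGR handle, which is exactly what the new test asserts. A minimal sketch of that behaviour, assuming the helpers from `_base.py` are re-exported as `datacube.utils.geometry` (as the test module suggests):

```python
from datacube.utils import geometry

# With the None guard in _make_geom_from_ogr, the union of nothing is None
# rather than a Geometry wrapping a null OGR object.
assert geometry.unary_union([]) is None

# Unions of real polygons are unaffected and still return a Geometry.
epsg4326 = geometry.CRS('EPSG:4326')
box1 = geometry.box(10, 10, 20, 20, crs=epsg4326)
box2 = geometry.box(15, 10, 25, 20, crs=epsg4326)
assert geometry.unary_union([box1, box2]) is not None
```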
frappe__frappe-15915
no_copy option not available in customize form field

### Observed result
`no_copy` is a field property in DocType, but in Customize Form the No Copy option is not available for the field.

### Expected result
Make `no_copy` available as a field property in Customize Form.
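Until the flag is exposed in the Customize Form UI, the same effect can be obtained by creating the Property Setter directly, using the same dict shape that `CustomizeForm.make_property_setter` builds in the code below. A minimal, hypothetical sketch (the DocType `Sales Invoice` and field `remarks` are illustrative placeholders, not part of this report):

```python
import frappe

# Hypothetical workaround: mark an existing standard field as "No Copy"
# without going through Customize Form. DocType and fieldname are placeholders.
frappe.make_property_setter({
    "doctype": "Sales Invoice",       # target DocType (illustrative only)
    "doctype_or_field": "DocField",   # the property applies to a field, not the DocType
    "fieldname": "remarks",           # target field (illustrative only)
    "property": "no_copy",
    "value": 1,
    "property_type": "Check",
})
```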
[ { "content": "# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. See LICENSE\n\n\"\"\"\n\tCustomize Form is a Single DocType used to mask the Property Setter\n\tThus providing a better UI from user perspective\n\"\"\"\nimport json\nimport frappe\nimport frappe.translate\nfrom frappe import _\nfrom frappe.utils import cint\nfrom frappe.model.document import Document\nfrom frappe.model import no_value_fields, core_doctypes_list\nfrom frappe.core.doctype.doctype.doctype import validate_fields_for_doctype, check_email_append_to\nfrom frappe.custom.doctype.custom_field.custom_field import create_custom_field\nfrom frappe.custom.doctype.property_setter.property_setter import delete_property_setter\nfrom frappe.model.docfield import supports_translation\nfrom frappe.core.doctype.doctype.doctype import validate_series\n\n\nclass CustomizeForm(Document):\n\tdef on_update(self):\n\t\tfrappe.db.delete(\"Singles\", {\"doctype\": \"Customize Form\"})\n\t\tfrappe.db.delete(\"Customize Form Field\")\n\n\[email protected]()\n\tdef fetch_to_customize(self):\n\t\tself.clear_existing_doc()\n\t\tif not self.doc_type:\n\t\t\treturn\n\n\t\tmeta = frappe.get_meta(self.doc_type)\n\n\t\tself.validate_doctype(meta)\n\n\t\t# load the meta properties on the customize (self) object\n\t\tself.load_properties(meta)\n\n\t\t# load custom translation\n\t\ttranslation = self.get_name_translation()\n\t\tself.label = translation.translated_text if translation else ''\n\n\t\tself.create_auto_repeat_custom_field_if_required(meta)\n\n\t\t# NOTE doc (self) is sent to clientside by run_method\n\n\tdef validate_doctype(self, meta):\n\t\t'''\n\t\tCheck if the doctype is allowed to be customized.\n\t\t'''\n\t\tif self.doc_type in core_doctypes_list:\n\t\t\tfrappe.throw(_(\"Core DocTypes cannot be customized.\"))\n\n\t\tif meta.issingle:\n\t\t\tfrappe.throw(_(\"Single DocTypes cannot be customized.\"))\n\n\t\tif meta.custom:\n\t\t\tfrappe.throw(_(\"Only standard DocTypes are allowed to be customized from Customize Form.\"))\n\n\tdef load_properties(self, meta):\n\t\t'''\n\t\tLoad the customize object (this) with the metadata properties\n\t\t'''\n\t\t# doctype properties\n\t\tfor prop in doctype_properties:\n\t\t\tself.set(prop, meta.get(prop))\n\n\t\tfor d in meta.get(\"fields\"):\n\t\t\tnew_d = {\"fieldname\": d.fieldname, \"is_custom_field\": d.get(\"is_custom_field\"), \"name\": d.name}\n\t\t\tfor prop in docfield_properties:\n\t\t\t\tnew_d[prop] = d.get(prop)\n\t\t\tself.append(\"fields\", new_d)\n\n\t\tfor fieldname in ('links', 'actions', 'states'):\n\t\t\tfor d in meta.get(fieldname):\n\t\t\t\tself.append(fieldname, d)\n\n\tdef create_auto_repeat_custom_field_if_required(self, meta):\n\t\t'''\n\t\tCreate auto repeat custom field if it's not already present\n\t\t'''\n\t\tif self.allow_auto_repeat:\n\t\t\tall_fields = [df.fieldname for df in meta.fields]\n\n\t\t\tif \"auto_repeat\" in all_fields:\n\t\t\t\treturn\n\n\t\t\tinsert_after = self.fields[len(self.fields) - 1].fieldname\n\t\t\tcreate_custom_field(self.doc_type, dict(\n\t\t\t\tfieldname='auto_repeat',\n\t\t\t\tlabel='Auto Repeat',\n\t\t\t\tfieldtype='Link',\n\t\t\t\toptions='Auto Repeat',\n\t\t\t\tinsert_after=insert_after,\n\t\t\t\tread_only=1, no_copy=1, print_hide=1\n\t\t\t))\n\n\n\tdef get_name_translation(self):\n\t\t'''Get translation object if exists of current doctype name in the default language'''\n\t\treturn frappe.get_value('Translation', {\n\t\t\t\t'source_text': self.doc_type,\n\t\t\t\t'language': frappe.local.lang or 
'en'\n\t\t\t}, ['name', 'translated_text'], as_dict=True)\n\n\tdef set_name_translation(self):\n\t\t'''Create, update custom translation for this doctype'''\n\t\tcurrent = self.get_name_translation()\n\t\tif not self.label:\n\t\t\tif current:\n\t\t\t\t# clear translation\n\t\t\t\tfrappe.delete_doc('Translation', current.name)\n\t\t\treturn\n\n\t\tif not current:\n\t\t\tfrappe.get_doc(\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": 'Translation',\n\t\t\t\t\t\"source_text\": self.doc_type,\n\t\t\t\t\t\"translated_text\": self.label,\n\t\t\t\t\t\"language_code\": frappe.local.lang or 'en'\n\t\t\t\t}\n\t\t\t).insert()\n\t\t\treturn\n\n\t\tif self.label != current.translated_text:\n\t\t\tfrappe.db.set_value('Translation', current.name, 'translated_text', self.label)\n\t\t\tfrappe.translate.clear_cache()\n\n\tdef clear_existing_doc(self):\n\t\tdoc_type = self.doc_type\n\n\t\tfor fieldname in self.meta.get_valid_columns():\n\t\t\tself.set(fieldname, None)\n\n\t\tfor df in self.meta.get_table_fields():\n\t\t\tself.set(df.fieldname, [])\n\n\t\tself.doc_type = doc_type\n\t\tself.name = \"Customize Form\"\n\n\[email protected]()\n\tdef save_customization(self):\n\t\tif not self.doc_type:\n\t\t\treturn\n\t\tvalidate_series(self, self.autoname, self.doc_type)\n\t\tself.flags.update_db = False\n\t\tself.flags.rebuild_doctype_for_global_search = False\n\t\tself.set_property_setters()\n\t\tself.update_custom_fields()\n\t\tself.set_name_translation()\n\t\tvalidate_fields_for_doctype(self.doc_type)\n\t\tcheck_email_append_to(self)\n\n\t\tif self.flags.update_db:\n\t\t\tfrappe.db.updatedb(self.doc_type)\n\n\t\tif not hasattr(self, 'hide_success') or not self.hide_success:\n\t\t\tfrappe.msgprint(_(\"{0} updated\").format(_(self.doc_type)), alert=True)\n\t\tfrappe.clear_cache(doctype=self.doc_type)\n\t\tself.fetch_to_customize()\n\n\t\tif self.flags.rebuild_doctype_for_global_search:\n\t\t\tfrappe.enqueue('frappe.utils.global_search.rebuild_for_doctype',\n\t\t\t\tnow=True, doctype=self.doc_type)\n\n\tdef set_property_setters(self):\n\t\tmeta = frappe.get_meta(self.doc_type)\n\n\t\t# doctype\n\t\tself.set_property_setters_for_doctype(meta)\n\n\t\t# docfield\n\t\tfor df in self.get(\"fields\"):\n\t\t\tmeta_df = meta.get(\"fields\", {\"fieldname\": df.fieldname})\n\t\t\tif not meta_df or meta_df[0].get(\"is_custom_field\"):\n\t\t\t\tcontinue\n\t\t\tself.set_property_setters_for_docfield(meta, df, meta_df)\n\n\t\t# action and links\n\t\tself.set_property_setters_for_actions_and_links(meta)\n\n\tdef set_property_setters_for_doctype(self, meta):\n\t\tfor prop, prop_type in doctype_properties.items():\n\t\t\tif self.get(prop) != meta.get(prop):\n\t\t\t\tself.make_property_setter(prop, self.get(prop), prop_type)\n\n\tdef set_property_setters_for_docfield(self, meta, df, meta_df):\n\t\tfor prop, prop_type in docfield_properties.items():\n\t\t\tif prop != \"idx\" and (df.get(prop) or '') != (meta_df[0].get(prop) or ''):\n\t\t\t\tif not self.allow_property_change(prop, meta_df, df):\n\t\t\t\t\tcontinue\n\n\t\t\t\tself.make_property_setter(prop, df.get(prop), prop_type,\n\t\t\t\t\tfieldname=df.fieldname)\n\n\tdef allow_property_change(self, prop, meta_df, df):\n\t\tif prop == \"fieldtype\":\n\t\t\tself.validate_fieldtype_change(df, meta_df[0].get(prop), df.get(prop))\n\n\t\telif prop == \"length\":\n\t\t\told_value_length = cint(meta_df[0].get(prop))\n\t\t\tnew_value_length = cint(df.get(prop))\n\n\t\t\tif new_value_length and (old_value_length > new_value_length):\n\t\t\t\tself.check_length_for_fieldtypes.append({'df': df, 
'old_value': meta_df[0].get(prop)})\n\t\t\t\tself.validate_fieldtype_length()\n\t\t\telse:\n\t\t\t\tself.flags.update_db = True\n\n\t\telif prop == \"allow_on_submit\" and df.get(prop):\n\t\t\tif not frappe.db.get_value(\"DocField\",\n\t\t\t\t{\"parent\": self.doc_type, \"fieldname\": df.fieldname}, \"allow_on_submit\"):\n\t\t\t\tfrappe.msgprint(_(\"Row {0}: Not allowed to enable Allow on Submit for standard fields\")\\\n\t\t\t\t\t.format(df.idx))\n\t\t\t\treturn False\n\n\t\telif prop == \"reqd\" and \\\n\t\t\t((frappe.db.get_value(\"DocField\",\n\t\t\t\t{\"parent\":self.doc_type,\"fieldname\":df.fieldname}, \"reqd\") == 1) \\\n\t\t\t\tand (df.get(prop) == 0)):\n\t\t\tfrappe.msgprint(_(\"Row {0}: Not allowed to disable Mandatory for standard fields\")\\\n\t\t\t\t\t.format(df.idx))\n\t\t\treturn False\n\n\t\telif prop == \"in_list_view\" and df.get(prop) \\\n\t\t\tand df.fieldtype!=\"Attach Image\" and df.fieldtype in no_value_fields:\n\t\t\t\t\tfrappe.msgprint(_(\"'In List View' not allowed for type {0} in row {1}\")\n\t\t\t\t\t\t.format(df.fieldtype, df.idx))\n\t\t\t\t\treturn False\n\n\t\telif prop == \"precision\" and cint(df.get(\"precision\")) > 6 \\\n\t\t\t\tand cint(df.get(\"precision\")) > cint(meta_df[0].get(\"precision\")):\n\t\t\tself.flags.update_db = True\n\n\t\telif prop == \"unique\":\n\t\t\tself.flags.update_db = True\n\n\t\telif (prop == \"read_only\" and cint(df.get(\"read_only\"))==0\n\t\t\t\tand frappe.db.get_value(\"DocField\", {\"parent\": self.doc_type,\n\t\t\t\t\"fieldname\": df.fieldname}, \"read_only\")==1):\n\t\t\t# if docfield has read_only checked and user is trying to make it editable, don't allow it\n\t\t\tfrappe.msgprint(_(\"You cannot unset 'Read Only' for field {0}\").format(df.label))\n\t\t\treturn False\n\n\t\telif prop == \"options\" and df.get(\"fieldtype\") not in ALLOWED_OPTIONS_CHANGE:\n\t\t\tfrappe.msgprint(_(\"You can't set 'Options' for field {0}\").format(df.label))\n\t\t\treturn False\n\n\t\telif prop == 'translatable' and not supports_translation(df.get('fieldtype')):\n\t\t\tfrappe.msgprint(_(\"You can't set 'Translatable' for field {0}\").format(df.label))\n\t\t\treturn False\n\n\t\telif (prop == 'in_global_search' and\n\t\t\tdf.in_global_search != meta_df[0].get(\"in_global_search\")):\n\t\t\tself.flags.rebuild_doctype_for_global_search = True\n\n\t\treturn True\n\n\tdef set_property_setters_for_actions_and_links(self, meta):\n\t\t'''\n\t\tApply property setters or create custom records for DocType Action and DocType Link\n\t\t'''\n\t\tfor doctype, fieldname, field_map in (\n\t\t\t\t('DocType Link', 'links', doctype_link_properties),\n\t\t\t\t('DocType Action', 'actions', doctype_action_properties),\n\t\t\t\t('DocType State', 'states', doctype_state_properties),\n\t\t\t):\n\t\t\thas_custom = False\n\t\t\titems = []\n\t\t\tfor i, d in enumerate(self.get(fieldname) or []):\n\t\t\t\td.idx = i\n\t\t\t\tif frappe.db.exists(doctype, d.name) and not d.custom:\n\t\t\t\t\t# check property and apply property setter\n\t\t\t\t\toriginal = frappe.get_doc(doctype, d.name)\n\t\t\t\t\tfor prop, prop_type in field_map.items():\n\t\t\t\t\t\tif d.get(prop) != original.get(prop):\n\t\t\t\t\t\t\tself.make_property_setter(prop, d.get(prop), prop_type,\n\t\t\t\t\t\t\t\tapply_on=doctype, row_name=d.name)\n\t\t\t\t\titems.append(d.name)\n\t\t\t\telse:\n\t\t\t\t\t# custom - just insert/update\n\t\t\t\t\td.parent = self.doc_type\n\t\t\t\t\td.custom = 1\n\t\t\t\t\td.save(ignore_permissions=True)\n\t\t\t\t\thas_custom = 
True\n\t\t\t\t\titems.append(d.name)\n\n\t\t\tself.update_order_property_setter(has_custom, fieldname)\n\t\t\tself.clear_removed_items(doctype, items)\n\n\tdef update_order_property_setter(self, has_custom, fieldname):\n\t\t'''\n\t\tWe need to maintain the order of the link/actions if the user has shuffled them.\n\t\tSo we create a new property (ex `links_order`) to keep a list of items.\n\t\t'''\n\t\tproperty_name = '{}_order'.format(fieldname)\n\t\tif has_custom:\n\t\t\t# save the order of the actions and links\n\t\t\tself.make_property_setter(property_name,\n\t\t\t\tjson.dumps([d.name for d in self.get(fieldname)]), 'Small Text')\n\t\telse:\n\t\t\tfrappe.db.delete('Property Setter', dict(property=property_name,\n\t\t\t\tdoc_type=self.doc_type))\n\n\n\tdef clear_removed_items(self, doctype, items):\n\t\t'''\n\t\tClear rows that do not appear in `items`. These have been removed by the user.\n\t\t'''\n\t\tif items:\n\t\t\tfrappe.db.delete(doctype, dict(parent=self.doc_type, custom=1,\n\t\t\t\tname=('not in', items)))\n\t\telse:\n\t\t\tfrappe.db.delete(doctype, dict(parent=self.doc_type, custom=1))\n\n\tdef update_custom_fields(self):\n\t\tfor i, df in enumerate(self.get(\"fields\")):\n\t\t\tif df.get(\"is_custom_field\"):\n\t\t\t\tif not frappe.db.exists('Custom Field', {'dt': self.doc_type, 'fieldname': df.fieldname}):\n\t\t\t\t\tself.add_custom_field(df, i)\n\t\t\t\t\tself.flags.update_db = True\n\t\t\t\telse:\n\t\t\t\t\tself.update_in_custom_field(df, i)\n\n\t\tself.delete_custom_fields()\n\n\tdef add_custom_field(self, df, i):\n\t\td = frappe.new_doc(\"Custom Field\")\n\n\t\td.dt = self.doc_type\n\n\t\tfor prop in docfield_properties:\n\t\t\td.set(prop, df.get(prop))\n\n\t\tif i!=0:\n\t\t\td.insert_after = self.fields[i-1].fieldname\n\t\td.idx = i\n\n\t\td.insert()\n\t\tdf.fieldname = d.fieldname\n\n\tdef update_in_custom_field(self, df, i):\n\t\tmeta = frappe.get_meta(self.doc_type)\n\t\tmeta_df = meta.get(\"fields\", {\"fieldname\": df.fieldname})\n\t\tif not (meta_df and meta_df[0].get(\"is_custom_field\")):\n\t\t\t# not a custom field\n\t\t\treturn\n\n\t\tcustom_field = frappe.get_doc(\"Custom Field\", meta_df[0].name)\n\t\tchanged = False\n\t\tfor prop in docfield_properties:\n\t\t\tif df.get(prop) != custom_field.get(prop):\n\t\t\t\tif prop == \"fieldtype\":\n\t\t\t\t\tself.validate_fieldtype_change(df, meta_df[0].get(prop), df.get(prop))\n\n\t\t\t\tcustom_field.set(prop, df.get(prop))\n\t\t\t\tchanged = True\n\n\t\t# check and update `insert_after` property\n\t\tif i!=0:\n\t\t\tinsert_after = self.fields[i-1].fieldname\n\t\t\tif custom_field.insert_after != insert_after:\n\t\t\t\tcustom_field.insert_after = insert_after\n\t\t\t\tcustom_field.idx = i\n\t\t\t\tchanged = True\n\n\t\tif changed:\n\t\t\tcustom_field.db_update()\n\t\t\tself.flags.update_db = True\n\t\t\t#custom_field.save()\n\n\tdef delete_custom_fields(self):\n\t\tmeta = frappe.get_meta(self.doc_type)\n\t\tfields_to_remove = (\n\t\t\t{df.fieldname for df in meta.get(\"fields\")} - {df.fieldname for df in self.get(\"fields\")}\n\t\t)\n\t\tfor fieldname in fields_to_remove:\n\t\t\tdf = meta.get(\"fields\", {\"fieldname\": fieldname})[0]\n\t\t\tif df.get(\"is_custom_field\"):\n\t\t\t\tfrappe.delete_doc(\"Custom Field\", df.name)\n\n\tdef make_property_setter(self, prop, value, property_type, fieldname=None,\n\t\tapply_on=None, row_name = None):\n\t\tdelete_property_setter(self.doc_type, prop, fieldname, row_name)\n\n\t\tproperty_value = self.get_existing_property_value(prop, fieldname)\n\n\t\tif 
property_value==value:\n\t\t\treturn\n\n\t\tif not apply_on:\n\t\t\tapply_on = \"DocField\" if fieldname else \"DocType\"\n\n\t\t# create a new property setter\n\t\tfrappe.make_property_setter({\n\t\t\t\"doctype\": self.doc_type,\n\t\t\t\"doctype_or_field\": apply_on,\n\t\t\t\"fieldname\": fieldname,\n\t\t\t\"row_name\": row_name,\n\t\t\t\"property\": prop,\n\t\t\t\"value\": value,\n\t\t\t\"property_type\": property_type\n\t\t})\n\n\tdef get_existing_property_value(self, property_name, fieldname=None):\n\t\t# check if there is any need to make property setter!\n\t\tif fieldname:\n\t\t\tproperty_value = frappe.db.get_value(\"DocField\", {\"parent\": self.doc_type,\n\t\t\t\t\"fieldname\": fieldname}, property_name)\n\t\telse:\n\t\t\tif frappe.db.has_column(\"DocType\", property_name):\n\t\t\t\tproperty_value = frappe.db.get_value(\"DocType\", self.doc_type, property_name)\n\t\t\telse:\n\t\t\t\tproperty_value = None\n\n\t\treturn property_value\n\n\tdef validate_fieldtype_change(self, df, old_value, new_value):\n\t\tif df.is_virtual:\n\t\t\treturn\n\n\t\tallowed = self.allow_fieldtype_change(old_value, new_value)\n\t\tif allowed:\n\t\t\told_value_length = cint(frappe.db.type_map.get(old_value)[1])\n\t\t\tnew_value_length = cint(frappe.db.type_map.get(new_value)[1])\n\n\t\t\t# Ignore fieldtype check validation if new field type has unspecified maxlength\n\t\t\t# Changes like DATA to TEXT, where new_value_lenth equals 0 will not be validated\n\t\t\tif new_value_length and (old_value_length > new_value_length):\n\t\t\t\tself.check_length_for_fieldtypes.append({'df': df, 'old_value': old_value})\n\t\t\t\tself.validate_fieldtype_length()\n\t\t\telse:\n\t\t\t\tself.flags.update_db = True\n\n\t\telse:\n\t\t\tfrappe.throw(_(\"Fieldtype cannot be changed from {0} to {1} in row {2}\").format(old_value, new_value, df.idx))\n\n\tdef validate_fieldtype_length(self):\n\t\tfor field in self.check_length_for_fieldtypes:\n\t\t\tdf = field.get('df')\n\t\t\tmax_length = cint(frappe.db.type_map.get(df.fieldtype)[1])\n\t\t\tfieldname = df.fieldname\n\t\t\tdocs = frappe.db.sql('''\n\t\t\t\tSELECT name, {fieldname}, LENGTH({fieldname}) AS len\n\t\t\t\tFROM `tab{doctype}`\n\t\t\t\tWHERE LENGTH({fieldname}) > {max_length}\n\t\t\t'''.format(\n\t\t\t\tfieldname=fieldname,\n\t\t\t\tdoctype=self.doc_type,\n\t\t\t\tmax_length=max_length\n\t\t\t), as_dict=True)\n\t\t\tlinks = []\n\t\t\tlabel = df.label\n\t\t\tfor doc in docs:\n\t\t\t\tlinks.append(frappe.utils.get_link_to_form(self.doc_type, doc.name))\n\t\t\tlinks_str = ', '.join(links)\n\n\t\t\tif docs:\n\t\t\t\tfrappe.throw(_('Value for field {0} is too long in {1}. 
Length should be lesser than {2} characters')\n\t\t\t\t\t.format(\n\t\t\t\t\t\tfrappe.bold(label),\n\t\t\t\t\t\tlinks_str,\n\t\t\t\t\t\tfrappe.bold(max_length)\n\t\t\t\t\t), title=_('Data Too Long'), is_minimizable=len(docs) > 1)\n\n\t\tself.flags.update_db = True\n\n\[email protected]()\n\tdef reset_to_defaults(self):\n\t\tif not self.doc_type:\n\t\t\treturn\n\n\t\treset_customization(self.doc_type)\n\t\tself.fetch_to_customize()\n\n\t@classmethod\n\tdef allow_fieldtype_change(self, old_type: str, new_type: str) -> bool:\n\t\t\"\"\" allow type change, if both old_type and new_type are in same field group.\n\t\tfield groups are defined in ALLOWED_FIELDTYPE_CHANGE variables.\n\t\t\"\"\"\n\t\tin_field_group = lambda group: (old_type in group) and (new_type in group)\n\t\treturn any(map(in_field_group, ALLOWED_FIELDTYPE_CHANGE))\n\ndef reset_customization(doctype):\n\tsetters = frappe.get_all(\"Property Setter\", filters={\n\t\t'doc_type': doctype,\n\t\t'field_name': ['!=', 'naming_series'],\n\t\t'property': ['!=', 'options']\n\t}, pluck='name')\n\n\tfor setter in setters:\n\t\tfrappe.delete_doc(\"Property Setter\", setter)\n\n\tfrappe.clear_cache(doctype=doctype)\n\ndoctype_properties = {\n\t'search_fields': 'Data',\n\t'title_field': 'Data',\n\t'image_field': 'Data',\n\t'sort_field': 'Data',\n\t'sort_order': 'Data',\n\t'default_print_format': 'Data',\n\t'allow_copy': 'Check',\n\t'istable': 'Check',\n\t'quick_entry': 'Check',\n\t'editable_grid': 'Check',\n\t'max_attachments': 'Int',\n\t'track_changes': 'Check',\n\t'track_views': 'Check',\n\t'allow_auto_repeat': 'Check',\n\t'allow_import': 'Check',\n\t'show_preview_popup': 'Check',\n\t'default_email_template': 'Data',\n\t'email_append_to': 'Check',\n\t'subject_field': 'Data',\n\t'sender_field': 'Data',\n\t'autoname': 'Data',\n\t'show_title_field_in_link': 'Check'\n}\n\ndocfield_properties = {\n\t'idx': 'Int',\n\t'label': 'Data',\n\t'fieldtype': 'Select',\n\t'options': 'Text',\n\t'fetch_from': 'Small Text',\n\t'fetch_if_empty': 'Check',\n\t'show_dashboard': 'Check',\n\t'permlevel': 'Int',\n\t'width': 'Data',\n\t'print_width': 'Data',\n\t'non_negative': 'Check',\n\t'reqd': 'Check',\n\t'unique': 'Check',\n\t'ignore_user_permissions': 'Check',\n\t'in_list_view': 'Check',\n\t'in_standard_filter': 'Check',\n\t'in_global_search': 'Check',\n\t'in_preview': 'Check',\n\t'bold': 'Check',\n\t'hidden': 'Check',\n\t'collapsible': 'Check',\n\t'collapsible_depends_on': 'Data',\n\t'print_hide': 'Check',\n\t'print_hide_if_no_value': 'Check',\n\t'report_hide': 'Check',\n\t'allow_on_submit': 'Check',\n\t'translatable': 'Check',\n\t'mandatory_depends_on': 'Data',\n\t'read_only_depends_on': 'Data',\n\t'depends_on': 'Data',\n\t'description': 'Text',\n\t'default': 'Text',\n\t'precision': 'Select',\n\t'read_only': 'Check',\n\t'length': 'Int',\n\t'columns': 'Int',\n\t'remember_last_selected_value': 'Check',\n\t'allow_bulk_edit': 'Check',\n\t'auto_repeat': 'Link',\n\t'allow_in_quick_entry': 'Check',\n\t'hide_border': 'Check',\n\t'hide_days': 'Check',\n\t'hide_seconds': 'Check',\n\t'is_virtual': 'Check',\n}\n\ndoctype_link_properties = {\n\t'link_doctype': 'Link',\n\t'link_fieldname': 'Data',\n\t'group': 'Data',\n\t'hidden': 'Check'\n}\n\ndoctype_action_properties = {\n\t'label': 'Link',\n\t'action_type': 'Select',\n\t'action': 'Small Text',\n\t'group': 'Data',\n\t'hidden': 'Check'\n}\n\ndoctype_state_properties = {\n\t'title': 'Data',\n\t'color': 'Select'\n}\n\n\nALLOWED_FIELDTYPE_CHANGE = (\n\t('Currency', 'Float', 'Percent'),\n\t('Small Text', 'Data'),\n\t('Text', 
'Data'),\n\t('Text', 'Text Editor', 'Code', 'Signature', 'HTML Editor'),\n\t('Data', 'Select'),\n\t('Text', 'Small Text'),\n\t('Text', 'Data', 'Barcode'),\n\t('Code', 'Geolocation'),\n\t('Table', 'Table MultiSelect'))\n\nALLOWED_OPTIONS_CHANGE = ('Read Only', 'HTML', 'Select', 'Data')\n", "path": "frappe/custom/doctype/customize_form/customize_form.py" } ]
[ { "content": "# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. See LICENSE\n\n\"\"\"\n\tCustomize Form is a Single DocType used to mask the Property Setter\n\tThus providing a better UI from user perspective\n\"\"\"\nimport json\nimport frappe\nimport frappe.translate\nfrom frappe import _\nfrom frappe.utils import cint\nfrom frappe.model.document import Document\nfrom frappe.model import no_value_fields, core_doctypes_list\nfrom frappe.core.doctype.doctype.doctype import validate_fields_for_doctype, check_email_append_to\nfrom frappe.custom.doctype.custom_field.custom_field import create_custom_field\nfrom frappe.custom.doctype.property_setter.property_setter import delete_property_setter\nfrom frappe.model.docfield import supports_translation\nfrom frappe.core.doctype.doctype.doctype import validate_series\n\n\nclass CustomizeForm(Document):\n\tdef on_update(self):\n\t\tfrappe.db.delete(\"Singles\", {\"doctype\": \"Customize Form\"})\n\t\tfrappe.db.delete(\"Customize Form Field\")\n\n\[email protected]()\n\tdef fetch_to_customize(self):\n\t\tself.clear_existing_doc()\n\t\tif not self.doc_type:\n\t\t\treturn\n\n\t\tmeta = frappe.get_meta(self.doc_type)\n\n\t\tself.validate_doctype(meta)\n\n\t\t# load the meta properties on the customize (self) object\n\t\tself.load_properties(meta)\n\n\t\t# load custom translation\n\t\ttranslation = self.get_name_translation()\n\t\tself.label = translation.translated_text if translation else ''\n\n\t\tself.create_auto_repeat_custom_field_if_required(meta)\n\n\t\t# NOTE doc (self) is sent to clientside by run_method\n\n\tdef validate_doctype(self, meta):\n\t\t'''\n\t\tCheck if the doctype is allowed to be customized.\n\t\t'''\n\t\tif self.doc_type in core_doctypes_list:\n\t\t\tfrappe.throw(_(\"Core DocTypes cannot be customized.\"))\n\n\t\tif meta.issingle:\n\t\t\tfrappe.throw(_(\"Single DocTypes cannot be customized.\"))\n\n\t\tif meta.custom:\n\t\t\tfrappe.throw(_(\"Only standard DocTypes are allowed to be customized from Customize Form.\"))\n\n\tdef load_properties(self, meta):\n\t\t'''\n\t\tLoad the customize object (this) with the metadata properties\n\t\t'''\n\t\t# doctype properties\n\t\tfor prop in doctype_properties:\n\t\t\tself.set(prop, meta.get(prop))\n\n\t\tfor d in meta.get(\"fields\"):\n\t\t\tnew_d = {\"fieldname\": d.fieldname, \"is_custom_field\": d.get(\"is_custom_field\"), \"name\": d.name}\n\t\t\tfor prop in docfield_properties:\n\t\t\t\tnew_d[prop] = d.get(prop)\n\t\t\tself.append(\"fields\", new_d)\n\n\t\tfor fieldname in ('links', 'actions', 'states'):\n\t\t\tfor d in meta.get(fieldname):\n\t\t\t\tself.append(fieldname, d)\n\n\tdef create_auto_repeat_custom_field_if_required(self, meta):\n\t\t'''\n\t\tCreate auto repeat custom field if it's not already present\n\t\t'''\n\t\tif self.allow_auto_repeat:\n\t\t\tall_fields = [df.fieldname for df in meta.fields]\n\n\t\t\tif \"auto_repeat\" in all_fields:\n\t\t\t\treturn\n\n\t\t\tinsert_after = self.fields[len(self.fields) - 1].fieldname\n\t\t\tcreate_custom_field(self.doc_type, dict(\n\t\t\t\tfieldname='auto_repeat',\n\t\t\t\tlabel='Auto Repeat',\n\t\t\t\tfieldtype='Link',\n\t\t\t\toptions='Auto Repeat',\n\t\t\t\tinsert_after=insert_after,\n\t\t\t\tread_only=1, no_copy=1, print_hide=1\n\t\t\t))\n\n\n\tdef get_name_translation(self):\n\t\t'''Get translation object if exists of current doctype name in the default language'''\n\t\treturn frappe.get_value('Translation', {\n\t\t\t\t'source_text': self.doc_type,\n\t\t\t\t'language': frappe.local.lang or 
'en'\n\t\t\t}, ['name', 'translated_text'], as_dict=True)\n\n\tdef set_name_translation(self):\n\t\t'''Create, update custom translation for this doctype'''\n\t\tcurrent = self.get_name_translation()\n\t\tif not self.label:\n\t\t\tif current:\n\t\t\t\t# clear translation\n\t\t\t\tfrappe.delete_doc('Translation', current.name)\n\t\t\treturn\n\n\t\tif not current:\n\t\t\tfrappe.get_doc(\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": 'Translation',\n\t\t\t\t\t\"source_text\": self.doc_type,\n\t\t\t\t\t\"translated_text\": self.label,\n\t\t\t\t\t\"language_code\": frappe.local.lang or 'en'\n\t\t\t\t}\n\t\t\t).insert()\n\t\t\treturn\n\n\t\tif self.label != current.translated_text:\n\t\t\tfrappe.db.set_value('Translation', current.name, 'translated_text', self.label)\n\t\t\tfrappe.translate.clear_cache()\n\n\tdef clear_existing_doc(self):\n\t\tdoc_type = self.doc_type\n\n\t\tfor fieldname in self.meta.get_valid_columns():\n\t\t\tself.set(fieldname, None)\n\n\t\tfor df in self.meta.get_table_fields():\n\t\t\tself.set(df.fieldname, [])\n\n\t\tself.doc_type = doc_type\n\t\tself.name = \"Customize Form\"\n\n\[email protected]()\n\tdef save_customization(self):\n\t\tif not self.doc_type:\n\t\t\treturn\n\t\tvalidate_series(self, self.autoname, self.doc_type)\n\t\tself.flags.update_db = False\n\t\tself.flags.rebuild_doctype_for_global_search = False\n\t\tself.set_property_setters()\n\t\tself.update_custom_fields()\n\t\tself.set_name_translation()\n\t\tvalidate_fields_for_doctype(self.doc_type)\n\t\tcheck_email_append_to(self)\n\n\t\tif self.flags.update_db:\n\t\t\tfrappe.db.updatedb(self.doc_type)\n\n\t\tif not hasattr(self, 'hide_success') or not self.hide_success:\n\t\t\tfrappe.msgprint(_(\"{0} updated\").format(_(self.doc_type)), alert=True)\n\t\tfrappe.clear_cache(doctype=self.doc_type)\n\t\tself.fetch_to_customize()\n\n\t\tif self.flags.rebuild_doctype_for_global_search:\n\t\t\tfrappe.enqueue('frappe.utils.global_search.rebuild_for_doctype',\n\t\t\t\tnow=True, doctype=self.doc_type)\n\n\tdef set_property_setters(self):\n\t\tmeta = frappe.get_meta(self.doc_type)\n\n\t\t# doctype\n\t\tself.set_property_setters_for_doctype(meta)\n\n\t\t# docfield\n\t\tfor df in self.get(\"fields\"):\n\t\t\tmeta_df = meta.get(\"fields\", {\"fieldname\": df.fieldname})\n\t\t\tif not meta_df or meta_df[0].get(\"is_custom_field\"):\n\t\t\t\tcontinue\n\t\t\tself.set_property_setters_for_docfield(meta, df, meta_df)\n\n\t\t# action and links\n\t\tself.set_property_setters_for_actions_and_links(meta)\n\n\tdef set_property_setters_for_doctype(self, meta):\n\t\tfor prop, prop_type in doctype_properties.items():\n\t\t\tif self.get(prop) != meta.get(prop):\n\t\t\t\tself.make_property_setter(prop, self.get(prop), prop_type)\n\n\tdef set_property_setters_for_docfield(self, meta, df, meta_df):\n\t\tfor prop, prop_type in docfield_properties.items():\n\t\t\tif prop != \"idx\" and (df.get(prop) or '') != (meta_df[0].get(prop) or ''):\n\t\t\t\tif not self.allow_property_change(prop, meta_df, df):\n\t\t\t\t\tcontinue\n\n\t\t\t\tself.make_property_setter(prop, df.get(prop), prop_type,\n\t\t\t\t\tfieldname=df.fieldname)\n\n\tdef allow_property_change(self, prop, meta_df, df):\n\t\tif prop == \"fieldtype\":\n\t\t\tself.validate_fieldtype_change(df, meta_df[0].get(prop), df.get(prop))\n\n\t\telif prop == \"length\":\n\t\t\told_value_length = cint(meta_df[0].get(prop))\n\t\t\tnew_value_length = cint(df.get(prop))\n\n\t\t\tif new_value_length and (old_value_length > new_value_length):\n\t\t\t\tself.check_length_for_fieldtypes.append({'df': df, 
'old_value': meta_df[0].get(prop)})\n\t\t\t\tself.validate_fieldtype_length()\n\t\t\telse:\n\t\t\t\tself.flags.update_db = True\n\n\t\telif prop == \"allow_on_submit\" and df.get(prop):\n\t\t\tif not frappe.db.get_value(\"DocField\",\n\t\t\t\t{\"parent\": self.doc_type, \"fieldname\": df.fieldname}, \"allow_on_submit\"):\n\t\t\t\tfrappe.msgprint(_(\"Row {0}: Not allowed to enable Allow on Submit for standard fields\")\\\n\t\t\t\t\t.format(df.idx))\n\t\t\t\treturn False\n\n\t\telif prop == \"reqd\" and \\\n\t\t\t((frappe.db.get_value(\"DocField\",\n\t\t\t\t{\"parent\":self.doc_type,\"fieldname\":df.fieldname}, \"reqd\") == 1) \\\n\t\t\t\tand (df.get(prop) == 0)):\n\t\t\tfrappe.msgprint(_(\"Row {0}: Not allowed to disable Mandatory for standard fields\")\\\n\t\t\t\t\t.format(df.idx))\n\t\t\treturn False\n\n\t\telif prop == \"in_list_view\" and df.get(prop) \\\n\t\t\tand df.fieldtype!=\"Attach Image\" and df.fieldtype in no_value_fields:\n\t\t\t\t\tfrappe.msgprint(_(\"'In List View' not allowed for type {0} in row {1}\")\n\t\t\t\t\t\t.format(df.fieldtype, df.idx))\n\t\t\t\t\treturn False\n\n\t\telif prop == \"precision\" and cint(df.get(\"precision\")) > 6 \\\n\t\t\t\tand cint(df.get(\"precision\")) > cint(meta_df[0].get(\"precision\")):\n\t\t\tself.flags.update_db = True\n\n\t\telif prop == \"unique\":\n\t\t\tself.flags.update_db = True\n\n\t\telif (prop == \"read_only\" and cint(df.get(\"read_only\"))==0\n\t\t\t\tand frappe.db.get_value(\"DocField\", {\"parent\": self.doc_type,\n\t\t\t\t\"fieldname\": df.fieldname}, \"read_only\")==1):\n\t\t\t# if docfield has read_only checked and user is trying to make it editable, don't allow it\n\t\t\tfrappe.msgprint(_(\"You cannot unset 'Read Only' for field {0}\").format(df.label))\n\t\t\treturn False\n\n\t\telif prop == \"options\" and df.get(\"fieldtype\") not in ALLOWED_OPTIONS_CHANGE:\n\t\t\tfrappe.msgprint(_(\"You can't set 'Options' for field {0}\").format(df.label))\n\t\t\treturn False\n\n\t\telif prop == 'translatable' and not supports_translation(df.get('fieldtype')):\n\t\t\tfrappe.msgprint(_(\"You can't set 'Translatable' for field {0}\").format(df.label))\n\t\t\treturn False\n\n\t\telif (prop == 'in_global_search' and\n\t\t\tdf.in_global_search != meta_df[0].get(\"in_global_search\")):\n\t\t\tself.flags.rebuild_doctype_for_global_search = True\n\n\t\treturn True\n\n\tdef set_property_setters_for_actions_and_links(self, meta):\n\t\t'''\n\t\tApply property setters or create custom records for DocType Action and DocType Link\n\t\t'''\n\t\tfor doctype, fieldname, field_map in (\n\t\t\t\t('DocType Link', 'links', doctype_link_properties),\n\t\t\t\t('DocType Action', 'actions', doctype_action_properties),\n\t\t\t\t('DocType State', 'states', doctype_state_properties),\n\t\t\t):\n\t\t\thas_custom = False\n\t\t\titems = []\n\t\t\tfor i, d in enumerate(self.get(fieldname) or []):\n\t\t\t\td.idx = i\n\t\t\t\tif frappe.db.exists(doctype, d.name) and not d.custom:\n\t\t\t\t\t# check property and apply property setter\n\t\t\t\t\toriginal = frappe.get_doc(doctype, d.name)\n\t\t\t\t\tfor prop, prop_type in field_map.items():\n\t\t\t\t\t\tif d.get(prop) != original.get(prop):\n\t\t\t\t\t\t\tself.make_property_setter(prop, d.get(prop), prop_type,\n\t\t\t\t\t\t\t\tapply_on=doctype, row_name=d.name)\n\t\t\t\t\titems.append(d.name)\n\t\t\t\telse:\n\t\t\t\t\t# custom - just insert/update\n\t\t\t\t\td.parent = self.doc_type\n\t\t\t\t\td.custom = 1\n\t\t\t\t\td.save(ignore_permissions=True)\n\t\t\t\t\thas_custom = 
True\n\t\t\t\t\titems.append(d.name)\n\n\t\t\tself.update_order_property_setter(has_custom, fieldname)\n\t\t\tself.clear_removed_items(doctype, items)\n\n\tdef update_order_property_setter(self, has_custom, fieldname):\n\t\t'''\n\t\tWe need to maintain the order of the link/actions if the user has shuffled them.\n\t\tSo we create a new property (ex `links_order`) to keep a list of items.\n\t\t'''\n\t\tproperty_name = '{}_order'.format(fieldname)\n\t\tif has_custom:\n\t\t\t# save the order of the actions and links\n\t\t\tself.make_property_setter(property_name,\n\t\t\t\tjson.dumps([d.name for d in self.get(fieldname)]), 'Small Text')\n\t\telse:\n\t\t\tfrappe.db.delete('Property Setter', dict(property=property_name,\n\t\t\t\tdoc_type=self.doc_type))\n\n\n\tdef clear_removed_items(self, doctype, items):\n\t\t'''\n\t\tClear rows that do not appear in `items`. These have been removed by the user.\n\t\t'''\n\t\tif items:\n\t\t\tfrappe.db.delete(doctype, dict(parent=self.doc_type, custom=1,\n\t\t\t\tname=('not in', items)))\n\t\telse:\n\t\t\tfrappe.db.delete(doctype, dict(parent=self.doc_type, custom=1))\n\n\tdef update_custom_fields(self):\n\t\tfor i, df in enumerate(self.get(\"fields\")):\n\t\t\tif df.get(\"is_custom_field\"):\n\t\t\t\tif not frappe.db.exists('Custom Field', {'dt': self.doc_type, 'fieldname': df.fieldname}):\n\t\t\t\t\tself.add_custom_field(df, i)\n\t\t\t\t\tself.flags.update_db = True\n\t\t\t\telse:\n\t\t\t\t\tself.update_in_custom_field(df, i)\n\n\t\tself.delete_custom_fields()\n\n\tdef add_custom_field(self, df, i):\n\t\td = frappe.new_doc(\"Custom Field\")\n\n\t\td.dt = self.doc_type\n\n\t\tfor prop in docfield_properties:\n\t\t\td.set(prop, df.get(prop))\n\n\t\tif i!=0:\n\t\t\td.insert_after = self.fields[i-1].fieldname\n\t\td.idx = i\n\n\t\td.insert()\n\t\tdf.fieldname = d.fieldname\n\n\tdef update_in_custom_field(self, df, i):\n\t\tmeta = frappe.get_meta(self.doc_type)\n\t\tmeta_df = meta.get(\"fields\", {\"fieldname\": df.fieldname})\n\t\tif not (meta_df and meta_df[0].get(\"is_custom_field\")):\n\t\t\t# not a custom field\n\t\t\treturn\n\n\t\tcustom_field = frappe.get_doc(\"Custom Field\", meta_df[0].name)\n\t\tchanged = False\n\t\tfor prop in docfield_properties:\n\t\t\tif df.get(prop) != custom_field.get(prop):\n\t\t\t\tif prop == \"fieldtype\":\n\t\t\t\t\tself.validate_fieldtype_change(df, meta_df[0].get(prop), df.get(prop))\n\n\t\t\t\tcustom_field.set(prop, df.get(prop))\n\t\t\t\tchanged = True\n\n\t\t# check and update `insert_after` property\n\t\tif i!=0:\n\t\t\tinsert_after = self.fields[i-1].fieldname\n\t\t\tif custom_field.insert_after != insert_after:\n\t\t\t\tcustom_field.insert_after = insert_after\n\t\t\t\tcustom_field.idx = i\n\t\t\t\tchanged = True\n\n\t\tif changed:\n\t\t\tcustom_field.db_update()\n\t\t\tself.flags.update_db = True\n\t\t\t#custom_field.save()\n\n\tdef delete_custom_fields(self):\n\t\tmeta = frappe.get_meta(self.doc_type)\n\t\tfields_to_remove = (\n\t\t\t{df.fieldname for df in meta.get(\"fields\")} - {df.fieldname for df in self.get(\"fields\")}\n\t\t)\n\t\tfor fieldname in fields_to_remove:\n\t\t\tdf = meta.get(\"fields\", {\"fieldname\": fieldname})[0]\n\t\t\tif df.get(\"is_custom_field\"):\n\t\t\t\tfrappe.delete_doc(\"Custom Field\", df.name)\n\n\tdef make_property_setter(self, prop, value, property_type, fieldname=None,\n\t\tapply_on=None, row_name = None):\n\t\tdelete_property_setter(self.doc_type, prop, fieldname, row_name)\n\n\t\tproperty_value = self.get_existing_property_value(prop, fieldname)\n\n\t\tif 
property_value==value:\n\t\t\treturn\n\n\t\tif not apply_on:\n\t\t\tapply_on = \"DocField\" if fieldname else \"DocType\"\n\n\t\t# create a new property setter\n\t\tfrappe.make_property_setter({\n\t\t\t\"doctype\": self.doc_type,\n\t\t\t\"doctype_or_field\": apply_on,\n\t\t\t\"fieldname\": fieldname,\n\t\t\t\"row_name\": row_name,\n\t\t\t\"property\": prop,\n\t\t\t\"value\": value,\n\t\t\t\"property_type\": property_type\n\t\t})\n\n\tdef get_existing_property_value(self, property_name, fieldname=None):\n\t\t# check if there is any need to make property setter!\n\t\tif fieldname:\n\t\t\tproperty_value = frappe.db.get_value(\"DocField\", {\"parent\": self.doc_type,\n\t\t\t\t\"fieldname\": fieldname}, property_name)\n\t\telse:\n\t\t\tif frappe.db.has_column(\"DocType\", property_name):\n\t\t\t\tproperty_value = frappe.db.get_value(\"DocType\", self.doc_type, property_name)\n\t\t\telse:\n\t\t\t\tproperty_value = None\n\n\t\treturn property_value\n\n\tdef validate_fieldtype_change(self, df, old_value, new_value):\n\t\tif df.is_virtual:\n\t\t\treturn\n\n\t\tallowed = self.allow_fieldtype_change(old_value, new_value)\n\t\tif allowed:\n\t\t\told_value_length = cint(frappe.db.type_map.get(old_value)[1])\n\t\t\tnew_value_length = cint(frappe.db.type_map.get(new_value)[1])\n\n\t\t\t# Ignore fieldtype check validation if new field type has unspecified maxlength\n\t\t\t# Changes like DATA to TEXT, where new_value_lenth equals 0 will not be validated\n\t\t\tif new_value_length and (old_value_length > new_value_length):\n\t\t\t\tself.check_length_for_fieldtypes.append({'df': df, 'old_value': old_value})\n\t\t\t\tself.validate_fieldtype_length()\n\t\t\telse:\n\t\t\t\tself.flags.update_db = True\n\n\t\telse:\n\t\t\tfrappe.throw(_(\"Fieldtype cannot be changed from {0} to {1} in row {2}\").format(old_value, new_value, df.idx))\n\n\tdef validate_fieldtype_length(self):\n\t\tfor field in self.check_length_for_fieldtypes:\n\t\t\tdf = field.get('df')\n\t\t\tmax_length = cint(frappe.db.type_map.get(df.fieldtype)[1])\n\t\t\tfieldname = df.fieldname\n\t\t\tdocs = frappe.db.sql('''\n\t\t\t\tSELECT name, {fieldname}, LENGTH({fieldname}) AS len\n\t\t\t\tFROM `tab{doctype}`\n\t\t\t\tWHERE LENGTH({fieldname}) > {max_length}\n\t\t\t'''.format(\n\t\t\t\tfieldname=fieldname,\n\t\t\t\tdoctype=self.doc_type,\n\t\t\t\tmax_length=max_length\n\t\t\t), as_dict=True)\n\t\t\tlinks = []\n\t\t\tlabel = df.label\n\t\t\tfor doc in docs:\n\t\t\t\tlinks.append(frappe.utils.get_link_to_form(self.doc_type, doc.name))\n\t\t\tlinks_str = ', '.join(links)\n\n\t\t\tif docs:\n\t\t\t\tfrappe.throw(_('Value for field {0} is too long in {1}. 
Length should be lesser than {2} characters')\n\t\t\t\t\t.format(\n\t\t\t\t\t\tfrappe.bold(label),\n\t\t\t\t\t\tlinks_str,\n\t\t\t\t\t\tfrappe.bold(max_length)\n\t\t\t\t\t), title=_('Data Too Long'), is_minimizable=len(docs) > 1)\n\n\t\tself.flags.update_db = True\n\n\[email protected]()\n\tdef reset_to_defaults(self):\n\t\tif not self.doc_type:\n\t\t\treturn\n\n\t\treset_customization(self.doc_type)\n\t\tself.fetch_to_customize()\n\n\t@classmethod\n\tdef allow_fieldtype_change(self, old_type: str, new_type: str) -> bool:\n\t\t\"\"\" allow type change, if both old_type and new_type are in same field group.\n\t\tfield groups are defined in ALLOWED_FIELDTYPE_CHANGE variables.\n\t\t\"\"\"\n\t\tin_field_group = lambda group: (old_type in group) and (new_type in group)\n\t\treturn any(map(in_field_group, ALLOWED_FIELDTYPE_CHANGE))\n\ndef reset_customization(doctype):\n\tsetters = frappe.get_all(\"Property Setter\", filters={\n\t\t'doc_type': doctype,\n\t\t'field_name': ['!=', 'naming_series'],\n\t\t'property': ['!=', 'options']\n\t}, pluck='name')\n\n\tfor setter in setters:\n\t\tfrappe.delete_doc(\"Property Setter\", setter)\n\n\tfrappe.clear_cache(doctype=doctype)\n\ndoctype_properties = {\n\t'search_fields': 'Data',\n\t'title_field': 'Data',\n\t'image_field': 'Data',\n\t'sort_field': 'Data',\n\t'sort_order': 'Data',\n\t'default_print_format': 'Data',\n\t'allow_copy': 'Check',\n\t'istable': 'Check',\n\t'quick_entry': 'Check',\n\t'editable_grid': 'Check',\n\t'max_attachments': 'Int',\n\t'track_changes': 'Check',\n\t'track_views': 'Check',\n\t'allow_auto_repeat': 'Check',\n\t'allow_import': 'Check',\n\t'show_preview_popup': 'Check',\n\t'default_email_template': 'Data',\n\t'email_append_to': 'Check',\n\t'subject_field': 'Data',\n\t'sender_field': 'Data',\n\t'autoname': 'Data',\n\t'show_title_field_in_link': 'Check'\n}\n\ndocfield_properties = {\n\t'idx': 'Int',\n\t'label': 'Data',\n\t'fieldtype': 'Select',\n\t'options': 'Text',\n\t'fetch_from': 'Small Text',\n\t'fetch_if_empty': 'Check',\n\t'show_dashboard': 'Check',\n\t'permlevel': 'Int',\n\t'width': 'Data',\n\t'print_width': 'Data',\n\t'non_negative': 'Check',\n\t'reqd': 'Check',\n\t'unique': 'Check',\n\t'ignore_user_permissions': 'Check',\n\t'in_list_view': 'Check',\n\t'in_standard_filter': 'Check',\n\t'in_global_search': 'Check',\n\t'in_preview': 'Check',\n\t'bold': 'Check',\n\t'no_copy': 'Check',\n\t'hidden': 'Check',\n\t'collapsible': 'Check',\n\t'collapsible_depends_on': 'Data',\n\t'print_hide': 'Check',\n\t'print_hide_if_no_value': 'Check',\n\t'report_hide': 'Check',\n\t'allow_on_submit': 'Check',\n\t'translatable': 'Check',\n\t'mandatory_depends_on': 'Data',\n\t'read_only_depends_on': 'Data',\n\t'depends_on': 'Data',\n\t'description': 'Text',\n\t'default': 'Text',\n\t'precision': 'Select',\n\t'read_only': 'Check',\n\t'length': 'Int',\n\t'columns': 'Int',\n\t'remember_last_selected_value': 'Check',\n\t'allow_bulk_edit': 'Check',\n\t'auto_repeat': 'Link',\n\t'allow_in_quick_entry': 'Check',\n\t'hide_border': 'Check',\n\t'hide_days': 'Check',\n\t'hide_seconds': 'Check',\n\t'is_virtual': 'Check',\n}\n\ndoctype_link_properties = {\n\t'link_doctype': 'Link',\n\t'link_fieldname': 'Data',\n\t'group': 'Data',\n\t'hidden': 'Check'\n}\n\ndoctype_action_properties = {\n\t'label': 'Link',\n\t'action_type': 'Select',\n\t'action': 'Small Text',\n\t'group': 'Data',\n\t'hidden': 'Check'\n}\n\ndoctype_state_properties = {\n\t'title': 'Data',\n\t'color': 'Select'\n}\n\n\nALLOWED_FIELDTYPE_CHANGE = (\n\t('Currency', 'Float', 'Percent'),\n\t('Small Text', 
'Data'),\n\t('Text', 'Data'),\n\t('Text', 'Text Editor', 'Code', 'Signature', 'HTML Editor'),\n\t('Data', 'Select'),\n\t('Text', 'Small Text'),\n\t('Text', 'Data', 'Barcode'),\n\t('Code', 'Geolocation'),\n\t('Table', 'Table MultiSelect'))\n\nALLOWED_OPTIONS_CHANGE = ('Read Only', 'HTML', 'Select', 'Data')\n", "path": "frappe/custom/doctype/customize_form/customize_form.py" } ]
diff --git a/frappe/custom/doctype/customize_form/customize_form.py b/frappe/custom/doctype/customize_form/customize_form.py index 92a540447fd1..f1b6ab40edb1 100644 --- a/frappe/custom/doctype/customize_form/customize_form.py +++ b/frappe/custom/doctype/customize_form/customize_form.py @@ -540,6 +540,7 @@ def reset_customization(doctype): 'in_global_search': 'Check', 'in_preview': 'Check', 'bold': 'Check', + 'no_copy': 'Check', 'hidden': 'Check', 'collapsible': 'Check', 'collapsible_depends_on': 'Data', diff --git a/frappe/custom/doctype/customize_form/test_customize_form.py b/frappe/custom/doctype/customize_form/test_customize_form.py index 0fe39e0008ec..37198c5ba6a5 100644 --- a/frappe/custom/doctype/customize_form/test_customize_form.py +++ b/frappe/custom/doctype/customize_form/test_customize_form.py @@ -97,13 +97,18 @@ def test_save_customization_custom_field_property(self): custom_field = d.get("fields", {"fieldname": "test_custom_field"})[0] custom_field.reqd = 1 + custom_field.no_copy = 1 d.run_method("save_customization") self.assertEqual(frappe.db.get_value("Custom Field", "Event-test_custom_field", "reqd"), 1) + self.assertEqual(frappe.db.get_value("Custom Field", "Event-test_custom_field", "no_copy"), 1) custom_field = d.get("fields", {"is_custom_field": True})[0] custom_field.reqd = 0 + custom_field.no_copy = 0 d.run_method("save_customization") self.assertEqual(frappe.db.get_value("Custom Field", "Event-test_custom_field", "reqd"), 0) + self.assertEqual(frappe.db.get_value("Custom Field", "Event-test_custom_field", "no_copy"), 0) + def test_save_customization_new_field(self): d = self.get_customize_form("Event") diff --git a/frappe/custom/doctype/customize_form_field/customize_form_field.json b/frappe/custom/doctype/customize_form_field/customize_form_field.json index 4351e76609dd..5906cd3bcfaf 100644 --- a/frappe/custom/doctype/customize_form_field/customize_form_field.json +++ b/frappe/custom/doctype/customize_form_field/customize_form_field.json @@ -20,6 +20,7 @@ "in_global_search", "in_preview", "bold", + "no_copy", "allow_in_quick_entry", "translatable", "column_break_7", @@ -437,13 +438,19 @@ "fieldname": "show_dashboard", "fieldtype": "Check", "label": "Show Dashboard" + }, + { + "default": "0", + "fieldname": "no_copy", + "fieldtype": "Check", + "label": "No Copy" } ], "idx": 1, "index_web_pages_for_search": 1, "istable": 1, "links": [], - "modified": "2022-01-27 21:45:22.349776", + "modified": "2022-02-08 19:38:16.111199", "modified_by": "Administrator", "module": "Custom", "name": "Customize Form Field", @@ -453,4 +460,4 @@ "sort_field": "modified", "sort_order": "ASC", "states": [] -} \ No newline at end of file +}
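The diff above exposes the `no_copy` flag on Customize Form fields and persists it to the underlying Custom Field. A minimal sketch of toggling that flag from code, modelled on the test added in the diff rather than on any documented public API — it assumes a running Frappe site context (e.g. via `bench execute`), an `Event` doctype, and an existing custom field named `test_custom_field` as in the test fixture:

```python
# Sketch only: mirrors the test in the diff above; requires a Frappe site context.
import frappe

d = frappe.get_doc("Customize Form")   # Customize Form is a Single doctype
d.doc_type = "Event"
d.run_method("fetch_to_customize")     # load Event's fields into the form

field = d.get("fields", {"fieldname": "test_custom_field"})[0]
field.no_copy = 1                      # the property exposed by this change
d.run_method("save_customization")

# The flag should now be stored on the generated Custom Field record.
print(frappe.db.get_value("Custom Field", "Event-test_custom_field", "no_copy"))
```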
dask__distributed-367
OverflowError when sending large sparse arrays I don't yet have a small reproducible example, but I can make this happen every time I try to collect many large sparse arrays. I do have a notebook that will produce it though, and can make that available. The traceback: ``` Traceback (most recent call last): File "/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/core.py", line 266, in write frames = protocol.dumps(msg) File "/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/protocol.py", line 81, in dumps frames = dumps_msgpack(small) File "/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/protocol.py", line 155, in dumps_msgpack fmt, payload = maybe_compress(payload) File "/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/protocol.py", line 137, in maybe_compress compressed = compress(payload) OverflowError: size does not fit in an int ``` A few notes: - Each array is roughly `675000 x 745`, and ~1% dense. The total bytes for indices + indptr + data is ~40MB each. - I can get each array individually, so it's not a problem with a chunk being too large - The error appears only when I'm collecting enough at once (for my size, 39 and lower works fine). - At 41 arrays I get the above error; 40 arrays gives me a different (but probably related) error: ``` --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-55-7b87709b6c67> in <module>() ----> 1 res = t.compute() /home/jcrist/dask/dask/base.pyc in compute(self, **kwargs) 84 Extra keywords to forward to the scheduler ``get`` function. 85 """ ---> 86 return compute(self, **kwargs)[0] 87 88 @classmethod /home/jcrist/dask/dask/base.pyc in compute(*args, **kwargs) 177 dsk = merge(var.dask for var in variables) 178 keys = [var._keys() for var in variables] --> 179 results = get(dsk, keys, **kwargs) 180 181 results_iter = iter(results) /home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/executor.pyc in get(self, dsk, keys, **kwargs) 1008 1009 if status == 'error': -> 1010 raise result 1011 else: 1012 return result ValueError: corrupt input at byte 2 ```
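The `OverflowError` is raised while compressing a msgpack payload that has grown past what the compressor bindings accept — the error text itself says the size does not fit in an `int`, i.e. anything at or above 2**31 bytes overflows. Below is a standalone sketch of the kind of size guard that sidesteps this; it is not the actual `distributed.protocol` code, and it uses `zlib` as a stand-in for whichever compressor (snappy, lz4, ...) happens to be installed:

```python
# Sketch: skip compression for tiny payloads (not worth it) and for huge
# payloads (the C bindings' size argument is an int, hence the OverflowError).
import zlib


def maybe_compress(payload: bytes, min_size: int = 10_000):
    """Return (compression_name, data), compressing only when safe and useful."""
    if len(payload) < min_size:
        return None, payload
    if len(payload) > 2 ** 31:                  # too large for the compressor's int size
        return None, payload
    compressed = zlib.compress(payload)
    if len(compressed) > 0.9 * len(payload):    # barely compressible, keep original
        return None, payload
    return "zlib", compressed


print(maybe_compress(b"x" * 100)[0])        # None: below min_size
print(maybe_compress(b"x" * 1_000_000)[0])  # 'zlib': compressible and within bounds
```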
[ { "content": "\"\"\"\nThe distributed message protocol consists of the following parts:\n\n1. The length of the header, stored as a uint32\n2. The header, stored as msgpack.\n If there are no fields in the header then we skip it entirely.\n3. The payload, stored as possibly compressed msgpack\n4. A sentinel value\n\n**Header**\n\nThe Header contains the following fields:\n\n* **compression**: string, optional\n One of the following: ``'snappy', 'lz4', 'zlib'`` or missing for None\n\n**Payload**\n\nThe payload is any msgpack serializable value. It may be compressed based\non the header.\n\n**Sentinel**\n\nWe often terminate each message with a sentinel value. This happens\noutside of this module though and is not baked in.\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport random\nimport struct\n\ntry:\n import pandas.msgpack as msgpack\nexcept ImportError:\n import msgpack\n\nfrom toolz import first, keymap, identity, merge\n\nfrom .utils import ignoring\nfrom .compatibility import unicode\n\n\ncompressions = {None: {'compress': identity,\n 'decompress': identity}}\n\ndefault_compression = None\n\n\nwith ignoring(ImportError):\n import zlib\n compressions['zlib'] = {'compress': zlib.compress,\n 'decompress': zlib.decompress}\n\nwith ignoring(ImportError):\n import snappy\n compressions['snappy'] = {'compress': snappy.compress,\n 'decompress': snappy.decompress}\n default_compression = 'snappy'\n\nwith ignoring(ImportError):\n import lz4\n compressions['lz4'] = {'compress': lz4.LZ4_compress,\n 'decompress': lz4.LZ4_uncompress}\n default_compression = 'lz4'\n\n\ndef dumps(msg):\n \"\"\" Transform Python value to bytestream suitable for communication \"\"\"\n small_header = {}\n\n if isinstance(msg, dict):\n big = {k: v for k, v in msg.items()\n if isinstance(v, bytes) and len(v) > 1e6}\n else:\n big = False\n if big:\n small = {k: v for k, v in msg.items() if k not in big}\n else:\n small = msg\n\n frames = dumps_msgpack(small)\n if big:\n frames += dumps_big_byte_dict(big)\n\n return frames\n\n\ndef loads(frames):\n \"\"\" Transform bytestream back into Python value \"\"\"\n header, payload, frames = frames[0], frames[1], frames[2:]\n msg = loads_msgpack(header, payload)\n\n if frames:\n big = loads_big_byte_dict(*frames)\n msg.update(big)\n\n return msg\n\n\ndef byte_sample(b, size, n):\n \"\"\" Sample a bytestring from many locations \"\"\"\n starts = [random.randint(0, len(b) - size) for j in range(n)]\n ends = []\n for i, start in enumerate(starts[:-1]):\n ends.append(min(start + size, starts[i + 1]))\n ends.append(starts[-1] + size)\n\n return b''.join([b[start:end] for start, end in zip(starts, ends)])\n\n\ndef maybe_compress(payload, compression=default_compression, min_size=1e4,\n sample_size=1e4, nsamples=5):\n \"\"\" Maybe compress payload\n\n 1. We don't compress small messages\n 2. We sample the payload in a few spots, compress that, and if it doesn't\n do any good we return the original\n 3. We then compress the full original, it it doesn't compress well then we\n return the original\n 4. 
We return the compressed result\n \"\"\"\n if not compression:\n return None, payload\n if len(payload) < min_size:\n return None, payload\n\n min_size = int(min_size)\n sample_size = int(sample_size)\n\n compress = compressions[compression]['compress']\n\n # Compress a sample, return original if not very compressed\n sample = byte_sample(payload, sample_size, nsamples)\n if len(compress(sample)) > 0.9 * len(sample): # not very compressible\n return None, payload\n\n compressed = compress(payload)\n if len(compressed) > 0.9 * len(payload): # not very compressible\n return None, payload\n\n return compression, compress(payload)\n\n\ndef dumps_msgpack(msg):\n \"\"\" Dump msg into header and payload, both bytestrings\n\n All of the message must be msgpack encodable\n\n See Also:\n loads_msgpack\n \"\"\"\n header = {}\n payload = msgpack.dumps(msg, use_bin_type=True)\n\n fmt, payload = maybe_compress(payload)\n if fmt:\n header['compression'] = fmt\n\n if header:\n header_bytes = msgpack.dumps(header, use_bin_type=True)\n else:\n header_bytes = b''\n\n return [header_bytes, payload]\n\n\ndef loads_msgpack(header, payload):\n \"\"\" Read msgpack header and payload back to Python object\n\n See Also:\n dumps_msgpack\n \"\"\"\n if header:\n header = msgpack.loads(header, encoding='utf8')\n else:\n header = {}\n\n if header.get('compression'):\n try:\n decompress = compressions[header['compression']]['decompress']\n payload = decompress(payload)\n except KeyError:\n raise ValueError(\"Data is compressed as %s but we don't have this\"\n \" installed\" % header['compression'].decode())\n\n return msgpack.loads(payload, encoding='utf8')\n\n\ndef dumps_big_byte_dict(d):\n \"\"\" Serialize large byte dictionary to sequence of frames\n\n The input must be a dictionary and all values of that dictionary must be\n bytestrings. These should probably be large.\n\n Returns a sequence of frames, one header followed by each of the values\n\n See Also:\n loads_big_byte_dict\n \"\"\"\n assert isinstance(d, dict) and all(isinstance(v, bytes) for v in d.values())\n shards = {}\n for k, v in list(d.items()):\n if len(v) >= 2**31:\n L = []\n for i, j in enumerate(range(0, len(v), 2**30)):\n key = '.shard-%d-%s' % (i, k)\n d[key] = v[j: j + 2**30]\n L.append(key)\n del d[k]\n shards[k] = L\n\n keys, values = zip(*d.items())\n\n compress = compressions[default_compression]['compress']\n compression = []\n values2 = []\n for v in values:\n fmt, vv = maybe_compress(v)\n compression.append(fmt)\n values2.append(vv)\n\n header = {'encoding': 'big-byte-dict',\n 'keys': keys,\n 'compression': compression}\n if shards:\n header['shards'] = shards\n\n return [msgpack.dumps(header, use_bin_type=True)] + values2\n\n\ndef loads_big_byte_dict(header, *values):\n \"\"\" Deserialize big-byte frames to large byte dictionary\n\n See Also:\n dumps_big_byte_dict\n \"\"\"\n header = msgpack.loads(header, encoding='utf8')\n\n values2 = [compressions[c]['decompress'](v)\n for c, v in zip(header['compression'], values)]\n result = dict(zip(header['keys'], values2))\n\n for k, keys in header.get('shards', {}).items():\n result[k] = b''.join(result.pop(kk) for kk in keys)\n return result\n", "path": "distributed/protocol.py" } ]
[ { "content": "\"\"\"\nThe distributed message protocol consists of the following parts:\n\n1. The length of the header, stored as a uint32\n2. The header, stored as msgpack.\n If there are no fields in the header then we skip it entirely.\n3. The payload, stored as possibly compressed msgpack\n4. A sentinel value\n\n**Header**\n\nThe Header contains the following fields:\n\n* **compression**: string, optional\n One of the following: ``'snappy', 'lz4', 'zlib'`` or missing for None\n\n**Payload**\n\nThe payload is any msgpack serializable value. It may be compressed based\non the header.\n\n**Sentinel**\n\nWe often terminate each message with a sentinel value. This happens\noutside of this module though and is not baked in.\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport random\nimport struct\n\ntry:\n import pandas.msgpack as msgpack\nexcept ImportError:\n import msgpack\n\nfrom toolz import first, keymap, identity, merge\n\nfrom .utils import ignoring\nfrom .compatibility import unicode\n\n\ncompressions = {None: {'compress': identity,\n 'decompress': identity}}\n\ndefault_compression = None\n\n\nwith ignoring(ImportError):\n import zlib\n compressions['zlib'] = {'compress': zlib.compress,\n 'decompress': zlib.decompress}\n\nwith ignoring(ImportError):\n import snappy\n compressions['snappy'] = {'compress': snappy.compress,\n 'decompress': snappy.decompress}\n default_compression = 'snappy'\n\nwith ignoring(ImportError):\n import lz4\n compressions['lz4'] = {'compress': lz4.LZ4_compress,\n 'decompress': lz4.LZ4_uncompress}\n default_compression = 'lz4'\n\n\ndef dumps(msg):\n \"\"\" Transform Python value to bytestream suitable for communication \"\"\"\n small_header = {}\n\n if isinstance(msg, dict):\n big = {k: v for k, v in msg.items()\n if isinstance(v, bytes) and len(v) > 1e6}\n else:\n big = False\n if big:\n small = {k: v for k, v in msg.items() if k not in big}\n else:\n small = msg\n\n frames = dumps_msgpack(small)\n if big:\n frames += dumps_big_byte_dict(big)\n\n return frames\n\n\ndef loads(frames):\n \"\"\" Transform bytestream back into Python value \"\"\"\n header, payload, frames = frames[0], frames[1], frames[2:]\n msg = loads_msgpack(header, payload)\n\n if frames:\n big = loads_big_byte_dict(*frames)\n msg.update(big)\n\n return msg\n\n\ndef byte_sample(b, size, n):\n \"\"\" Sample a bytestring from many locations \"\"\"\n starts = [random.randint(0, len(b) - size) for j in range(n)]\n ends = []\n for i, start in enumerate(starts[:-1]):\n ends.append(min(start + size, starts[i + 1]))\n ends.append(starts[-1] + size)\n\n return b''.join([b[start:end] for start, end in zip(starts, ends)])\n\n\ndef maybe_compress(payload, compression=default_compression, min_size=1e4,\n sample_size=1e4, nsamples=5):\n \"\"\" Maybe compress payload\n\n 1. We don't compress small messages\n 2. We sample the payload in a few spots, compress that, and if it doesn't\n do any good we return the original\n 3. We then compress the full original, it it doesn't compress well then we\n return the original\n 4. 
We return the compressed result\n \"\"\"\n if not compression:\n return None, payload\n if len(payload) < min_size:\n return None, payload\n if len(payload) > 2**31:\n return None, payload\n\n min_size = int(min_size)\n sample_size = int(sample_size)\n\n compress = compressions[compression]['compress']\n\n # Compress a sample, return original if not very compressed\n sample = byte_sample(payload, sample_size, nsamples)\n if len(compress(sample)) > 0.9 * len(sample): # not very compressible\n return None, payload\n\n compressed = compress(payload)\n if len(compressed) > 0.9 * len(payload): # not very compressible\n return None, payload\n\n return compression, compress(payload)\n\n\ndef dumps_msgpack(msg):\n \"\"\" Dump msg into header and payload, both bytestrings\n\n All of the message must be msgpack encodable\n\n See Also:\n loads_msgpack\n \"\"\"\n header = {}\n payload = msgpack.dumps(msg, use_bin_type=True)\n\n fmt, payload = maybe_compress(payload)\n if fmt:\n header['compression'] = fmt\n\n if header:\n header_bytes = msgpack.dumps(header, use_bin_type=True)\n else:\n header_bytes = b''\n\n return [header_bytes, payload]\n\n\ndef loads_msgpack(header, payload):\n \"\"\" Read msgpack header and payload back to Python object\n\n See Also:\n dumps_msgpack\n \"\"\"\n if header:\n header = msgpack.loads(header, encoding='utf8')\n else:\n header = {}\n\n if header.get('compression'):\n try:\n decompress = compressions[header['compression']]['decompress']\n payload = decompress(payload)\n except KeyError:\n raise ValueError(\"Data is compressed as %s but we don't have this\"\n \" installed\" % header['compression'].decode())\n\n return msgpack.loads(payload, encoding='utf8')\n\n\ndef dumps_big_byte_dict(d):\n \"\"\" Serialize large byte dictionary to sequence of frames\n\n The input must be a dictionary and all values of that dictionary must be\n bytestrings. These should probably be large.\n\n Returns a sequence of frames, one header followed by each of the values\n\n See Also:\n loads_big_byte_dict\n \"\"\"\n assert isinstance(d, dict) and all(isinstance(v, bytes) for v in d.values())\n shards = {}\n for k, v in list(d.items()):\n if len(v) >= 2**31:\n L = []\n for i, j in enumerate(range(0, len(v), 2**30)):\n key = '.shard-%d-%s' % (i, k)\n d[key] = v[j: j + 2**30]\n L.append(key)\n del d[k]\n shards[k] = L\n\n keys, values = zip(*d.items())\n\n compress = compressions[default_compression]['compress']\n compression = []\n values2 = []\n for v in values:\n fmt, vv = maybe_compress(v)\n compression.append(fmt)\n values2.append(vv)\n\n header = {'encoding': 'big-byte-dict',\n 'keys': keys,\n 'compression': compression}\n if shards:\n header['shards'] = shards\n\n return [msgpack.dumps(header, use_bin_type=True)] + values2\n\n\ndef loads_big_byte_dict(header, *values):\n \"\"\" Deserialize big-byte frames to large byte dictionary\n\n See Also:\n dumps_big_byte_dict\n \"\"\"\n header = msgpack.loads(header, encoding='utf8')\n\n values2 = [compressions[c]['decompress'](v)\n for c, v in zip(header['compression'], values)]\n result = dict(zip(header['keys'], values2))\n\n for k, keys in header.get('shards', {}).items():\n result[k] = b''.join(result.pop(kk) for kk in keys)\n return result\n", "path": "distributed/protocol.py" } ]
diff --git a/distributed/protocol.py b/distributed/protocol.py index ca67ddbdd4f..21d1584f968 100644 --- a/distributed/protocol.py +++ b/distributed/protocol.py @@ -123,6 +123,8 @@ def maybe_compress(payload, compression=default_compression, min_size=1e4, return None, payload if len(payload) < min_size: return None, payload + if len(payload) > 2**31: + return None, payload min_size = int(min_size) sample_size = int(sample_size)
piskvorky__gensim-2869
Investigate and fix Keras problem under Python 3.8 One of our unit tests fails on Travis under Py3.8. ``` =================================== FAILURES =================================== _____________ TestKerasWord2VecWrapper.testEmbeddingLayerCosineSim _____________ self = <gensim.test.test_keras_integration.TestKerasWord2VecWrapper testMethod=testEmbeddingLayerCosineSim> def testEmbeddingLayerCosineSim(self): """ Test Keras 'Embedding' layer returned by 'get_embedding_layer' function for a simple word similarity task. """ keras_w2v_model = self.model_cos_sim keras_w2v_model_wv = keras_w2v_model.wv embedding_layer = keras_w2v_model_wv.get_keras_embedding() input_a = Input(shape=(1,), dtype='int32', name='input_a') input_b = Input(shape=(1,), dtype='int32', name='input_b') embedding_a = embedding_layer(input_a) embedding_b = embedding_layer(input_b) similarity = dot([embedding_a, embedding_b], axes=2, normalize=True) > model = Model(input=[input_a, input_b], output=similarity) embedding_a = <tf.Tensor 'embedding_4/Identity:0' shape=(None, 1, 100) dtype=float32> embedding_b = <tf.Tensor 'embedding_4_1/Identity:0' shape=(None, 1, 100) dtype=float32> embedding_layer = <tensorflow.python.keras.layers.embeddings.Embedding object at 0x7f603df0d130> input_a = <tf.Tensor 'input_a_3:0' shape=(None, 1) dtype=int32> input_b = <tf.Tensor 'input_b_3:0' shape=(None, 1) dtype=int32> keras_w2v_model = <gensim.models.word2vec.Word2Vec object at 0x7f603df0d760> keras_w2v_model_wv = <gensim.models.keyedvectors.Word2VecKeyedVectors object at 0x7f603df0d250> self = <gensim.test.test_keras_integration.TestKerasWord2VecWrapper testMethod=testEmbeddingLayerCosineSim> similarity = <tf.Tensor 'dot_3/Identity:0' shape=(None, 1, 1) dtype=float32> gensim/test/test_keras_integration.py:62: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ .tox/py38-linux/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:167: in __init__ super(Model, self).__init__(*args, **kwargs) __class__ = <class 'tensorflow.python.keras.engine.training.Model'> args = () kwargs = {'input': [<tf.Tensor 'input_a_3:0' shape=(None, 1) dtype=int32>, <tf.Tensor 'input_b_3:0' shape=(None, 1) dtype=int32>], 'output': <tf.Tensor 'dot_3/Identity:0' shape=(None, 1, 1) dtype=float32>} self = <tensorflow.python.keras.engine.training.Model object at 0x7f603df18ac0> .tox/py38-linux/lib/python3.8/site-packages/tensorflow/python/keras/engine/network.py:176: in __init__ self._init_subclassed_network(**kwargs) args = () kwargs = {'input': [<tf.Tensor 'input_a_3:0' shape=(None, 1) dtype=int32>, <tf.Tensor 'input_b_3:0' shape=(None, 1) dtype=int32>], 'output': <tf.Tensor 'dot_3/Identity:0' shape=(None, 1, 1) dtype=float32>} self = <tensorflow.python.keras.engine.training.Model object at 0x7f603df18ac0> .tox/py38-linux/lib/python3.8/site-packages/tensorflow/python/training/tracking/base.py:456: in _method_wrapper result = method(self, *args, **kwargs) args = () kwargs = {'input': [<tf.Tensor 'input_a_3:0' shape=(None, 1) dtype=int32>, <tf.Tensor 'input_b_3:0' shape=(None, 1) dtype=int32>], 'output': <tf.Tensor 'dot_3/Identity:0' shape=(None, 1, 1) dtype=float32>} method = <function Network._init_subclassed_network at 0x7f60465f6d30> previous_value = True self = <tensorflow.python.keras.engine.training.Model object at 0x7f603df18ac0> .tox/py38-linux/lib/python3.8/site-packages/tensorflow/python/keras/engine/network.py:367: in _init_subclassed_network self._base_init(name=name, **kwargs) kwargs = {'input': [<tf.Tensor 
'input_a_3:0' shape=(None, 1) dtype=int32>, <tf.Tensor 'input_b_3:0' shape=(None, 1) dtype=int32>], 'output': <tf.Tensor 'dot_3/Identity:0' shape=(None, 1, 1) dtype=float32>} name = None self = <tensorflow.python.keras.engine.training.Model object at 0x7f603df18ac0> .tox/py38-linux/lib/python3.8/site-packages/tensorflow/python/training/tracking/base.py:456: in _method_wrapper result = method(self, *args, **kwargs) args = () kwargs = {'input': [<tf.Tensor 'input_a_3:0' shape=(None, 1) dtype=int32>, <tf.Tensor 'input_b_3:0' shape=(None, 1) dtype=int32>], 'name': None, 'output': <tf.Tensor 'dot_3/Identity:0' shape=(None, 1, 1) dtype=float32>} method = <function Network._base_init at 0x7f60465f6a60> previous_value = False self = <tensorflow.python.keras.engine.training.Model object at 0x7f603df18ac0> .tox/py38-linux/lib/python3.8/site-packages/tensorflow/python/keras/engine/network.py:202: in _base_init generic_utils.validate_kwargs(kwargs, {'trainable', 'dtype', 'dynamic', __class__ = <class 'tensorflow.python.keras.engine.network.Network'> kwargs = {'input': [<tf.Tensor 'input_a_3:0' shape=(None, 1) dtype=int32>, <tf.Tensor 'input_b_3:0' shape=(None, 1) dtype=int32>], 'output': <tf.Tensor 'dot_3/Identity:0' shape=(None, 1, 1) dtype=float32>} name = None self = <tensorflow.python.keras.engine.training.Model object at 0x7f603df18ac0> _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ kwargs = {'input': [<tf.Tensor 'input_a_3:0' shape=(None, 1) dtype=int32>, <tf.Tensor 'input_b_3:0' shape=(None, 1) dtype=int32>], 'output': <tf.Tensor 'dot_3/Identity:0' shape=(None, 1, 1) dtype=float32>} allowed_kwargs = {'autocast', 'dtype', 'dynamic', 'trainable'} error_message = 'Keyword argument not understood:' def validate_kwargs(kwargs, allowed_kwargs, error_message='Keyword argument not understood:'): """Checks that all keyword arguments are in the set of allowed keys.""" for kwarg in kwargs: if kwarg not in allowed_kwargs: > raise TypeError(error_message, kwarg) E TypeError: ('Keyword argument not understood:', 'input') allowed_kwargs = {'autocast', 'dtype', 'dynamic', 'trainable'} error_message = 'Keyword argument not understood:' kwarg = 'input' kwargs = {'input': [<tf.Tensor 'input_a_3:0' shape=(None, 1) dtype=int32>, <tf.Tensor 'input_b_3:0' shape=(None, 1) dtype=int32>], 'output': <tf.Tensor 'dot_3/Identity:0' shape=(None, 1, 1) dtype=float32>} ```
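The traceback boils down to the Keras functional API no longer accepting the legacy singular keyword arguments: the newer Keras/tf.keras pulled in under Python 3.8 rejects `input=`/`output=` with the `TypeError` shown above, and requires the plural spelling. A minimal sketch of the corrected construction, using `tf.keras` and a plain `Embedding` layer as a stand-in for the one returned by `wv.get_keras_embedding()`:

```python
# Sketch, not the gensim test itself: requires tensorflow; the embedding layer
# here is a placeholder rather than one built from trained word2vec vectors.
from tensorflow.keras.layers import Embedding, Input, dot
from tensorflow.keras.models import Model

embedding_layer = Embedding(input_dim=1000, output_dim=100)

input_a = Input(shape=(1,), dtype="int32", name="input_a")
input_b = Input(shape=(1,), dtype="int32", name="input_b")
similarity = dot([embedding_layer(input_a), embedding_layer(input_b)],
                 axes=2, normalize=True)

# The plural keywords are the supported spelling on current Keras/tf.keras.
model = Model(inputs=[input_a, input_b], outputs=similarity)
model.compile(optimizer="sgd", loss="mse")
model.summary()
```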
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2014 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nRun with::\n\n python ./setup.py install\n\"\"\"\n\nimport distutils.cmd\nimport distutils.log\nimport itertools\nimport os.path\nimport platform\nimport shutil\nimport sys\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.build_ext import build_ext\n\n\nc_extensions = {\n 'gensim.models.word2vec_inner': 'gensim/models/word2vec_inner.c',\n 'gensim.corpora._mmreader': 'gensim/corpora/_mmreader.c',\n 'gensim.models.fasttext_inner': 'gensim/models/fasttext_inner.c',\n 'gensim.models._utils_any2vec': 'gensim/models/_utils_any2vec.c',\n 'gensim._matutils': 'gensim/_matutils.c',\n 'gensim.models.nmf_pgd': 'gensim/models/nmf_pgd.c',\n}\n\ncpp_extensions = {\n 'gensim.models.doc2vec_inner': 'gensim/models/doc2vec_inner.cpp',\n 'gensim.models.word2vec_corpusfile': 'gensim/models/word2vec_corpusfile.cpp',\n 'gensim.models.fasttext_corpusfile': 'gensim/models/fasttext_corpusfile.cpp',\n 'gensim.models.doc2vec_corpusfile': 'gensim/models/doc2vec_corpusfile.cpp',\n}\n\n\ndef need_cython():\n \"\"\"Return True if we need Cython to translate any of the extensions.\n\n If the extensions have already been translated to C/C++, then we don't need\n to install Cython and perform the translation.\"\"\"\n expected = list(c_extensions.values()) + list(cpp_extensions.values())\n return any([not os.path.isfile(f) for f in expected])\n\n\ndef make_c_ext(use_cython=False):\n for module, source in c_extensions.items():\n if use_cython:\n source = source.replace('.c', '.pyx')\n yield Extension(module, sources=[source], language='c')\n\n\ndef make_cpp_ext(use_cython=False):\n extra_args = []\n system = platform.system()\n\n if system == 'Linux':\n extra_args.append('-std=c++11')\n elif system == 'Darwin':\n extra_args.extend(['-stdlib=libc++', '-std=c++11'])\n\n for module, source in cpp_extensions.items():\n if use_cython:\n source = source.replace('.cpp', '.pyx')\n yield Extension(\n module,\n sources=[source],\n language='c++',\n extra_compile_args=extra_args,\n extra_link_args=extra_args,\n )\n\n\n#\n# We use use_cython=False here for two reasons:\n#\n# 1. Cython may not be available at this stage\n# 2. 
The actual translation from Cython to C/C++ happens inside CustomBuildExt\n#\next_modules = list(itertools.chain(make_c_ext(use_cython=False), make_cpp_ext(use_cython=False)))\n\n\nclass CustomBuildExt(build_ext):\n \"\"\"Custom build_ext action with bootstrapping.\n\n We need this in order to use numpy and Cython in this script without\n importing them at module level, because they may not be available yet.\n \"\"\"\n #\n # http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py\n #\n def finalize_options(self):\n build_ext.finalize_options(self)\n # Prevent numpy from thinking it is still in its setup process:\n # https://docs.python.org/2/library/__builtin__.html#module-__builtin__\n __builtins__.__NUMPY_SETUP__ = False\n\n import numpy\n self.include_dirs.append(numpy.get_include())\n\n if need_cython():\n import Cython.Build\n Cython.Build.cythonize(list(make_c_ext(use_cython=True)))\n Cython.Build.cythonize(list(make_cpp_ext(use_cython=True)))\n\n\nclass CleanExt(distutils.cmd.Command):\n description = 'Remove C sources, C++ sources and binaries for gensim extensions'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n for root, dirs, files in os.walk('gensim'):\n files = [\n os.path.join(root, f)\n for f in files\n if os.path.splitext(f)[1] in ('.c', '.cpp', '.so')\n ]\n for f in files:\n self.announce('removing %s' % f, level=distutils.log.INFO)\n os.unlink(f)\n\n if os.path.isdir('build'):\n self.announce('recursively removing build', level=distutils.log.INFO)\n shutil.rmtree('build')\n\n\ncmdclass = {'build_ext': CustomBuildExt, 'clean_ext': CleanExt}\n\nWHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'}\nif WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):\n import wheelhouse_uploader.cmd\n cmdclass.update(vars(wheelhouse_uploader.cmd))\n\n\nLONG_DESCRIPTION = u\"\"\"\n==============================================\ngensim -- Topic Modelling in Python\n==============================================\n\n|Travis|_\n|Wheel|_\n\n.. |Travis| image:: https://img.shields.io/travis/RaRe-Technologies/gensim/develop.svg\n.. |Wheel| image:: https://img.shields.io/pypi/wheel/gensim.svg\n\n.. _Travis: https://travis-ci.org/RaRe-Technologies/gensim\n.. _Downloads: https://pypi.python.org/pypi/gensim\n.. _License: http://radimrehurek.com/gensim/about.html\n.. _Wheel: https://pypi.python.org/pypi/gensim\n\nGensim is a Python library for *topic modelling*, *document indexing* and *similarity retrieval* with large corpora.\nTarget audience is the *natural language processing* (NLP) and *information retrieval* (IR) community.\n\nFeatures\n---------\n\n* All algorithms are **memory-independent** w.r.t. 
the corpus size (can process input larger than RAM, streamed, out-of-core),\n* **Intuitive interfaces**\n\n * easy to plug in your own input corpus/datastream (simple streaming API)\n * easy to extend with other Vector Space algorithms (simple transformation API)\n\n* Efficient multicore implementations of popular algorithms, such as online **Latent Semantic Analysis (LSA/LSI/SVD)**,\n **Latent Dirichlet Allocation (LDA)**, **Random Projections (RP)**, **Hierarchical Dirichlet Process (HDP)** or **word2vec deep learning**.\n* **Distributed computing**: can run *Latent Semantic Analysis* and *Latent Dirichlet Allocation* on a cluster of computers.\n* Extensive `documentation and Jupyter Notebook tutorials <https://github.com/RaRe-Technologies/gensim/#documentation>`_.\n\n\nIf this feature list left you scratching your head, you can first read more about the `Vector\nSpace Model <http://en.wikipedia.org/wiki/Vector_space_model>`_ and `unsupervised\ndocument analysis <http://en.wikipedia.org/wiki/Latent_semantic_indexing>`_ on Wikipedia.\n\nInstallation\n------------\n\nThis software depends on `NumPy and Scipy <http://www.scipy.org/Download>`_, two Python packages for scientific computing.\nYou must have them installed prior to installing `gensim`.\n\nIt is also recommended you install a fast BLAS library before installing NumPy. This is optional, but using an optimized BLAS such as `ATLAS <http://math-atlas.sourceforge.net/>`_ or `OpenBLAS <http://xianyi.github.io/OpenBLAS/>`_ is known to improve performance by as much as an order of magnitude. On OS X, NumPy picks up the BLAS that comes with it automatically, so you don't need to do anything special.\n\nInstall the latest version of gensim::\n\n pip install --upgrade gensim\n\nOr, if you have instead downloaded and unzipped the `source tar.gz <http://pypi.python.org/pypi/gensim>`_ package::\n\n python setup.py install\n\n\nFor alternative modes of installation, see the `documentation <http://radimrehurek.com/gensim/install.html>`_.\n\nGensim is being `continuously tested <https://travis-ci.org/RaRe-Technologies/gensim>`_ under Python 3.5, 3.6, 3.7 and 3.8.\nSupport for Python 2.7 was dropped in gensim 4.0.0 – install gensim 3.8.3 if you must use Python 2.7.\n\n\nHow come gensim is so fast and memory efficient? Isn't it pure Python, and isn't Python slow and greedy?\n--------------------------------------------------------------------------------------------------------\n\nMany scientific algorithms can be expressed in terms of large matrix operations (see the BLAS note above). Gensim taps into these low-level BLAS libraries, by means of its dependency on NumPy. So while gensim-the-top-level-code is pure Python, it actually executes highly optimized Fortran/C under the hood, including multithreading (if your BLAS is so configured).\n\nMemory-wise, gensim makes heavy use of Python's built-in generators and iterators for streamed data processing. 
Memory efficiency was one of gensim's `design goals <http://radimrehurek.com/gensim/about.html>`_, and is a central feature of gensim, rather than something bolted on as an afterthought.\n\nDocumentation\n-------------\n* `QuickStart`_\n* `Tutorials`_\n* `Tutorial Videos`_\n* `Official Documentation and Walkthrough`_\n\nCiting gensim\n-------------\n\nWhen `citing gensim in academic papers and theses <https://scholar.google.cz/citations?view_op=view_citation&hl=en&user=9vG_kV0AAAAJ&citation_for_view=9vG_kV0AAAAJ:u-x6o8ySG0sC>`_, please use this BibTeX entry::\n\n @inproceedings{rehurek_lrec,\n title = {{Software Framework for Topic Modelling with Large Corpora}},\n author = {Radim {\\\\v R}eh{\\\\r u}{\\\\v r}ek and Petr Sojka},\n booktitle = {{Proceedings of the LREC 2010 Workshop on New\n Challenges for NLP Frameworks}},\n pages = {45--50},\n year = 2010,\n month = May,\n day = 22,\n publisher = {ELRA},\n address = {Valletta, Malta},\n language={English}\n }\n\n----------------\n\nGensim is open source software released under the `GNU LGPLv2.1 license <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html>`_.\nCopyright (c) 2009-now Radim Rehurek\n\n|Analytics|_\n\n.. |Analytics| image:: https://ga-beacon.appspot.com/UA-24066335-5/your-repo/page-name\n.. _Analytics: https://github.com/igrigorik/ga-beacon\n.. _Official Documentation and Walkthrough: http://radimrehurek.com/gensim/\n.. _Tutorials: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials\n.. _Tutorial Videos: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#videos\n.. _QuickStart: https://radimrehurek.com/gensim/gensim_numfocus/auto_examples/core/run_core_concepts.html\n\n\"\"\"\n\ndistributed_env = ['Pyro4 >= 4.27']\n\nwin_testenv = [\n 'pytest',\n 'pytest-rerunfailures',\n 'mock',\n 'cython',\n 'nmslib',\n 'pyemd',\n 'testfixtures',\n 'Morfessor==2.0.2a4',\n 'python-Levenshtein >= 0.10.2',\n 'scikit-learn',\n # The following packages are commented out because they don't install on Windows. So skip the\n # related tests in AppVeyor. We still test them in Linux via Travis, see linux_testenv below.\n # See https://github.com/RaRe-Technologies/gensim/pull/2814\n # 'tensorflow',\n # 'keras',\n]\n\n# This list partially duplicates requirements_docs.txt.\n# The main difference is that we don't include version pins here unless\n# absolutely necessary, whereas requirements_docs.txt includes pins for\n# everything, by design.\n#\n# For more info about the difference between the two:\n#\n# https://packaging.python.org/discussions/install-requires-vs-requirements/\n#\nvisdom_req = ['visdom >= 0.1.8, != 0.1.8.7']\ndocs_testenv = win_testenv + distributed_env + visdom_req + [\n 'sphinx <= 2.4.4', # avoid `sphinx >= 3.0` that breaks the build\n 'sphinx-gallery',\n 'sphinxcontrib.programoutput',\n 'sphinxcontrib-napoleon',\n 'matplotlib', # expected by sphinx-gallery\n 'plotly',\n #\n # Pattern is a PITA to install, it requires mysqlclient, which in turn\n # requires MySQL dev tools be installed. We don't need it for building\n # documentation.\n #\n # 'Pattern==3.6', # Need 3.6 or later for Py3 support\n 'memory_profiler',\n 'annoy',\n 'Pyro4',\n 'scikit-learn',\n 'nltk',\n 'testfixtures',\n 'statsmodels',\n 'pyemd',\n 'pandas',\n]\n\n# Add additional requirements for testing on Linux. 
We skip some tests on Windows,\n# because the libraries below are too tricky to install there.\nlinux_testenv = win_testenv[:] + visdom_req\nif sys.version_info >= (3, 7):\n # HACK: Installing tensorflow causes a segfault in Travis on py3.6. Other Pythons work – a mystery.\n # See https://github.com/RaRe-Technologies/gensim/pull/2814#issuecomment-621477948\n linux_testenv += [\n 'tensorflow',\n 'keras==2.3.1',\n ]\n\nNUMPY_STR = 'numpy >= 1.11.3'\n#\n# We pin the Cython version for reproducibility. We expect our extensions\n# to build with any sane version of Cython, so we should update this pin\n# periodically.\n#\nCYTHON_STR = 'Cython==0.29.14'\n\ninstall_requires = [\n NUMPY_STR,\n 'scipy >= 0.18.1',\n 'six >= 1.5.0',\n 'smart_open >= 1.8.1',\n]\n\nsetup_requires = [NUMPY_STR]\n\nif need_cython():\n install_requires.append(CYTHON_STR)\n setup_requires.append(CYTHON_STR)\n\nsetup(\n name='gensim',\n version='3.8.1',\n description='Python framework for fast Vector Space Modelling',\n long_description=LONG_DESCRIPTION,\n\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=find_packages(),\n\n author=u'Radim Rehurek',\n author_email='[email protected]',\n\n url='http://radimrehurek.com/gensim',\n download_url='http://pypi.python.org/pypi/gensim',\n\n license='LGPLv2.1',\n\n keywords='Singular Value Decomposition, SVD, Latent Semantic Indexing, '\n 'LSA, LSI, Latent Dirichlet Allocation, LDA, '\n 'Hierarchical Dirichlet Process, HDP, Random Projections, '\n 'TFIDF, word2vec',\n\n platforms='any',\n\n zip_safe=False,\n\n classifiers=[ # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Text Processing :: Linguistic',\n ],\n\n test_suite=\"gensim.test\",\n python_requires='>=3.5',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=linux_testenv,\n extras_require={\n 'distributed': distributed_env,\n 'test-win': win_testenv,\n 'test': linux_testenv,\n 'docs': docs_testenv,\n },\n\n include_package_data=True,\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2014 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nRun with::\n\n python ./setup.py install\n\"\"\"\n\nimport distutils.cmd\nimport distutils.log\nimport itertools\nimport os.path\nimport platform\nimport shutil\nimport sys\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.build_ext import build_ext\n\n\nc_extensions = {\n 'gensim.models.word2vec_inner': 'gensim/models/word2vec_inner.c',\n 'gensim.corpora._mmreader': 'gensim/corpora/_mmreader.c',\n 'gensim.models.fasttext_inner': 'gensim/models/fasttext_inner.c',\n 'gensim.models._utils_any2vec': 'gensim/models/_utils_any2vec.c',\n 'gensim._matutils': 'gensim/_matutils.c',\n 'gensim.models.nmf_pgd': 'gensim/models/nmf_pgd.c',\n}\n\ncpp_extensions = {\n 'gensim.models.doc2vec_inner': 'gensim/models/doc2vec_inner.cpp',\n 'gensim.models.word2vec_corpusfile': 'gensim/models/word2vec_corpusfile.cpp',\n 'gensim.models.fasttext_corpusfile': 'gensim/models/fasttext_corpusfile.cpp',\n 'gensim.models.doc2vec_corpusfile': 'gensim/models/doc2vec_corpusfile.cpp',\n}\n\n\ndef need_cython():\n \"\"\"Return True if we need Cython to translate any of the extensions.\n\n If the extensions have already been translated to C/C++, then we don't need\n to install Cython and perform the translation.\"\"\"\n expected = list(c_extensions.values()) + list(cpp_extensions.values())\n return any([not os.path.isfile(f) for f in expected])\n\n\ndef make_c_ext(use_cython=False):\n for module, source in c_extensions.items():\n if use_cython:\n source = source.replace('.c', '.pyx')\n yield Extension(module, sources=[source], language='c')\n\n\ndef make_cpp_ext(use_cython=False):\n extra_args = []\n system = platform.system()\n\n if system == 'Linux':\n extra_args.append('-std=c++11')\n elif system == 'Darwin':\n extra_args.extend(['-stdlib=libc++', '-std=c++11'])\n\n for module, source in cpp_extensions.items():\n if use_cython:\n source = source.replace('.cpp', '.pyx')\n yield Extension(\n module,\n sources=[source],\n language='c++',\n extra_compile_args=extra_args,\n extra_link_args=extra_args,\n )\n\n\n#\n# We use use_cython=False here for two reasons:\n#\n# 1. Cython may not be available at this stage\n# 2. 
The actual translation from Cython to C/C++ happens inside CustomBuildExt\n#\next_modules = list(itertools.chain(make_c_ext(use_cython=False), make_cpp_ext(use_cython=False)))\n\n\nclass CustomBuildExt(build_ext):\n \"\"\"Custom build_ext action with bootstrapping.\n\n We need this in order to use numpy and Cython in this script without\n importing them at module level, because they may not be available yet.\n \"\"\"\n #\n # http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py\n #\n def finalize_options(self):\n build_ext.finalize_options(self)\n # Prevent numpy from thinking it is still in its setup process:\n # https://docs.python.org/2/library/__builtin__.html#module-__builtin__\n __builtins__.__NUMPY_SETUP__ = False\n\n import numpy\n self.include_dirs.append(numpy.get_include())\n\n if need_cython():\n import Cython.Build\n Cython.Build.cythonize(list(make_c_ext(use_cython=True)))\n Cython.Build.cythonize(list(make_cpp_ext(use_cython=True)))\n\n\nclass CleanExt(distutils.cmd.Command):\n description = 'Remove C sources, C++ sources and binaries for gensim extensions'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n for root, dirs, files in os.walk('gensim'):\n files = [\n os.path.join(root, f)\n for f in files\n if os.path.splitext(f)[1] in ('.c', '.cpp', '.so')\n ]\n for f in files:\n self.announce('removing %s' % f, level=distutils.log.INFO)\n os.unlink(f)\n\n if os.path.isdir('build'):\n self.announce('recursively removing build', level=distutils.log.INFO)\n shutil.rmtree('build')\n\n\ncmdclass = {'build_ext': CustomBuildExt, 'clean_ext': CleanExt}\n\nWHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'}\nif WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):\n import wheelhouse_uploader.cmd\n cmdclass.update(vars(wheelhouse_uploader.cmd))\n\n\nLONG_DESCRIPTION = u\"\"\"\n==============================================\ngensim -- Topic Modelling in Python\n==============================================\n\n|Travis|_\n|Wheel|_\n\n.. |Travis| image:: https://img.shields.io/travis/RaRe-Technologies/gensim/develop.svg\n.. |Wheel| image:: https://img.shields.io/pypi/wheel/gensim.svg\n\n.. _Travis: https://travis-ci.org/RaRe-Technologies/gensim\n.. _Downloads: https://pypi.python.org/pypi/gensim\n.. _License: http://radimrehurek.com/gensim/about.html\n.. _Wheel: https://pypi.python.org/pypi/gensim\n\nGensim is a Python library for *topic modelling*, *document indexing* and *similarity retrieval* with large corpora.\nTarget audience is the *natural language processing* (NLP) and *information retrieval* (IR) community.\n\nFeatures\n---------\n\n* All algorithms are **memory-independent** w.r.t. 
the corpus size (can process input larger than RAM, streamed, out-of-core),\n* **Intuitive interfaces**\n\n * easy to plug in your own input corpus/datastream (simple streaming API)\n * easy to extend with other Vector Space algorithms (simple transformation API)\n\n* Efficient multicore implementations of popular algorithms, such as online **Latent Semantic Analysis (LSA/LSI/SVD)**,\n **Latent Dirichlet Allocation (LDA)**, **Random Projections (RP)**, **Hierarchical Dirichlet Process (HDP)** or **word2vec deep learning**.\n* **Distributed computing**: can run *Latent Semantic Analysis* and *Latent Dirichlet Allocation* on a cluster of computers.\n* Extensive `documentation and Jupyter Notebook tutorials <https://github.com/RaRe-Technologies/gensim/#documentation>`_.\n\n\nIf this feature list left you scratching your head, you can first read more about the `Vector\nSpace Model <http://en.wikipedia.org/wiki/Vector_space_model>`_ and `unsupervised\ndocument analysis <http://en.wikipedia.org/wiki/Latent_semantic_indexing>`_ on Wikipedia.\n\nInstallation\n------------\n\nThis software depends on `NumPy and Scipy <http://www.scipy.org/Download>`_, two Python packages for scientific computing.\nYou must have them installed prior to installing `gensim`.\n\nIt is also recommended you install a fast BLAS library before installing NumPy. This is optional, but using an optimized BLAS such as `ATLAS <http://math-atlas.sourceforge.net/>`_ or `OpenBLAS <http://xianyi.github.io/OpenBLAS/>`_ is known to improve performance by as much as an order of magnitude. On OS X, NumPy picks up the BLAS that comes with it automatically, so you don't need to do anything special.\n\nInstall the latest version of gensim::\n\n pip install --upgrade gensim\n\nOr, if you have instead downloaded and unzipped the `source tar.gz <http://pypi.python.org/pypi/gensim>`_ package::\n\n python setup.py install\n\n\nFor alternative modes of installation, see the `documentation <http://radimrehurek.com/gensim/install.html>`_.\n\nGensim is being `continuously tested <https://travis-ci.org/RaRe-Technologies/gensim>`_ under Python 3.5, 3.6, 3.7 and 3.8.\nSupport for Python 2.7 was dropped in gensim 4.0.0 – install gensim 3.8.3 if you must use Python 2.7.\n\n\nHow come gensim is so fast and memory efficient? Isn't it pure Python, and isn't Python slow and greedy?\n--------------------------------------------------------------------------------------------------------\n\nMany scientific algorithms can be expressed in terms of large matrix operations (see the BLAS note above). Gensim taps into these low-level BLAS libraries, by means of its dependency on NumPy. So while gensim-the-top-level-code is pure Python, it actually executes highly optimized Fortran/C under the hood, including multithreading (if your BLAS is so configured).\n\nMemory-wise, gensim makes heavy use of Python's built-in generators and iterators for streamed data processing. 
Memory efficiency was one of gensim's `design goals <http://radimrehurek.com/gensim/about.html>`_, and is a central feature of gensim, rather than something bolted on as an afterthought.\n\nDocumentation\n-------------\n* `QuickStart`_\n* `Tutorials`_\n* `Tutorial Videos`_\n* `Official Documentation and Walkthrough`_\n\nCiting gensim\n-------------\n\nWhen `citing gensim in academic papers and theses <https://scholar.google.cz/citations?view_op=view_citation&hl=en&user=9vG_kV0AAAAJ&citation_for_view=9vG_kV0AAAAJ:u-x6o8ySG0sC>`_, please use this BibTeX entry::\n\n @inproceedings{rehurek_lrec,\n title = {{Software Framework for Topic Modelling with Large Corpora}},\n author = {Radim {\\\\v R}eh{\\\\r u}{\\\\v r}ek and Petr Sojka},\n booktitle = {{Proceedings of the LREC 2010 Workshop on New\n Challenges for NLP Frameworks}},\n pages = {45--50},\n year = 2010,\n month = May,\n day = 22,\n publisher = {ELRA},\n address = {Valletta, Malta},\n language={English}\n }\n\n----------------\n\nGensim is open source software released under the `GNU LGPLv2.1 license <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html>`_.\nCopyright (c) 2009-now Radim Rehurek\n\n|Analytics|_\n\n.. |Analytics| image:: https://ga-beacon.appspot.com/UA-24066335-5/your-repo/page-name\n.. _Analytics: https://github.com/igrigorik/ga-beacon\n.. _Official Documentation and Walkthrough: http://radimrehurek.com/gensim/\n.. _Tutorials: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials\n.. _Tutorial Videos: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#videos\n.. _QuickStart: https://radimrehurek.com/gensim/gensim_numfocus/auto_examples/core/run_core_concepts.html\n\n\"\"\"\n\ndistributed_env = ['Pyro4 >= 4.27']\n\nwin_testenv = [\n 'pytest',\n 'pytest-rerunfailures',\n 'mock',\n 'cython',\n 'nmslib',\n 'pyemd',\n 'testfixtures',\n 'Morfessor==2.0.2a4',\n 'python-Levenshtein >= 0.10.2',\n 'scikit-learn',\n # The following packages are commented out because they don't install on Windows. So skip the\n # related tests in AppVeyor. We still test them in Linux via Travis, see linux_testenv below.\n # See https://github.com/RaRe-Technologies/gensim/pull/2814\n # 'tensorflow',\n # 'keras',\n]\n\n# This list partially duplicates requirements_docs.txt.\n# The main difference is that we don't include version pins here unless\n# absolutely necessary, whereas requirements_docs.txt includes pins for\n# everything, by design.\n#\n# For more info about the difference between the two:\n#\n# https://packaging.python.org/discussions/install-requires-vs-requirements/\n#\nvisdom_req = ['visdom >= 0.1.8, != 0.1.8.7']\ndocs_testenv = win_testenv + distributed_env + visdom_req + [\n 'sphinx <= 2.4.4', # avoid `sphinx >= 3.0` that breaks the build\n 'sphinx-gallery',\n 'sphinxcontrib.programoutput',\n 'sphinxcontrib-napoleon',\n 'matplotlib', # expected by sphinx-gallery\n 'plotly',\n #\n # Pattern is a PITA to install, it requires mysqlclient, which in turn\n # requires MySQL dev tools be installed. We don't need it for building\n # documentation.\n #\n # 'Pattern==3.6', # Need 3.6 or later for Py3 support\n 'memory_profiler',\n 'annoy',\n 'Pyro4',\n 'scikit-learn',\n 'nltk',\n 'testfixtures',\n 'statsmodels',\n 'pyemd',\n 'pandas',\n]\n\n# Add additional requirements for testing on Linux. 
We skip some tests on Windows,\n# because the libraries below are too tricky to install there.\nlinux_testenv = win_testenv[:] + visdom_req\nif sys.version_info >= (3, 7):\n # HACK: Installing tensorflow causes a segfault in Travis on py3.6. Other Pythons work – a mystery.\n # See https://github.com/RaRe-Technologies/gensim/pull/2814#issuecomment-621477948\n linux_testenv += [\n 'tensorflow',\n 'keras',\n ]\n\nNUMPY_STR = 'numpy >= 1.11.3'\n#\n# We pin the Cython version for reproducibility. We expect our extensions\n# to build with any sane version of Cython, so we should update this pin\n# periodically.\n#\nCYTHON_STR = 'Cython==0.29.14'\n\ninstall_requires = [\n NUMPY_STR,\n 'scipy >= 0.18.1',\n 'six >= 1.5.0',\n 'smart_open >= 1.8.1',\n]\n\nsetup_requires = [NUMPY_STR]\n\nif need_cython():\n install_requires.append(CYTHON_STR)\n setup_requires.append(CYTHON_STR)\n\nsetup(\n name='gensim',\n version='3.8.1',\n description='Python framework for fast Vector Space Modelling',\n long_description=LONG_DESCRIPTION,\n\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n packages=find_packages(),\n\n author=u'Radim Rehurek',\n author_email='[email protected]',\n\n url='http://radimrehurek.com/gensim',\n download_url='http://pypi.python.org/pypi/gensim',\n\n license='LGPLv2.1',\n\n keywords='Singular Value Decomposition, SVD, Latent Semantic Indexing, '\n 'LSA, LSI, Latent Dirichlet Allocation, LDA, '\n 'Hierarchical Dirichlet Process, HDP, Random Projections, '\n 'TFIDF, word2vec',\n\n platforms='any',\n\n zip_safe=False,\n\n classifiers=[ # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Text Processing :: Linguistic',\n ],\n\n test_suite=\"gensim.test\",\n python_requires='>=3.5',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=linux_testenv,\n extras_require={\n 'distributed': distributed_env,\n 'test-win': win_testenv,\n 'test': linux_testenv,\n 'docs': docs_testenv,\n },\n\n include_package_data=True,\n)\n", "path": "setup.py" } ]
diff --git a/gensim/test/test_keras_integration.py b/gensim/test/test_keras_integration.py index bad0bb8b95..cc7af1892d 100644 --- a/gensim/test/test_keras_integration.py +++ b/gensim/test/test_keras_integration.py @@ -59,7 +59,7 @@ def testEmbeddingLayerCosineSim(self): embedding_b = embedding_layer(input_b) similarity = dot([embedding_a, embedding_b], axes=2, normalize=True) - model = Model(input=[input_a, input_b], output=similarity) + model = Model(inputs=[input_a, input_b], outputs=similarity) model.compile(optimizer='sgd', loss='mse') word_a = 'graph' diff --git a/setup.py b/setup.py index b8545ab61d..38b7319f96 100644 --- a/setup.py +++ b/setup.py @@ -314,7 +314,7 @@ def run(self): # See https://github.com/RaRe-Technologies/gensim/pull/2814#issuecomment-621477948 linux_testenv += [ 'tensorflow', - 'keras==2.3.1', + 'keras', ] NUMPY_STR = 'numpy >= 1.11.3'
pallets__click-2544
zsh completion requires pressing tab twice ``` ❯ black --<TAB> # Nothing will happen ❯ black --<TAB> unsorted --code Format the code passed in as a string. ... # work after second time ``` Expected: ``` ❯ black --<TAB> unsorted --code Format the code passed in as a string. ... # work after first time ``` <details><summary>Original investigation</summary> ```zsh ❯ _BLACK_COMPLETE=zsh_source black #compdef black _black_completion() { local -a completions local -a completions_with_descriptions local -a response (( ! $+commands[black] )) && return 1 response=("${(@f)$(env COMP_WORDS="${words[*]}" COMP_CWORD=$((CURRENT-1)) _BLACK_COMPLETE=zsh_complete black)}") for type key descr in ${response}; do if [[ "$type" == "plain" ]]; then if [[ "$descr" == "_" ]]; then completions+=("$key") else completions_with_descriptions+=("$key":"$descr") fi elif [[ "$type" == "dir" ]]; then _path_files -/ elif [[ "$type" == "file" ]]; then _path_files -f fi done if [ -n "$completions_with_descriptions" ]; then _describe -V unsorted completions_with_descriptions -U fi if [ -n "$completions" ]; then compadd -U -V unsorted -a completions fi } compdef _black_completion black; ``` that is equivalent to ```zsh _black() { _black_completion() { local -a completions local -a completions_with_descriptions local -a response (( ! $+commands[black] )) && return 1 response=("${(@f)$(env COMP_WORDS="${words[*]}" COMP_CWORD=$((CURRENT-1)) _BLACK_COMPLETE=zsh_complete black)}") for type key descr in ${response}; do if [[ "$type" == "plain" ]]; then if [[ "$descr" == "_" ]]; then completions+=("$key") else completions_with_descriptions+=("$key":"$descr") fi elif [[ "$type" == "dir" ]]; then _path_files -/ elif [[ "$type" == "file" ]]; then _path_files -f fi done if [ -n "$completions_with_descriptions" ]; then _describe -V unsorted completions_with_descriptions -U fi if [ -n "$completions" ]; then compadd -U -V unsorted -a completions fi } compdef _black_completion black; } compdef _black black # because first line comment ``` So, in the first time, `compdef _black black` tell zsh the completion function is `_black()`, but `_black()` not return any completion items, only define a new function named `_black_completion` and `compdef _black_completion black`. So when the second time, it work. The fix method is remove the nested function definition: ```zsh ❯ _BLACK_COMPLETE=zsh_source black #compdef black local -a completions local -a completions_with_descriptions local -a response (( ! $+commands[black] )) && return 1 response=("${(@f)$(env COMP_WORDS="${words[*]}" COMP_CWORD=$((CURRENT-1)) _BLACK_COMPLETE=zsh_complete black)}") for type key descr in ${response}; do if [[ "$type" == "plain" ]]; then if [[ "$descr" == "_" ]]; then completions+=("$key") else completions_with_descriptions+=("$key":"$descr") fi elif [[ "$type" == "dir" ]]; then _path_files -/ elif [[ "$type" == "file" ]]; then _path_files -f fi done if [ -n "$completions_with_descriptions" ]; then _describe -V unsorted completions_with_descriptions -U fi if [ -n "$completions" ]; then compadd -U -V unsorted -a completions fi ``` </details>
[ { "content": "import os\nimport re\nimport typing as t\nfrom gettext import gettext as _\n\nfrom .core import Argument\nfrom .core import BaseCommand\nfrom .core import Context\nfrom .core import MultiCommand\nfrom .core import Option\nfrom .core import Parameter\nfrom .core import ParameterSource\nfrom .parser import split_arg_string\nfrom .utils import echo\n\n\ndef shell_complete(\n cli: BaseCommand,\n ctx_args: t.MutableMapping[str, t.Any],\n prog_name: str,\n complete_var: str,\n instruction: str,\n) -> int:\n \"\"\"Perform shell completion for the given CLI program.\n\n :param cli: Command being called.\n :param ctx_args: Extra arguments to pass to\n ``cli.make_context``.\n :param prog_name: Name of the executable in the shell.\n :param complete_var: Name of the environment variable that holds\n the completion instruction.\n :param instruction: Value of ``complete_var`` with the completion\n instruction and shell, in the form ``instruction_shell``.\n :return: Status code to exit with.\n \"\"\"\n shell, _, instruction = instruction.partition(\"_\")\n comp_cls = get_completion_class(shell)\n\n if comp_cls is None:\n return 1\n\n comp = comp_cls(cli, ctx_args, prog_name, complete_var)\n\n if instruction == \"source\":\n echo(comp.source())\n return 0\n\n if instruction == \"complete\":\n echo(comp.complete())\n return 0\n\n return 1\n\n\nclass CompletionItem:\n \"\"\"Represents a completion value and metadata about the value. The\n default metadata is ``type`` to indicate special shell handling,\n and ``help`` if a shell supports showing a help string next to the\n value.\n\n Arbitrary parameters can be passed when creating the object, and\n accessed using ``item.attr``. If an attribute wasn't passed,\n accessing it returns ``None``.\n\n :param value: The completion suggestion.\n :param type: Tells the shell script to provide special completion\n support for the type. Click uses ``\"dir\"`` and ``\"file\"``.\n :param help: String shown next to the value if supported.\n :param kwargs: Arbitrary metadata. The built-in implementations\n don't use this, but custom type completions paired with custom\n shell support could use it.\n \"\"\"\n\n __slots__ = (\"value\", \"type\", \"help\", \"_info\")\n\n def __init__(\n self,\n value: t.Any,\n type: str = \"plain\",\n help: t.Optional[str] = None,\n **kwargs: t.Any,\n ) -> None:\n self.value: t.Any = value\n self.type: str = type\n self.help: t.Optional[str] = help\n self._info = kwargs\n\n def __getattr__(self, name: str) -> t.Any:\n return self._info.get(name)\n\n\n# Only Bash >= 4.4 has the nosort option.\n_SOURCE_BASH = \"\"\"\\\n%(complete_func)s() {\n local IFS=$'\\\\n'\n local response\n\n response=$(env COMP_WORDS=\"${COMP_WORDS[*]}\" COMP_CWORD=$COMP_CWORD \\\n%(complete_var)s=bash_complete $1)\n\n for completion in $response; do\n IFS=',' read type value <<< \"$completion\"\n\n if [[ $type == 'dir' ]]; then\n COMPREPLY=()\n compopt -o dirnames\n elif [[ $type == 'file' ]]; then\n COMPREPLY=()\n compopt -o default\n elif [[ $type == 'plain' ]]; then\n COMPREPLY+=($value)\n fi\n done\n\n return 0\n}\n\n%(complete_func)s_setup() {\n complete -o nosort -F %(complete_func)s %(prog_name)s\n}\n\n%(complete_func)s_setup;\n\"\"\"\n\n_SOURCE_ZSH = \"\"\"\\\n#compdef %(prog_name)s\n\n%(complete_func)s() {\n local -a completions\n local -a completions_with_descriptions\n local -a response\n (( ! 
$+commands[%(prog_name)s] )) && return 1\n\n response=(\"${(@f)$(env COMP_WORDS=\"${words[*]}\" COMP_CWORD=$((CURRENT-1)) \\\n%(complete_var)s=zsh_complete %(prog_name)s)}\")\n\n for type key descr in ${response}; do\n if [[ \"$type\" == \"plain\" ]]; then\n if [[ \"$descr\" == \"_\" ]]; then\n completions+=(\"$key\")\n else\n completions_with_descriptions+=(\"$key\":\"$descr\")\n fi\n elif [[ \"$type\" == \"dir\" ]]; then\n _path_files -/\n elif [[ \"$type\" == \"file\" ]]; then\n _path_files -f\n fi\n done\n\n if [ -n \"$completions_with_descriptions\" ]; then\n _describe -V unsorted completions_with_descriptions -U\n fi\n\n if [ -n \"$completions\" ]; then\n compadd -U -V unsorted -a completions\n fi\n}\n\ncompdef %(complete_func)s %(prog_name)s;\n\"\"\"\n\n_SOURCE_FISH = \"\"\"\\\nfunction %(complete_func)s\n set -l response (env %(complete_var)s=fish_complete COMP_WORDS=(commandline -cp) \\\nCOMP_CWORD=(commandline -t) %(prog_name)s)\n\n for completion in $response\n set -l metadata (string split \",\" $completion)\n\n if test $metadata[1] = \"dir\"\n __fish_complete_directories $metadata[2]\n else if test $metadata[1] = \"file\"\n __fish_complete_path $metadata[2]\n else if test $metadata[1] = \"plain\"\n echo $metadata[2]\n end\n end\nend\n\ncomplete --no-files --command %(prog_name)s --arguments \\\n\"(%(complete_func)s)\"\n\"\"\"\n\n\nclass ShellComplete:\n \"\"\"Base class for providing shell completion support. A subclass for\n a given shell will override attributes and methods to implement the\n completion instructions (``source`` and ``complete``).\n\n :param cli: Command being called.\n :param prog_name: Name of the executable in the shell.\n :param complete_var: Name of the environment variable that holds\n the completion instruction.\n\n .. versionadded:: 8.0\n \"\"\"\n\n name: t.ClassVar[str]\n \"\"\"Name to register the shell as with :func:`add_completion_class`.\n This is used in completion instructions (``{name}_source`` and\n ``{name}_complete``).\n \"\"\"\n\n source_template: t.ClassVar[str]\n \"\"\"Completion script template formatted by :meth:`source`. This must\n be provided by subclasses.\n \"\"\"\n\n def __init__(\n self,\n cli: BaseCommand,\n ctx_args: t.MutableMapping[str, t.Any],\n prog_name: str,\n complete_var: str,\n ) -> None:\n self.cli = cli\n self.ctx_args = ctx_args\n self.prog_name = prog_name\n self.complete_var = complete_var\n\n @property\n def func_name(self) -> str:\n \"\"\"The name of the shell function defined by the completion\n script.\n \"\"\"\n safe_name = re.sub(r\"\\W*\", \"\", self.prog_name.replace(\"-\", \"_\"), re.ASCII)\n return f\"_{safe_name}_completion\"\n\n def source_vars(self) -> t.Dict[str, t.Any]:\n \"\"\"Vars for formatting :attr:`source_template`.\n\n By default this provides ``complete_func``, ``complete_var``,\n and ``prog_name``.\n \"\"\"\n return {\n \"complete_func\": self.func_name,\n \"complete_var\": self.complete_var,\n \"prog_name\": self.prog_name,\n }\n\n def source(self) -> str:\n \"\"\"Produce the shell script that defines the completion\n function. By default this ``%``-style formats\n :attr:`source_template` with the dict returned by\n :meth:`source_vars`.\n \"\"\"\n return self.source_template % self.source_vars()\n\n def get_completion_args(self) -> t.Tuple[t.List[str], str]:\n \"\"\"Use the env vars defined by the shell script to return a\n tuple of ``args, incomplete``. 
This must be implemented by\n subclasses.\n \"\"\"\n raise NotImplementedError\n\n def get_completions(\n self, args: t.List[str], incomplete: str\n ) -> t.List[CompletionItem]:\n \"\"\"Determine the context and last complete command or parameter\n from the complete args. Call that object's ``shell_complete``\n method to get the completions for the incomplete value.\n\n :param args: List of complete args before the incomplete value.\n :param incomplete: Value being completed. May be empty.\n \"\"\"\n ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args)\n obj, incomplete = _resolve_incomplete(ctx, args, incomplete)\n return obj.shell_complete(ctx, incomplete)\n\n def format_completion(self, item: CompletionItem) -> str:\n \"\"\"Format a completion item into the form recognized by the\n shell script. This must be implemented by subclasses.\n\n :param item: Completion item to format.\n \"\"\"\n raise NotImplementedError\n\n def complete(self) -> str:\n \"\"\"Produce the completion data to send back to the shell.\n\n By default this calls :meth:`get_completion_args`, gets the\n completions, then calls :meth:`format_completion` for each\n completion.\n \"\"\"\n args, incomplete = self.get_completion_args()\n completions = self.get_completions(args, incomplete)\n out = [self.format_completion(item) for item in completions]\n return \"\\n\".join(out)\n\n\nclass BashComplete(ShellComplete):\n \"\"\"Shell completion for Bash.\"\"\"\n\n name = \"bash\"\n source_template = _SOURCE_BASH\n\n def _check_version(self) -> None:\n import subprocess\n\n output = subprocess.run(\n [\"bash\", \"-c\", 'echo \"${BASH_VERSION}\"'], stdout=subprocess.PIPE\n )\n match = re.search(r\"^(\\d+)\\.(\\d+)\\.\\d+\", output.stdout.decode())\n\n if match is not None:\n major, minor = match.groups()\n\n if major < \"4\" or major == \"4\" and minor < \"4\":\n raise RuntimeError(\n _(\n \"Shell completion is not supported for Bash\"\n \" versions older than 4.4.\"\n )\n )\n else:\n raise RuntimeError(\n _(\"Couldn't detect Bash version, shell completion is not supported.\")\n )\n\n def source(self) -> str:\n self._check_version()\n return super().source()\n\n def get_completion_args(self) -> t.Tuple[t.List[str], str]:\n cwords = split_arg_string(os.environ[\"COMP_WORDS\"])\n cword = int(os.environ[\"COMP_CWORD\"])\n args = cwords[1:cword]\n\n try:\n incomplete = cwords[cword]\n except IndexError:\n incomplete = \"\"\n\n return args, incomplete\n\n def format_completion(self, item: CompletionItem) -> str:\n return f\"{item.type},{item.value}\"\n\n\nclass ZshComplete(ShellComplete):\n \"\"\"Shell completion for Zsh.\"\"\"\n\n name = \"zsh\"\n source_template = _SOURCE_ZSH\n\n def get_completion_args(self) -> t.Tuple[t.List[str], str]:\n cwords = split_arg_string(os.environ[\"COMP_WORDS\"])\n cword = int(os.environ[\"COMP_CWORD\"])\n args = cwords[1:cword]\n\n try:\n incomplete = cwords[cword]\n except IndexError:\n incomplete = \"\"\n\n return args, incomplete\n\n def format_completion(self, item: CompletionItem) -> str:\n return f\"{item.type}\\n{item.value}\\n{item.help if item.help else '_'}\"\n\n\nclass FishComplete(ShellComplete):\n \"\"\"Shell completion for Fish.\"\"\"\n\n name = \"fish\"\n source_template = _SOURCE_FISH\n\n def get_completion_args(self) -> t.Tuple[t.List[str], str]:\n cwords = split_arg_string(os.environ[\"COMP_WORDS\"])\n incomplete = os.environ[\"COMP_CWORD\"]\n args = cwords[1:]\n\n # Fish stores the partial word in both COMP_WORDS and\n # COMP_CWORD, remove it from complete args.\n 
if incomplete and args and args[-1] == incomplete:\n args.pop()\n\n return args, incomplete\n\n def format_completion(self, item: CompletionItem) -> str:\n if item.help:\n return f\"{item.type},{item.value}\\t{item.help}\"\n\n return f\"{item.type},{item.value}\"\n\n\nShellCompleteType = t.TypeVar(\"ShellCompleteType\", bound=t.Type[ShellComplete])\n\n\n_available_shells: t.Dict[str, t.Type[ShellComplete]] = {\n \"bash\": BashComplete,\n \"fish\": FishComplete,\n \"zsh\": ZshComplete,\n}\n\n\ndef add_completion_class(\n cls: ShellCompleteType, name: t.Optional[str] = None\n) -> ShellCompleteType:\n \"\"\"Register a :class:`ShellComplete` subclass under the given name.\n The name will be provided by the completion instruction environment\n variable during completion.\n\n :param cls: The completion class that will handle completion for the\n shell.\n :param name: Name to register the class under. Defaults to the\n class's ``name`` attribute.\n \"\"\"\n if name is None:\n name = cls.name\n\n _available_shells[name] = cls\n\n return cls\n\n\ndef get_completion_class(shell: str) -> t.Optional[t.Type[ShellComplete]]:\n \"\"\"Look up a registered :class:`ShellComplete` subclass by the name\n provided by the completion instruction environment variable. If the\n name isn't registered, returns ``None``.\n\n :param shell: Name the class is registered under.\n \"\"\"\n return _available_shells.get(shell)\n\n\ndef _is_incomplete_argument(ctx: Context, param: Parameter) -> bool:\n \"\"\"Determine if the given parameter is an argument that can still\n accept values.\n\n :param ctx: Invocation context for the command represented by the\n parsed complete args.\n :param param: Argument object being checked.\n \"\"\"\n if not isinstance(param, Argument):\n return False\n\n assert param.name is not None\n value = ctx.params[param.name]\n return (\n param.nargs == -1\n or ctx.get_parameter_source(param.name) is not ParameterSource.COMMANDLINE\n or (\n param.nargs > 1\n and isinstance(value, (tuple, list))\n and len(value) < param.nargs\n )\n )\n\n\ndef _start_of_option(ctx: Context, value: str) -> bool:\n \"\"\"Check if the value looks like the start of an option.\"\"\"\n if not value:\n return False\n\n c = value[0]\n return c in ctx._opt_prefixes\n\n\ndef _is_incomplete_option(ctx: Context, args: t.List[str], param: Parameter) -> bool:\n \"\"\"Determine if the given parameter is an option that needs a value.\n\n :param args: List of complete args before the incomplete value.\n :param param: Option object being checked.\n \"\"\"\n if not isinstance(param, Option):\n return False\n\n if param.is_flag or param.count:\n return False\n\n last_option = None\n\n for index, arg in enumerate(reversed(args)):\n if index + 1 > param.nargs:\n break\n\n if _start_of_option(ctx, arg):\n last_option = arg\n\n return last_option is not None and last_option in param.opts\n\n\ndef _resolve_context(\n cli: BaseCommand,\n ctx_args: t.MutableMapping[str, t.Any],\n prog_name: str,\n args: t.List[str],\n) -> Context:\n \"\"\"Produce the context hierarchy starting with the command and\n traversing the complete arguments. 
This only follows the commands,\n it doesn't trigger input prompts or callbacks.\n\n :param cli: Command being called.\n :param prog_name: Name of the executable in the shell.\n :param args: List of complete args before the incomplete value.\n \"\"\"\n ctx_args[\"resilient_parsing\"] = True\n ctx = cli.make_context(prog_name, args.copy(), **ctx_args)\n args = ctx.protected_args + ctx.args\n\n while args:\n command = ctx.command\n\n if isinstance(command, MultiCommand):\n if not command.chain:\n name, cmd, args = command.resolve_command(ctx, args)\n\n if cmd is None:\n return ctx\n\n ctx = cmd.make_context(name, args, parent=ctx, resilient_parsing=True)\n args = ctx.protected_args + ctx.args\n else:\n sub_ctx = ctx\n\n while args:\n name, cmd, args = command.resolve_command(ctx, args)\n\n if cmd is None:\n return ctx\n\n sub_ctx = cmd.make_context(\n name,\n args,\n parent=ctx,\n allow_extra_args=True,\n allow_interspersed_args=False,\n resilient_parsing=True,\n )\n args = sub_ctx.args\n\n ctx = sub_ctx\n args = [*sub_ctx.protected_args, *sub_ctx.args]\n else:\n break\n\n return ctx\n\n\ndef _resolve_incomplete(\n ctx: Context, args: t.List[str], incomplete: str\n) -> t.Tuple[t.Union[BaseCommand, Parameter], str]:\n \"\"\"Find the Click object that will handle the completion of the\n incomplete value. Return the object and the incomplete value.\n\n :param ctx: Invocation context for the command represented by\n the parsed complete args.\n :param args: List of complete args before the incomplete value.\n :param incomplete: Value being completed. May be empty.\n \"\"\"\n # Different shells treat an \"=\" between a long option name and\n # value differently. Might keep the value joined, return the \"=\"\n # as a separate item, or return the split name and value. Always\n # split and discard the \"=\" to make completion easier.\n if incomplete == \"=\":\n incomplete = \"\"\n elif \"=\" in incomplete and _start_of_option(ctx, incomplete):\n name, _, incomplete = incomplete.partition(\"=\")\n args.append(name)\n\n # The \"--\" marker tells Click to stop treating values as options\n # even if they start with the option character. If it hasn't been\n # given and the incomplete arg looks like an option, the current\n # command will provide option name completions.\n if \"--\" not in args and _start_of_option(ctx, incomplete):\n return ctx.command, incomplete\n\n params = ctx.command.get_params(ctx)\n\n # If the last complete arg is an option name with an incomplete\n # value, the option will provide value completions.\n for param in params:\n if _is_incomplete_option(ctx, args, param):\n return param, incomplete\n\n # It's not an option name or value. The first argument without a\n # parsed value will provide value completions.\n for param in params:\n if _is_incomplete_argument(ctx, param):\n return param, incomplete\n\n # There were no unparsed arguments, the command may be a group that\n # will provide command name completions.\n return ctx.command, incomplete\n", "path": "src/click/shell_completion.py" } ]
[ { "content": "import os\nimport re\nimport typing as t\nfrom gettext import gettext as _\n\nfrom .core import Argument\nfrom .core import BaseCommand\nfrom .core import Context\nfrom .core import MultiCommand\nfrom .core import Option\nfrom .core import Parameter\nfrom .core import ParameterSource\nfrom .parser import split_arg_string\nfrom .utils import echo\n\n\ndef shell_complete(\n cli: BaseCommand,\n ctx_args: t.MutableMapping[str, t.Any],\n prog_name: str,\n complete_var: str,\n instruction: str,\n) -> int:\n \"\"\"Perform shell completion for the given CLI program.\n\n :param cli: Command being called.\n :param ctx_args: Extra arguments to pass to\n ``cli.make_context``.\n :param prog_name: Name of the executable in the shell.\n :param complete_var: Name of the environment variable that holds\n the completion instruction.\n :param instruction: Value of ``complete_var`` with the completion\n instruction and shell, in the form ``instruction_shell``.\n :return: Status code to exit with.\n \"\"\"\n shell, _, instruction = instruction.partition(\"_\")\n comp_cls = get_completion_class(shell)\n\n if comp_cls is None:\n return 1\n\n comp = comp_cls(cli, ctx_args, prog_name, complete_var)\n\n if instruction == \"source\":\n echo(comp.source())\n return 0\n\n if instruction == \"complete\":\n echo(comp.complete())\n return 0\n\n return 1\n\n\nclass CompletionItem:\n \"\"\"Represents a completion value and metadata about the value. The\n default metadata is ``type`` to indicate special shell handling,\n and ``help`` if a shell supports showing a help string next to the\n value.\n\n Arbitrary parameters can be passed when creating the object, and\n accessed using ``item.attr``. If an attribute wasn't passed,\n accessing it returns ``None``.\n\n :param value: The completion suggestion.\n :param type: Tells the shell script to provide special completion\n support for the type. Click uses ``\"dir\"`` and ``\"file\"``.\n :param help: String shown next to the value if supported.\n :param kwargs: Arbitrary metadata. The built-in implementations\n don't use this, but custom type completions paired with custom\n shell support could use it.\n \"\"\"\n\n __slots__ = (\"value\", \"type\", \"help\", \"_info\")\n\n def __init__(\n self,\n value: t.Any,\n type: str = \"plain\",\n help: t.Optional[str] = None,\n **kwargs: t.Any,\n ) -> None:\n self.value: t.Any = value\n self.type: str = type\n self.help: t.Optional[str] = help\n self._info = kwargs\n\n def __getattr__(self, name: str) -> t.Any:\n return self._info.get(name)\n\n\n# Only Bash >= 4.4 has the nosort option.\n_SOURCE_BASH = \"\"\"\\\n%(complete_func)s() {\n local IFS=$'\\\\n'\n local response\n\n response=$(env COMP_WORDS=\"${COMP_WORDS[*]}\" COMP_CWORD=$COMP_CWORD \\\n%(complete_var)s=bash_complete $1)\n\n for completion in $response; do\n IFS=',' read type value <<< \"$completion\"\n\n if [[ $type == 'dir' ]]; then\n COMPREPLY=()\n compopt -o dirnames\n elif [[ $type == 'file' ]]; then\n COMPREPLY=()\n compopt -o default\n elif [[ $type == 'plain' ]]; then\n COMPREPLY+=($value)\n fi\n done\n\n return 0\n}\n\n%(complete_func)s_setup() {\n complete -o nosort -F %(complete_func)s %(prog_name)s\n}\n\n%(complete_func)s_setup;\n\"\"\"\n\n_SOURCE_ZSH = \"\"\"\\\n#compdef %(prog_name)s\n\n%(complete_func)s() {\n local -a completions\n local -a completions_with_descriptions\n local -a response\n (( ! 
$+commands[%(prog_name)s] )) && return 1\n\n response=(\"${(@f)$(env COMP_WORDS=\"${words[*]}\" COMP_CWORD=$((CURRENT-1)) \\\n%(complete_var)s=zsh_complete %(prog_name)s)}\")\n\n for type key descr in ${response}; do\n if [[ \"$type\" == \"plain\" ]]; then\n if [[ \"$descr\" == \"_\" ]]; then\n completions+=(\"$key\")\n else\n completions_with_descriptions+=(\"$key\":\"$descr\")\n fi\n elif [[ \"$type\" == \"dir\" ]]; then\n _path_files -/\n elif [[ \"$type\" == \"file\" ]]; then\n _path_files -f\n fi\n done\n\n if [ -n \"$completions_with_descriptions\" ]; then\n _describe -V unsorted completions_with_descriptions -U\n fi\n\n if [ -n \"$completions\" ]; then\n compadd -U -V unsorted -a completions\n fi\n}\n\nif [[ $zsh_eval_context[-1] == loadautofunc ]]; then\n # autoload from fpath, call function directly\n %(complete_func)s \"$@\"\nelse\n # eval/source/. command, register function for later\n compdef %(complete_func)s %(prog_name)s\nfi\n\"\"\"\n\n_SOURCE_FISH = \"\"\"\\\nfunction %(complete_func)s\n set -l response (env %(complete_var)s=fish_complete COMP_WORDS=(commandline -cp) \\\nCOMP_CWORD=(commandline -t) %(prog_name)s)\n\n for completion in $response\n set -l metadata (string split \",\" $completion)\n\n if test $metadata[1] = \"dir\"\n __fish_complete_directories $metadata[2]\n else if test $metadata[1] = \"file\"\n __fish_complete_path $metadata[2]\n else if test $metadata[1] = \"plain\"\n echo $metadata[2]\n end\n end\nend\n\ncomplete --no-files --command %(prog_name)s --arguments \\\n\"(%(complete_func)s)\"\n\"\"\"\n\n\nclass ShellComplete:\n \"\"\"Base class for providing shell completion support. A subclass for\n a given shell will override attributes and methods to implement the\n completion instructions (``source`` and ``complete``).\n\n :param cli: Command being called.\n :param prog_name: Name of the executable in the shell.\n :param complete_var: Name of the environment variable that holds\n the completion instruction.\n\n .. versionadded:: 8.0\n \"\"\"\n\n name: t.ClassVar[str]\n \"\"\"Name to register the shell as with :func:`add_completion_class`.\n This is used in completion instructions (``{name}_source`` and\n ``{name}_complete``).\n \"\"\"\n\n source_template: t.ClassVar[str]\n \"\"\"Completion script template formatted by :meth:`source`. This must\n be provided by subclasses.\n \"\"\"\n\n def __init__(\n self,\n cli: BaseCommand,\n ctx_args: t.MutableMapping[str, t.Any],\n prog_name: str,\n complete_var: str,\n ) -> None:\n self.cli = cli\n self.ctx_args = ctx_args\n self.prog_name = prog_name\n self.complete_var = complete_var\n\n @property\n def func_name(self) -> str:\n \"\"\"The name of the shell function defined by the completion\n script.\n \"\"\"\n safe_name = re.sub(r\"\\W*\", \"\", self.prog_name.replace(\"-\", \"_\"), re.ASCII)\n return f\"_{safe_name}_completion\"\n\n def source_vars(self) -> t.Dict[str, t.Any]:\n \"\"\"Vars for formatting :attr:`source_template`.\n\n By default this provides ``complete_func``, ``complete_var``,\n and ``prog_name``.\n \"\"\"\n return {\n \"complete_func\": self.func_name,\n \"complete_var\": self.complete_var,\n \"prog_name\": self.prog_name,\n }\n\n def source(self) -> str:\n \"\"\"Produce the shell script that defines the completion\n function. 
By default this ``%``-style formats\n :attr:`source_template` with the dict returned by\n :meth:`source_vars`.\n \"\"\"\n return self.source_template % self.source_vars()\n\n def get_completion_args(self) -> t.Tuple[t.List[str], str]:\n \"\"\"Use the env vars defined by the shell script to return a\n tuple of ``args, incomplete``. This must be implemented by\n subclasses.\n \"\"\"\n raise NotImplementedError\n\n def get_completions(\n self, args: t.List[str], incomplete: str\n ) -> t.List[CompletionItem]:\n \"\"\"Determine the context and last complete command or parameter\n from the complete args. Call that object's ``shell_complete``\n method to get the completions for the incomplete value.\n\n :param args: List of complete args before the incomplete value.\n :param incomplete: Value being completed. May be empty.\n \"\"\"\n ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args)\n obj, incomplete = _resolve_incomplete(ctx, args, incomplete)\n return obj.shell_complete(ctx, incomplete)\n\n def format_completion(self, item: CompletionItem) -> str:\n \"\"\"Format a completion item into the form recognized by the\n shell script. This must be implemented by subclasses.\n\n :param item: Completion item to format.\n \"\"\"\n raise NotImplementedError\n\n def complete(self) -> str:\n \"\"\"Produce the completion data to send back to the shell.\n\n By default this calls :meth:`get_completion_args`, gets the\n completions, then calls :meth:`format_completion` for each\n completion.\n \"\"\"\n args, incomplete = self.get_completion_args()\n completions = self.get_completions(args, incomplete)\n out = [self.format_completion(item) for item in completions]\n return \"\\n\".join(out)\n\n\nclass BashComplete(ShellComplete):\n \"\"\"Shell completion for Bash.\"\"\"\n\n name = \"bash\"\n source_template = _SOURCE_BASH\n\n def _check_version(self) -> None:\n import subprocess\n\n output = subprocess.run(\n [\"bash\", \"-c\", 'echo \"${BASH_VERSION}\"'], stdout=subprocess.PIPE\n )\n match = re.search(r\"^(\\d+)\\.(\\d+)\\.\\d+\", output.stdout.decode())\n\n if match is not None:\n major, minor = match.groups()\n\n if major < \"4\" or major == \"4\" and minor < \"4\":\n raise RuntimeError(\n _(\n \"Shell completion is not supported for Bash\"\n \" versions older than 4.4.\"\n )\n )\n else:\n raise RuntimeError(\n _(\"Couldn't detect Bash version, shell completion is not supported.\")\n )\n\n def source(self) -> str:\n self._check_version()\n return super().source()\n\n def get_completion_args(self) -> t.Tuple[t.List[str], str]:\n cwords = split_arg_string(os.environ[\"COMP_WORDS\"])\n cword = int(os.environ[\"COMP_CWORD\"])\n args = cwords[1:cword]\n\n try:\n incomplete = cwords[cword]\n except IndexError:\n incomplete = \"\"\n\n return args, incomplete\n\n def format_completion(self, item: CompletionItem) -> str:\n return f\"{item.type},{item.value}\"\n\n\nclass ZshComplete(ShellComplete):\n \"\"\"Shell completion for Zsh.\"\"\"\n\n name = \"zsh\"\n source_template = _SOURCE_ZSH\n\n def get_completion_args(self) -> t.Tuple[t.List[str], str]:\n cwords = split_arg_string(os.environ[\"COMP_WORDS\"])\n cword = int(os.environ[\"COMP_CWORD\"])\n args = cwords[1:cword]\n\n try:\n incomplete = cwords[cword]\n except IndexError:\n incomplete = \"\"\n\n return args, incomplete\n\n def format_completion(self, item: CompletionItem) -> str:\n return f\"{item.type}\\n{item.value}\\n{item.help if item.help else '_'}\"\n\n\nclass FishComplete(ShellComplete):\n \"\"\"Shell completion for Fish.\"\"\"\n\n name 
= \"fish\"\n source_template = _SOURCE_FISH\n\n def get_completion_args(self) -> t.Tuple[t.List[str], str]:\n cwords = split_arg_string(os.environ[\"COMP_WORDS\"])\n incomplete = os.environ[\"COMP_CWORD\"]\n args = cwords[1:]\n\n # Fish stores the partial word in both COMP_WORDS and\n # COMP_CWORD, remove it from complete args.\n if incomplete and args and args[-1] == incomplete:\n args.pop()\n\n return args, incomplete\n\n def format_completion(self, item: CompletionItem) -> str:\n if item.help:\n return f\"{item.type},{item.value}\\t{item.help}\"\n\n return f\"{item.type},{item.value}\"\n\n\nShellCompleteType = t.TypeVar(\"ShellCompleteType\", bound=t.Type[ShellComplete])\n\n\n_available_shells: t.Dict[str, t.Type[ShellComplete]] = {\n \"bash\": BashComplete,\n \"fish\": FishComplete,\n \"zsh\": ZshComplete,\n}\n\n\ndef add_completion_class(\n cls: ShellCompleteType, name: t.Optional[str] = None\n) -> ShellCompleteType:\n \"\"\"Register a :class:`ShellComplete` subclass under the given name.\n The name will be provided by the completion instruction environment\n variable during completion.\n\n :param cls: The completion class that will handle completion for the\n shell.\n :param name: Name to register the class under. Defaults to the\n class's ``name`` attribute.\n \"\"\"\n if name is None:\n name = cls.name\n\n _available_shells[name] = cls\n\n return cls\n\n\ndef get_completion_class(shell: str) -> t.Optional[t.Type[ShellComplete]]:\n \"\"\"Look up a registered :class:`ShellComplete` subclass by the name\n provided by the completion instruction environment variable. If the\n name isn't registered, returns ``None``.\n\n :param shell: Name the class is registered under.\n \"\"\"\n return _available_shells.get(shell)\n\n\ndef _is_incomplete_argument(ctx: Context, param: Parameter) -> bool:\n \"\"\"Determine if the given parameter is an argument that can still\n accept values.\n\n :param ctx: Invocation context for the command represented by the\n parsed complete args.\n :param param: Argument object being checked.\n \"\"\"\n if not isinstance(param, Argument):\n return False\n\n assert param.name is not None\n value = ctx.params[param.name]\n return (\n param.nargs == -1\n or ctx.get_parameter_source(param.name) is not ParameterSource.COMMANDLINE\n or (\n param.nargs > 1\n and isinstance(value, (tuple, list))\n and len(value) < param.nargs\n )\n )\n\n\ndef _start_of_option(ctx: Context, value: str) -> bool:\n \"\"\"Check if the value looks like the start of an option.\"\"\"\n if not value:\n return False\n\n c = value[0]\n return c in ctx._opt_prefixes\n\n\ndef _is_incomplete_option(ctx: Context, args: t.List[str], param: Parameter) -> bool:\n \"\"\"Determine if the given parameter is an option that needs a value.\n\n :param args: List of complete args before the incomplete value.\n :param param: Option object being checked.\n \"\"\"\n if not isinstance(param, Option):\n return False\n\n if param.is_flag or param.count:\n return False\n\n last_option = None\n\n for index, arg in enumerate(reversed(args)):\n if index + 1 > param.nargs:\n break\n\n if _start_of_option(ctx, arg):\n last_option = arg\n\n return last_option is not None and last_option in param.opts\n\n\ndef _resolve_context(\n cli: BaseCommand,\n ctx_args: t.MutableMapping[str, t.Any],\n prog_name: str,\n args: t.List[str],\n) -> Context:\n \"\"\"Produce the context hierarchy starting with the command and\n traversing the complete arguments. 
This only follows the commands,\n it doesn't trigger input prompts or callbacks.\n\n :param cli: Command being called.\n :param prog_name: Name of the executable in the shell.\n :param args: List of complete args before the incomplete value.\n \"\"\"\n ctx_args[\"resilient_parsing\"] = True\n ctx = cli.make_context(prog_name, args.copy(), **ctx_args)\n args = ctx.protected_args + ctx.args\n\n while args:\n command = ctx.command\n\n if isinstance(command, MultiCommand):\n if not command.chain:\n name, cmd, args = command.resolve_command(ctx, args)\n\n if cmd is None:\n return ctx\n\n ctx = cmd.make_context(name, args, parent=ctx, resilient_parsing=True)\n args = ctx.protected_args + ctx.args\n else:\n sub_ctx = ctx\n\n while args:\n name, cmd, args = command.resolve_command(ctx, args)\n\n if cmd is None:\n return ctx\n\n sub_ctx = cmd.make_context(\n name,\n args,\n parent=ctx,\n allow_extra_args=True,\n allow_interspersed_args=False,\n resilient_parsing=True,\n )\n args = sub_ctx.args\n\n ctx = sub_ctx\n args = [*sub_ctx.protected_args, *sub_ctx.args]\n else:\n break\n\n return ctx\n\n\ndef _resolve_incomplete(\n ctx: Context, args: t.List[str], incomplete: str\n) -> t.Tuple[t.Union[BaseCommand, Parameter], str]:\n \"\"\"Find the Click object that will handle the completion of the\n incomplete value. Return the object and the incomplete value.\n\n :param ctx: Invocation context for the command represented by\n the parsed complete args.\n :param args: List of complete args before the incomplete value.\n :param incomplete: Value being completed. May be empty.\n \"\"\"\n # Different shells treat an \"=\" between a long option name and\n # value differently. Might keep the value joined, return the \"=\"\n # as a separate item, or return the split name and value. Always\n # split and discard the \"=\" to make completion easier.\n if incomplete == \"=\":\n incomplete = \"\"\n elif \"=\" in incomplete and _start_of_option(ctx, incomplete):\n name, _, incomplete = incomplete.partition(\"=\")\n args.append(name)\n\n # The \"--\" marker tells Click to stop treating values as options\n # even if they start with the option character. If it hasn't been\n # given and the incomplete arg looks like an option, the current\n # command will provide option name completions.\n if \"--\" not in args and _start_of_option(ctx, incomplete):\n return ctx.command, incomplete\n\n params = ctx.command.get_params(ctx)\n\n # If the last complete arg is an option name with an incomplete\n # value, the option will provide value completions.\n for param in params:\n if _is_incomplete_option(ctx, args, param):\n return param, incomplete\n\n # It's not an option name or value. The first argument without a\n # parsed value will provide value completions.\n for param in params:\n if _is_incomplete_argument(ctx, param):\n return param, incomplete\n\n # There were no unparsed arguments, the command may be a group that\n # will provide command name completions.\n return ctx.command, incomplete\n", "path": "src/click/shell_completion.py" } ]
diff --git a/CHANGES.rst b/CHANGES.rst index 76c234f6b..674e44901 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -17,6 +17,7 @@ Unreleased - Improve responsiveness of ``click.clear()``. :issue:`2284` - Improve command name detection when using Shiv or PEX. :issue:`2332` - Avoid showing empty lines if command help text is empty. :issue:`2368` +- ZSH completion script works when loaded from ``fpath``. :issue:`2344`. Version 8.1.3 diff --git a/src/click/shell_completion.py b/src/click/shell_completion.py index bb3e48ec3..731be65c6 100644 --- a/src/click/shell_completion.py +++ b/src/click/shell_completion.py @@ -157,7 +157,13 @@ def __getattr__(self, name: str) -> t.Any: fi } -compdef %(complete_func)s %(prog_name)s; +if [[ $zsh_eval_context[-1] == loadautofunc ]]; then + # autoload from fpath, call function directly + %(complete_func)s "$@" +else + # eval/source/. command, register function for later + compdef %(complete_func)s %(prog_name)s +fi """ _SOURCE_FISH = """\
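With the patched `_SOURCE_ZSH` template above, the emitted script registers the completion function via `compdef` when it is eval'd or sourced, and calls the function directly when zsh autoloads it from `fpath`, so completions appear on the first Tab press. A small sketch (not part of the patch) of how that script is produced through the API shown above, using a stand-in command named `black` to match the report:

```python
# Sketch: render the zsh completion script for a program the same way
# `_BLACK_COMPLETE=zsh_source black` does, via ZshComplete.source().
import click
from click.shell_completion import ZshComplete


@click.command()
@click.option("--code", help="Format the code passed in as a string.")
def cli(code):
    """Stand-in for the real `black` entry point used in the report."""


comp = ZshComplete(cli, ctx_args={}, prog_name="black", complete_var="_BLACK_COMPLETE")
print(comp.source())  # prints _SOURCE_ZSH with complete_func/prog_name filled in
```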
dotkom__onlineweb4-2553
Can't delete emails through REST API endpoints

The endpoint for removing emails is broken.
[ { "content": "from django.contrib.auth.models import Group\nfrom rest_framework import mixins, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom apps.authentication.models import Email, GroupMember, GroupRole, OnlineGroup\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.authentication.models import Position, SpecialPosition\nfrom apps.authentication.serializers import (\n AnonymizeUserSerializer,\n EmailCreateSerializer,\n EmailReadOnlySerializer,\n EmailUpdateSerializer,\n GroupMemberCreateSerializer,\n GroupMemberReadOnlySerializer,\n GroupMemberUpdateSerializer,\n GroupReadOnlySerializer,\n GroupRoleReadOnlySerializer,\n OnlineGroupCreateOrUpdateSerializer,\n OnlineGroupReadOnlySerializer,\n PasswordUpdateSerializer,\n PositionCreateAndUpdateSerializer,\n PositionReadOnlySerializer,\n SpecialPositionSerializer,\n UserCreateSerializer,\n UserReadOnlySerializer,\n UserUpdateSerializer,\n)\nfrom apps.common.rest_framework.mixins import MultiSerializerMixin\nfrom apps.permissions.drf_permissions import DjangoObjectPermissionOrAnonReadOnly\n\nfrom .filters import UserFilter\nfrom .permissions import IsSelfOrSuperUser\nfrom .serializers.user_data import UserDataSerializer\n\n\nclass UserViewSet(\n MultiSerializerMixin,\n viewsets.GenericViewSet,\n mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"\n Viewset for User serializer. Supports filtering on 'first_name', 'last_name', 'email'\n \"\"\"\n\n permission_classes = (IsSelfOrSuperUser,)\n filterset_class = UserFilter\n queryset = User.objects.all()\n serializer_classes = {\n \"create\": UserCreateSerializer,\n \"update\": UserUpdateSerializer,\n \"read\": UserReadOnlySerializer,\n \"change_password\": PasswordUpdateSerializer,\n \"anonymize_user\": AnonymizeUserSerializer,\n \"dump_data\": UserDataSerializer,\n }\n\n @action(detail=True, methods=[\"put\"])\n def change_password(self, request, pk=None):\n user: User = self.get_object()\n serializer = self.get_serializer(user, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(data=None, status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=[\"put\"])\n def anonymize_user(self, request, pk=None):\n user: User = self.get_object()\n serializer = self.get_serializer(user, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(data=None, status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=[\"get\"], url_path=\"dump-data\")\n def dump_data(self, request, pk: int):\n user: User = self.get_object()\n serializer = self.get_serializer(user)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n\nclass EmailViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_classes = {\n \"create\": EmailCreateSerializer,\n \"update\": EmailUpdateSerializer,\n \"read\": EmailReadOnlySerializer,\n }\n\n def get_queryset(self):\n return Email.objects.filter(user=self.request.user)\n\n def destroy(self, request, *args, **kwargs):\n instance: Email = self.get_object()\n if instance.primary:\n return Response(\n {\n \"message\": \"Du kan ikke slette en primær-epost. 
Du må først velge en annen epost som \"\n \"primær for å kunne slette denne.\"\n },\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n\nclass PositionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_classes = {\n \"read\": PositionReadOnlySerializer,\n \"write\": PositionCreateAndUpdateSerializer,\n }\n\n def get_queryset(self):\n user = self.request.user\n return Position.objects.filter(user=user)\n\n\nclass SpecialPositionViewSet(viewsets.ReadOnlyModelViewSet):\n serializer_class = SpecialPositionSerializer\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n user = self.request.user\n return SpecialPosition.objects.filter(user=user)\n\n\nclass GroupViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (AllowAny,)\n queryset = Group.objects.all()\n serializer_class = GroupReadOnlySerializer\n ordering = (\"name\",)\n\n\nclass OnlineGroupViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)\n queryset = OnlineGroup.objects.all()\n serializer_classes = {\n \"write\": OnlineGroupCreateOrUpdateSerializer,\n \"read\": OnlineGroupReadOnlySerializer,\n }\n\n\nclass GroupMemberViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)\n queryset = GroupMember.objects.all()\n serializer_classes = {\n \"create\": GroupMemberCreateSerializer,\n \"update\": GroupMemberUpdateSerializer,\n \"read\": GroupMemberReadOnlySerializer,\n }\n\n\nclass GroupRoleViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (AllowAny,)\n serializer_class = GroupRoleReadOnlySerializer\n queryset = GroupRole.objects.all()\n", "path": "apps/authentication/api/views.py" } ]
[ { "content": "from django.contrib.auth.models import Group\nfrom rest_framework import mixins, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom apps.authentication.models import Email, GroupMember, GroupRole, OnlineGroup\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.authentication.models import Position, SpecialPosition\nfrom apps.authentication.serializers import (\n AnonymizeUserSerializer,\n EmailCreateSerializer,\n EmailReadOnlySerializer,\n EmailUpdateSerializer,\n GroupMemberCreateSerializer,\n GroupMemberReadOnlySerializer,\n GroupMemberUpdateSerializer,\n GroupReadOnlySerializer,\n GroupRoleReadOnlySerializer,\n OnlineGroupCreateOrUpdateSerializer,\n OnlineGroupReadOnlySerializer,\n PasswordUpdateSerializer,\n PositionCreateAndUpdateSerializer,\n PositionReadOnlySerializer,\n SpecialPositionSerializer,\n UserCreateSerializer,\n UserReadOnlySerializer,\n UserUpdateSerializer,\n)\nfrom apps.common.rest_framework.mixins import MultiSerializerMixin\nfrom apps.permissions.drf_permissions import DjangoObjectPermissionOrAnonReadOnly\n\nfrom .filters import UserFilter\nfrom .permissions import IsSelfOrSuperUser\nfrom .serializers.user_data import UserDataSerializer\n\n\nclass UserViewSet(\n MultiSerializerMixin,\n viewsets.GenericViewSet,\n mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"\n Viewset for User serializer. Supports filtering on 'first_name', 'last_name', 'email'\n \"\"\"\n\n permission_classes = (IsSelfOrSuperUser,)\n filterset_class = UserFilter\n queryset = User.objects.all()\n serializer_classes = {\n \"create\": UserCreateSerializer,\n \"update\": UserUpdateSerializer,\n \"read\": UserReadOnlySerializer,\n \"change_password\": PasswordUpdateSerializer,\n \"anonymize_user\": AnonymizeUserSerializer,\n \"dump_data\": UserDataSerializer,\n }\n\n @action(detail=True, methods=[\"put\"])\n def change_password(self, request, pk=None):\n user: User = self.get_object()\n serializer = self.get_serializer(user, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(data=None, status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=[\"put\"])\n def anonymize_user(self, request, pk=None):\n user: User = self.get_object()\n serializer = self.get_serializer(user, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(data=None, status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=[\"get\"], url_path=\"dump-data\")\n def dump_data(self, request, pk: int):\n user: User = self.get_object()\n serializer = self.get_serializer(user)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n\nclass EmailViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_classes = {\n \"create\": EmailCreateSerializer,\n \"update\": EmailUpdateSerializer,\n \"read\": EmailReadOnlySerializer,\n }\n\n def get_queryset(self):\n return Email.objects.filter(user=self.request.user)\n\n def destroy(self, request, *args, **kwargs):\n instance: Email = self.get_object()\n if instance.primary:\n return Response(\n {\n \"message\": \"Du kan ikke slette en primær-epost. 
Du må først velge en annen epost som \"\n \"primær for å kunne slette denne.\"\n },\n status=status.HTTP_400_BAD_REQUEST,\n )\n super().destroy(request, *args, **kwargs)\n\n\nclass PositionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_classes = {\n \"read\": PositionReadOnlySerializer,\n \"write\": PositionCreateAndUpdateSerializer,\n }\n\n def get_queryset(self):\n user = self.request.user\n return Position.objects.filter(user=user)\n\n\nclass SpecialPositionViewSet(viewsets.ReadOnlyModelViewSet):\n serializer_class = SpecialPositionSerializer\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n user = self.request.user\n return SpecialPosition.objects.filter(user=user)\n\n\nclass GroupViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (AllowAny,)\n queryset = Group.objects.all()\n serializer_class = GroupReadOnlySerializer\n ordering = (\"name\",)\n\n\nclass OnlineGroupViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)\n queryset = OnlineGroup.objects.all()\n serializer_classes = {\n \"write\": OnlineGroupCreateOrUpdateSerializer,\n \"read\": OnlineGroupReadOnlySerializer,\n }\n\n\nclass GroupMemberViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)\n queryset = GroupMember.objects.all()\n serializer_classes = {\n \"create\": GroupMemberCreateSerializer,\n \"update\": GroupMemberUpdateSerializer,\n \"read\": GroupMemberReadOnlySerializer,\n }\n\n\nclass GroupRoleViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (AllowAny,)\n serializer_class = GroupRoleReadOnlySerializer\n queryset = GroupRole.objects.all()\n", "path": "apps/authentication/api/views.py" } ]
diff --git a/apps/authentication/api/views.py b/apps/authentication/api/views.py index fc27a3be1..59aab50a3 100644 --- a/apps/authentication/api/views.py +++ b/apps/authentication/api/views.py @@ -105,6 +105,7 @@ def destroy(self, request, *args, **kwargs): }, status=status.HTTP_400_BAD_REQUEST, ) + super().destroy(request, *args, **kwargs) class PositionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
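The patch delegates to the parent `destroy()` after the primary-email guard. A standalone sketch of that guard-then-delegate pattern (serializer and queryset configuration omitted); note the explicit `return` here, so DRF's 204 No Content response from the parent method is propagated back to the client:

```python
# Sketch of the guard-then-delegate pattern from the patch (configuration such
# as queryset/serializer_class omitted). The explicit `return` propagates the
# 204 No Content response produced by ModelViewSet.destroy().
from rest_framework import status, viewsets
from rest_framework.response import Response


class EmailViewSet(viewsets.ModelViewSet):
    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()
        if instance.primary:
            return Response(
                {"message": "Primary emails cannot be deleted."},
                status=status.HTTP_400_BAD_REQUEST,
            )
        return super().destroy(request, *args, **kwargs)
```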
pypa__pipenv-930
Pipfile is not terminated by a newline The Pipfile created by pipenv doesn't contain a trailing newline ##### Describe you environment 1. Mac OS Sierra (10.12.6) 1. Python version: I use pyenv, and I've tried this on 2.7.13 and 3.6.3. I doubt this is related to the Python version, but if you can't reproduce let me know and I'll investigate further. 1. pipenv version 8.2.7 ##### Expected result That the Pipfile would contain a trailing newline ##### Actual result ``` hobbes:pipenv-bug cdunklau$ pyenv version 3.6.3 (set by /Users/cdunklau/Development/pipenv-bug/.python-version) hobbes:pipenv-bug cdunklau$ pyenv which pipenv /Users/cdunklau/.pyenv/versions/3.6.3/bin/pipenv hobbes:pipenv-bug cdunklau$ pipenv install --python 3.6.3 Creating a virtualenv for this project… Using /Users/cdunklau/.pyenv/versions/3.6.3/bin/python3.6m to create virtualenv… ⠋Running virtualenv with interpreter /Users/cdunklau/.pyenv/versions/3.6.3/bin/python3.6m Using base prefix '/Users/cdunklau/.pyenv/versions/3.6.3' New python executable in /Users/cdunklau/.local/share/virtualenvs/pipenv-bug-1HVkXapj/bin/python3.6m Also creating executable in /Users/cdunklau/.local/share/virtualenvs/pipenv-bug-1HVkXapj/bin/python Installing setuptools, pip, wheel...done. Virtualenv location: /Users/cdunklau/.local/share/virtualenvs/pipenv-bug-1HVkXapj Creating a Pipfile for this project… Pipfile.lock not found, creating… Locking [dev-packages] dependencies… Locking [packages] dependencies… Updated Pipfile.lock (625834)! Installing dependencies from Pipfile.lock (625834)… 🐍 ▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉▉ 0/0 — 00:00:00 To activate this project's virtualenv, run the following: $ pipenv shell hobbes:pipenv-bug cdunklau$ cat Pipfile [[source]] url = "https://pypi.python.org/simple" verify_ssl = true name = "pypi" [packages] [dev-packages] [requires] python_version = "3.6"hobbes:pipenv-bug cdunklau$ hobbes:pipenv-bug cdunklau$ hexdump -C Pipfile 00000000 5b 5b 73 6f 75 72 63 65 5d 5d 0a 0a 75 72 6c 20 |[[source]]..url | 00000010 3d 20 22 68 74 74 70 73 3a 2f 2f 70 79 70 69 2e |= "https://pypi.| 00000020 70 79 74 68 6f 6e 2e 6f 72 67 2f 73 69 6d 70 6c |python.org/simpl| 00000030 65 22 0a 76 65 72 69 66 79 5f 73 73 6c 20 3d 20 |e".verify_ssl = | 00000040 74 72 75 65 0a 6e 61 6d 65 20 3d 20 22 70 79 70 |true.name = "pyp| 00000050 69 22 0a 0a 0a 5b 70 61 63 6b 61 67 65 73 5d 0a |i"...[packages].| 00000060 0a 0a 0a 5b 64 65 76 2d 70 61 63 6b 61 67 65 73 |...[dev-packages| 00000070 5d 0a 0a 0a 0a 5b 72 65 71 75 69 72 65 73 5d 0a |]....[requires].| 00000080 0a 70 79 74 68 6f 6e 5f 76 65 72 73 69 6f 6e 20 |.python_version | 00000090 3d 20 22 33 2e 36 22 |= "3.6"| 00000097 hobbes:pipenv-bug cdunklau$ ``` ##### Steps to replicate ``` pipenv install cat Pipfile ```
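A hypothetical helper (not pipenv's actual code) illustrating the kind of fix the report asks for: making sure the serialized Pipfile contents end with a single newline before they are written to disk.

```python
# Hypothetical helper: append a trailing newline to serialized TOML if missing.
def ensure_trailing_newline(text: str) -> str:
    return text if text.endswith("\n") else text + "\n"


rendered = '[packages]\nrequests = "*"'   # example payload without a final newline
assert ensure_trailing_newline(rendered).endswith("\n")
```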
[ { "content": "# -*- coding: utf-8 -*-\nimport os\nimport hashlib\nimport tempfile\nimport sys\nimport shutil\nimport logging\n\nimport click\nimport crayons\nimport delegator\nimport pip\nimport parse\nimport requirements\nimport fuzzywuzzy.process\nimport requests\nimport six\n\nlogging.basicConfig(level=logging.ERROR)\n\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\nfrom contextlib import contextmanager\nfrom piptools.resolver import Resolver\nfrom piptools.repositories.pypi import PyPIRepository\nfrom piptools.scripts.compile import get_pip_command\nfrom piptools import logging\nfrom piptools.exceptions import NoCandidateFound\nfrom pip.exceptions import DistributionNotFound\nfrom requests.exceptions import HTTPError\n\nfrom .pep508checker import lookup\nfrom .environments import SESSION_IS_INTERACTIVE, PIPENV_MAX_ROUNDS\n\nspecifiers = [k for k in lookup.keys()]\n\n# List of version control systems we support.\nVCS_LIST = ('git', 'svn', 'hg', 'bzr')\nFILE_LIST = ('http://', 'https://', 'ftp://', 'file:///')\n\nrequests = requests.Session()\n\npackages = [\n 'simplejson', 'six', 'botocore', 'python-dateutil', 'pyasn1', 'setuptools',\n 'requests', 'pyyaml', 'docutils', 's3transfer', 'futures', 'pip',\n 'jmespath', 'awscli', 'rsa', 'colorama', 'idna', 'certifi', 'urllib3',\n 'chardet', 'cffi', 'awscli-cwlogs', 'wheel', 'pycparser', 'enum34', 'pbr',\n 'cryptography', 'virtualenv', 'pytz', 'setuptools-scm', 'jinja2',\n 'ipaddress', 'markupsafe', 'boto3', 'asn1crypto', 'boto', 'paramiko',\n 'ptyprocess', 'pexpect', 'pytest-runner', 'psutil', 'flask', 'werkzeug',\n 'bcrypt', 'pynacl', 'sqlalchemy', 'click', 'numpy', 'pyparsing', 'lxml',\n 'pyopenssl', 'future', 'decorator', 'vcversioner', 'mock', 'argparse',\n 'pyasn1-modules', 'jsonschema', 'funcsigs', 'nose', 'tornado', 'httplib2',\n 'protobuf', 'pandas', 'coverage', 'psycopg2', 'pygments', 'oauth2client',\n 'singledispatch', 'itsdangerous', 'pytest', 'functools32', 'docopt',\n 'mccabe', 'babel', 'pillow', 'grpcio', 'backports-abc', 'public',\n 'query-string', 'redis', 'zope-interface',\n 'pyflakes', 'pycrypto', 'wrapt', 'django', 'selenium', 'flake8',\n 'html5lib', 'elasticsearch', 'markdown', 'pycodestyle',\n 'backports-ssl-match-hostname', 'scipy', 'websocket-client', 'lockfile',\n 'ipython', 'beautifulsoup4', 'gevent', 'uritemplate', 'pymysql',\n 'configparser', 'kombu', 'arrow', 'scikit-learn', 'greenlet', 'amqp',\n 'wcwidth', 'googleapis-common-protos', 'bleach',\n 'google-api-python-client', 'gunicorn', 'gitpython', 'typing',\n 'prompt-toolkit', 'google-cloud-core', 'google-gax', 'requests-oauthlib',\n 'stevedore', 'ordereddict', 'traitlets', 'packaging', 'pymongo',\n 'ipython-genutils', 'appdirs', 'celery', 'google-auth', 'cython',\n 'billiard', 'xmltodict', 'pickleshare', 'unittest2', 'simplegeneric',\n 'msgpack-python', 'snowballstemmer', 'sphinx', 'matplotlib', 'pep8',\n 'pylint', 'netaddr', 'flask-restful', 'oauthlib', 'linecache2', 'ply',\n 'traceback2', 'alabaster', 'monotonic', 'olefile', 'isort', 'astroid',\n 'pyjwt', 'lazy-object-proxy', 'imagesize', 'smmap2', 'gitdb2',\n 'incremental', 'contextlib2', 'ndg-httpsclient', 'ujson', 'unidecode',\n 'raven', 'blessings', 'docker-pycreds', 'ansible', 'vine', 'mako',\n 'netifaces', 'retrying', 'attrs', 'requests-toolbelt', 'supervisor',\n 'python-daemon', 'sqlparse', 'prettytable', 'iso8601', 'pytest-cov',\n 'cycler', 'cachetools', 'pyzmq', 'tabulate', 'google-cloud-logging',\n 'tqdm', 'mozsystemmonitor', 
'gapic-google-cloud-logging-v2',\n 'blobuploader', 'tzlocal', 'tox', 'pluggy', 'xlrd', 'configobj',\n 'djangorestframework', 'webencodings', 'unicodecsv', 'grpcio-tools',\n 'pystache', 'meld3', 'mysql-python', 'uwsgi', 'oslo-utils',\n 'grpc-google-cloud-logging-v2', 'oslo-i18n', 'nbformat', 'statsd',\n 'debtcollector', 'docker-py', 'oslo-config', 'sphinxcontrib-websupport',\n 'pathlib2', 'parsedatetime', 'ecdsa', 'oslo-serialization',\n 'configargparse', 'backports-weakref', 'backports-functools-lru-cache',\n 'alembic', 'jupyter-core', 'cached-property', 'scandir', 'rfc3986',\n 'frida', 'subprocess32', 'keystoneauth1', 'thrift', 'jedi', 'ccxt',\n 'fabric', 'mistune', 'dnspython', 'service-identity', 'datadog',\n 'python-magic', 'altgraph', 'twisted', 'openpyxl', 'webob', 'macholib',\n 'docker', 'regex', 'python-keystoneclient',\n 'backports-shutil-get-terminal-size', 'zope-component', 'python-editor',\n 'zope-event', 'isodate', 'tensorflow', 'pika', 'anyjson', 'tldextract',\n 'tensorflow-tensorboard', 'pyrfc3339', 'requests-file', 'networkx',\n 'easyprocess', 'dockerpty', 'texttable', 'positional', 'python-augeas',\n 'acme', 'jdcal', 'mmh3', 'dill', 'certbot', 'termcolor', 'nbconvert',\n 'certbot-apache', 'ipykernel', 'python-mimeparse', 'ruamel-yaml',\n 'et-xmlfile', 'letsencrypt', 'opencv-python', 'cmd2', 'w3lib', 'cliff',\n 'jupyter-client', 'ipywidgets', 'passlib', 'gcloud', 'cssselect',\n 'notebook', 'python-swiftclient', 'widgetsnbextension', 'entrypoints',\n 'flask-sqlalchemy', 'kazoo', 'defusedxml', 'pandocfilters', 'python-gflags',\n 'testpath', 'python-memcached', 'keras', 'jsonpatch', 'python-novaclient',\n 'sympy', 'qtconsole', 'freezegun', 'whichcraft', 'docker-compose',\n 'binaryornot', 'blinker', 'cookiecutter', 'azure-common', 'jinja2-time',\n 'poyo', 'certbot-nginx', 'nltk', 'google-cloud-storage', 'sklearn',\n 'pyhocon', 'django-extensions', 'ua-parser', 'os-client-config',\n 'jupyter-console', 'inflection', 'newrelic', 'tempita', 'azure-nspkg',\n 'codecov', 'argh', 'sqlalchemy-migrate', 'requestsexceptions', 'geopy',\n 'azure-storage', 'pytest-xdist', 'jupyter', 'grpc-google-pubsub-v1',\n 'faker', 'execnet', 'constantly', 'grpc-google-logging-v2', 'automat',\n 'argcomplete', 'apipkg', 'wtforms', 'sphinx-rtd-theme', 'aiohttp',\n 'hyperlink', 'py4j', 'multidict', 'django-filter', 'coala', 'crcmod',\n 'jsonpointer', 'pytesseract', 'gax-google-pubsub-v1',\n 'gax-google-logging-v2', 'distribute', 'patsy', 'flask-wtf', 'waitress',\n 'coveralls', 'pyaml', 'bz2file', 'hjson', 'fake-useragent', 'terminado',\n 'pyperclip', 'repoze-lru', 'mysqlclient', 'smart-open', 'theano', 'pycurl',\n 'sqlobject', 'python-glanceclient', 'paste', 'python-cinderclient',\n 'pathspec', 'watchdog', 'testtools', 'plotly', 'python-openstackclient',\n 'scrapy-crawlera', 'pathtools', 'azure', 'flask-login', 'aniso8601',\n 'google-resumable-media', 'python-jenkins', 'slacker', 'xlsxwriter',\n 'async-timeout', 'pyserial', 'openstacksdk', 'python-jose', 'tenacity',\n 'python-slugify', 'keyring', 'pkginfo', 'pastedeploy', 'seaborn',\n 'eventlet', 'google-cloud-bigquery', 'h5py', 'aws-requests-auth',\n 'maxminddb', 's3cmd', 'django-debug-toolbar', 'flask-script',\n 'multi-key-dict', 'fuzzywuzzy', 'fasteners', 'youtube-dl',\n 'pycryptodome', 'smmap', 'gitdb', 'setuptools-git', 'pager',\n 'python-subunit', 'warlock', 'extras', 'capstone', 'httpretty',\n 'factory_boy', 'webtest', 'django-cors-headers', 'codeintel', 'suds',\n 'pyodbc', 'geoip2', 'filechunkio', 'fixtures', 'pysocks', 'statsmodels',\n 
'google-auth-httplib2', 'kafka-python', 'applicationinsights', 'yarl',\n 'cassandra-driver', 'azure-mgmt-compute', 'pathlib', 'python-jwt', 'sh',\n 'flask-cors', 'shapely', 'twine', 'taskcluster', 'enum-compat',\n 'python-twitter', 'cookiejar', 'cookies', 'semantic-version', 'slugid',\n 'suds-jurko', 'joblib', 'azure-mgmt-network', 'azure-mgmt-resource',\n 'hiredis', 'pyhawk-with-a-single-extra-commit', 'jws', 'moto', 'bokeh',\n 'ipaddr', 'invoke', 'azure-mgmt-storage', 'pyxdg', 'azure-mgmt-nspkg',\n 'pytest-mock', 'google-cloud-pubsub', 'send2trash', 'yarg', 'subliminal',\n 'pydevd', 'xlwt', 'user-agents', 'python-fanart', 'bs4', 'rtorrent-python',\n 'django-storages', 'tmdbsimple', 'autopep8', 'pysftp', 'ipdb',\n 'setproctitle', 'osc-lib', 'importlib', 'validate-email', 'django-appconf',\n 'bottle', 'hgtools', 'stripe', 'azure-servicebus', 'marshmallow',\n 'voluptuous', 'ptvsd', 'jsonpickle', 'reportlab', 'python-geohash',\n 'dicttoxml', 'ddt', 'secretstorage', 'pytest-django', 'flexget',\n 'httpagentparser', 'beautifulsoup', 'azure-mgmt', 'haversine',\n 'flower', 'sortedcontainers', 'requests-mock',\n 'azure-servicemanagement-legacy', 'flask-migrate', 'pyinotify',\n 'carbon', 'zc-buildout', 'unittest-xml-reporting', 'parse', 'hacking',\n 'mxnet', 'qds-sdk', 'twilio', 'gspread', 'oslo-log', 'pytest-timeout',\n 'python-heatclient', 'oslo-context', 'numexpr', 'toolz', 'adal',\n 'troposphere', 'humanfriendly', 'path-py', 'dogpile-cache', 'plumbum',\n 'gapic-google-cloud-pubsub-v1', 'graphite-web', 'grpc-google-iam-v1',\n 'deprecation', 'mpmath', 'oslo-concurrency', 'feedparser', 'python-ldap',\n 'proto-google-cloud-pubsub-v1', 'pyzabbix', 'humanize', 'colorlog',\n 'msrestazure', 'msrest', 'python-ironicclient', 'pycountry',\n 'email-validator', 'hypothesis', 'coala-bears', 'phonenumbers',\n 'dj-database-url', 'elasticsearch-dsl', 'responses',\n 'python-neutronclient', 'sasl', 'django-nose', 'munch', 'pydns',\n 'proto-google-cloud-datastore-v1', 'apscheduler', 'django-redis',\n 'pytest-forked', 'python-levenshtein', 'dateparser',\n 'google-cloud-datastore', 'pytimeparse', 'pytest-html',\n 'virtualenv-clone', 'zope-deprecation', 'django-rest-swagger',\n 'whitenoise', 'gensim', 'python-consul', 'pypdf2', 'pydispatcher',\n 'scp', 'requires', 'cement', 'cx-oracle', 'graphviz', 'slackclient',\n 'hponeview', 'croniter', 'cssutils', 'appier', 'jsonpath-rw',\n 'requests-futures', 'mrjob', 'cachet', 'influxdb', 'virtualenvwrapper',\n 'appnope', 'pymssql', 'testfixtures', 'glob2', 'django-model-utils',\n 'awsebcli', 'tweepy', 'gapic-google-cloud-datastore-v1', 'coreapi',\n 'bkcharts', 'requests-ntlm', 'sqlalchemy-utils', 'more-itertools',\n 'testrepository', 'blessed', 'jsonfield', 'logilab-common',\n 'flake8-import-order', 'parse-type', 'clint', 'queuelib', 'robotframework',\n 'python-gnupg', 'tensorflow-gpu', 'jira', 'gcdt-bundler',\n 'azure-mgmt-redis', 'avro', 'args', 'pythonwhois', 'pyhamcrest',\n 'scrapy', 'ruamel-ordereddict', 'retry', 'azure-mgmt-batch',\n 'azure-batch', 'junit-xml', 'django-compressor', 'pyvirtualdisplay',\n 'python-openid', 'itypes', 'flask-cache', 'azure-mgmt-keyvault',\n 'pip-tools', 'apache-libcloud', 'inflect', 'django-celery', 'routes',\n 'google-apputils', 'bitarray', 'websockets', 'cherrypy', 'pyhive',\n 'os-testr', 'whoosh', 'django-braces', 'findspark', 'parsel',\n 'zope-exceptions', 'coreschema', 'ntlm-auth', 'fake-factory',\n 'enum', 'googleads', 'iptools', 'google-cloud-translate',\n 'google-cloud', 'pywinrm', 'google-cloud-vision', 'google-cloud-language',\n 
'brotlipy', 'google-cloud-bigtable', 'google-cloud-error-reporting',\n 'oslo-messaging', 'zope-testrunner', 'google-cloud-monitoring', 'awacs',\n 'pydocstyle', 'lmdb', 'django-crispy-forms', 'jellyfish',\n 'google-cloud-speech', 'google-cloud-runtimeconfig', 'testscenarios',\n 'first', 'py-zabbix', 'bcdoc', 'azure-mgmt-web', 'google-cloud-dns',\n 'google-cloud-resource-manager', 'google-compute-engine', 'oslo-db',\n 'autobahn', 'ldap3', 'azure-mgmt-monitor', 'proto-google-cloud-logging-v2',\n 'azure-mgmt-trafficmanager', 'pypiwin32', 'azure-mgmt-cdn',\n 'oslo-middleware', 'azure-mgmt-authorization', 'google-cloud-spanner',\n 'python-json-logger', 'datetime', 'eggtestinfo', 'thriftpy', 'nosexcover',\n 'falcon', 'csvkit', 'ggplot', 'pyramid', 'pg8000', 'munkres', 'futurist',\n 'ciso8601', 'azure-graphrbac', 'python-dotenv', 'py2-ipaddress', 'peewee',\n 'brewer2mpl', 'dulwich', 'zeep', 'azure-mgmt-cognitiveservices',\n 'translationstring', 'sendgrid', 'xgboost', 'aws', 'prometheus-client',\n 'runcython', 'azure-mgmt-sql', 'kubernetes', 'oslo-service', 'annoy',\n 'oauth2', 'dbfread', 'mox3', 'wincertstore', 'initools', 'scikit-image',\n 'backport-collections', 'commonmark', 'pyproj', 'behave', 'qrcode',\n 'azure-mgmt-dns', 'azure-datalake-store',\n 'gapic-google-cloud-error-reporting-v1beta1', 'requests-aws4auth',\n 'flask-admin', 'pygame', 'cov-core', 'gapic-google-cloud-spanner-v1',\n 'agate', 'gapic-google-cloud-spanner-admin-database-v1',\n 'openstackdocstheme', 'azure-mgmt-containerregistry',\n 'djangorestframework-jwt',\n 'proto-google-cloud-error-reporting-v1beta1',\n 'proto-google-cloud-spanner-admin-database-v1',\n 'gapic-google-cloud-spanner-admin-instance-v1',\n 'azure-mgmt-datalake-store', 'proto-google-cloud-spanner-v1',\n 'proto-google-cloud-spanner-admin-instance-v1', 'runtime',\n 'azure-mgmt-datalake-analytics', 'oslotest', 'txaio', 'django-mptt',\n 'azure-keyvault', 'azure-mgmt-iothub', 'azure-mgmt-documentdb',\n 'oslo-policy', 'shade', 'pywavelets', 'flask-mail',\n 'azure-mgmt-devtestlabs', 'atx', 'azure-mgmt-scheduler', 'wand',\n 'azure-mgmt-datalake-nspkg', 'azure-mgmt-rdbms', 'empy',\n 'azure-mgmt-common', 'venusian', 'cairocffi', 'pysubnettree',\n 'agate-excel', 'toml', 'pyvmomi', 'oslosphinx', 'cchardet',\n 'requesocks', 'agate-dbf', 'openapi-codec', 'pylibmc', 'reno',\n 'httpbin', 'google-cloud-videointelligence', 'udatetime', 'pyroute2',\n 'flake8-docstrings', 'autograd', 'nodeenv', 'logutils', 'rq',\n 'azure-servicefabric', 'mongoengine', 'pycryptodomex', 'azure-mgmt-logic',\n 'leather', 'agate-sql', 'python-logstash', 'delorean', 'thrift-sasl',\n 'jpype1', 'shutit', 'wordsegment', 'flufl-enum', 'rjsmin', 'html2text',\n 'watchtower', 'pymeta3', 'netius', 'cairosvg', 'pybars3', 'recommonmark',\n 'uritemplate-py', 'fakeredis', 'python3-openid', 'filelock', 'jsmin',\n 'pipenv', 'django-environ', 'pyhs2', 'pep8-naming', 'typed-ast', 'pyusb',\n 'dedupe', 'dateutils', 'tablib', 'luigi', 'pysnmp', 'prettyplotlib',\n 'pre-commit', 'polib', 'jenkinsapi', 'rcssmin', 'ptable', 'multiprocess',\n 'pymc', 'pytest-metadata', 'django-oauth-toolkit', 'django-allauth',\n 'pygithub', 'python-crfsuite', 'python-cdb', 'pydas', 'pytest-cache',\n 'pyspin', 'pypi-publisher', 'pika-pool', 'pulp', 'pyinstaller',\n 'profilehooks', 'jenkins-job-builder', 'clickclick', 'urwid', 'pep257',\n 'sirepo', 'bandit', 'google-apitools', 'zope-proxy', 'cvxopt',\n 'pytest-catchlog', 'pybrain', 'gdata', 'toil', 'mypy',\n 'python2-pythondialog', 'pypng', 'sure', 'yamllint',\n 
'robotframework-selenium2library', 'll-xist', 'tempora', 'webassets',\n 'pycadf', 'dropbox', 'pypandoc', 'django-taggit', 'paho-mqtt',\n 'keystonemiddleware', 'livereload', 'psycogreen', 'geocoder', 'ftfy',\n 'yapf', 'glances', 'grequests', 'coloredlogs', 'python-http-client',\n 'parsley', 'nose-exclude', 'transaction', 'flask-swagger', 'homeassistant',\n 'hvac', 'vcrpy', 'github3-py', 'schematics', 'tinycss',\n 'swagger-spec-validator', 'progressbar2', 'pydot', 'backoff', 'pytsite',\n 'scapy', 'attrdict', 'shellescape', 'impyla', 'flatten-dict',\n 'requests-kerberos', 'pykerberos', 'repoze-who', 'mxnet-mkl', 'cssmin',\n 'dask', 'cheroot', 'flake8-polyfill', 'pyotp', 'python-designateclient',\n 'simple-salesforce', 'hupper', 'neutron-lib', 'wavefront-cli', 'deepdiff',\n 'connexion', 'phonenumberslite', 'natsort', 'tox-travis', 'btrees',\n 'rednose', 'flask-testing', 'premailer', 'shortuuid', 'django-countries',\n 'ocflib', 'pylint-plugin-utils', 'pyenchant', 'logging', 'pysmi',\n 'appier-extras', 'zc-recipe-egg', 'oslo-rootwrap', 'flaky', 'libsass',\n 'oslo-versionedobjects', 'ipy', 'pecan', 'diff-match-patch',\n 'oslo-reports', 'google', 'aspen', 'rollbar', 'cobra',\n 'restructuredtext-lint', 'pythonnet', 'line-profiler', 'trollius',\n 'django-bootstrap3', 'pygeoip', 'django-picklefield', 'django-reversion',\n 'cytoolz', 'beaker', 'tooz', 'flask-assets', 'uuid', 'osprofiler',\n 'bitstring', 'naked', 'flask-babel', 'plac', 'semver', 'django-formtools',\n 'python-snappy', 'persistent', 'terminaltables', 'taskflow', 'boxsdk',\n 'cerberus', 'flask-principal', 'thinc', 'spacy', 'pycares', 'pylru',\n 'kafka', 'pkgconfig', 'couchbase', 'python-utils', 'django-localflavor',\n 'django-redis-cache', 'webapp2', 'sqlalchemy-redshift', 'salt',\n 'structlog', 'mandrill', 'googlemaps', 'easy-thumbnails', 'automaton',\n 'webcolors'\n]\n\n\ndef cleanup_toml(tml):\n toml = tml.split('\\n')\n new_toml = []\n\n # Remove all empty lines from TOML.\n for line in toml:\n if line.strip():\n new_toml.append(line)\n\n toml = '\\n'.join(new_toml)\n new_toml = []\n\n # Add newlines between TOML sections.\n for i, line in enumerate(toml.split('\\n')):\n after = False\n # Skip the first line.\n if line.startswith('['):\n if i > 0:\n # Insert a newline before the heading.\n new_toml.append('\\n')\n after = True\n\n new_toml.append(line)\n # Insert a newline after the heading.\n if after:\n new_toml.append('')\n\n toml = '\\n'.join(new_toml)\n return toml\n\n\ndef suggest_package(package):\n \"\"\"Suggests a package name, given a package name.\"\"\"\n if SESSION_IS_INTERACTIVE:\n\n if ('-' in package) or ('[' in package) or ('+' in package):\n THRESHOLD = 90\n else:\n THRESHOLD = 86\n\n # Bypass for speed.\n if package in packages:\n return package\n\n result = fuzzywuzzy.process.extractOne(package, packages)\n\n if result[1] > THRESHOLD:\n return result[0]\n\n\ndef python_version(path_to_python):\n if not path_to_python:\n return None\n\n try:\n c = delegator.run([path_to_python, '--version'], block=False)\n except Exception:\n return None\n output = c.out.strip() or c.err.strip()\n\n @parse.with_pattern(r'.*')\n def allow_empty(text):\n return text\n\n TEMPLATE = 'Python {}.{}.{:d}{:AllowEmpty}'\n parsed = parse.parse(TEMPLATE, output, dict(AllowEmpty=allow_empty))\n if parsed:\n parsed = parsed.fixed\n else:\n return None\n\n return u\"{v[0]}.{v[1]}.{v[2]}\".format(v=parsed)\n\n\ndef shellquote(s):\n \"\"\"Prepares a string for the shell (on Windows too!)\"\"\"\n if s is None:\n return None\n # Additional escaping 
for windows paths\n if os.name == 'nt':\n s = \"{}\".format(s.replace(\"\\\\\", \"\\\\\\\\\"))\n\n return '\"' + s.replace(\"'\", \"'\\\\''\") + '\"'\n\n\ndef clean_pkg_version(version):\n \"\"\"Uses pip to prepare a package version string, from our internal version.\"\"\"\n return six.u(pep440_version(str(version).replace('==', '')))\n\n\nclass HackedPythonVersion(object):\n \"\"\"A Beautiful hack, which allows us to tell pip which version of Python we're using.\"\"\"\n def __init__(self, python_version, python_path):\n self.python_version = python_version\n self.python_path = python_path\n\n def __enter__(self):\n os.environ['PIP_PYTHON_VERSION'] = str(self.python_version)\n os.environ['PIP_PYTHON_PATH'] = str(self.python_path)\n\n def __exit__(self, *args):\n # Restore original Python version information.\n del os.environ['PIP_PYTHON_VERSION']\n\n\ndef prepare_pip_source_args(sources, pip_args=None):\n if pip_args is None:\n pip_args = []\n\n if sources:\n # Add the source to pip.\n pip_args.extend(['-i', sources[0]['url']])\n\n # Trust the host if it's not verified.\n if not sources[0].get('verify_ssl', True):\n pip_args.extend(['--trusted-host', urlparse(sources[0]['url']).netloc.split(':')[0]])\n\n # Add additional sources as extra indexes.\n if len(sources) > 1:\n for source in sources[1:]:\n pip_args.extend(['--extra-index-url', source['url']])\n\n # Trust the host if it's not verified.\n if not source.get('verify_ssl', True):\n pip_args.extend(['--trusted-host', urlparse(source['url']).netloc.split(':')[0]])\n\n return pip_args\n\n\ndef resolve_deps(deps, which, which_pip, project, sources=None, verbose=False, python=False, clear=False, pre=False):\n \"\"\"Given a list of dependencies, return a resolved list of dependencies,\n using pip-tools -- and their hashes, using the warehouse API / pip.\n \"\"\"\n\n index_lookup = {}\n markers_lookup = {}\n\n python_path = which('python')\n\n with HackedPythonVersion(python_version=python, python_path=python_path):\n\n class PipCommand(pip.basecommand.Command):\n \"\"\"Needed for pip-tools.\"\"\"\n name = 'PipCommand'\n\n constraints = []\n\n for dep in deps:\n t = tempfile.mkstemp(prefix='pipenv-', suffix='-requirement.txt')[1]\n with open(t, 'w') as f:\n f.write(dep)\n\n if dep.startswith('-e '):\n constraint = pip.req.InstallRequirement.from_editable(dep[len('-e '):])\n else:\n constraint = [c for c in pip.req.parse_requirements(t, session=pip._vendor.requests)][0]\n # extra_constraints = []\n\n if ' -i ' in dep:\n index_lookup[constraint.name] = project.get_source(url=dep.split(' -i ')[1]).get('name')\n\n if constraint.markers:\n markers_lookup[constraint.name] = str(constraint.markers).replace('\"', \"'\")\n\n constraints.append(constraint)\n\n pip_command = get_pip_command()\n\n pip_args = []\n\n if sources:\n pip_args = prepare_pip_source_args(sources, pip_args)\n\n if verbose:\n print('Using pip: {0}'.format(' '.join(pip_args)))\n\n pip_options, _ = pip_command.parse_args(pip_args)\n\n session = pip_command._build_session(pip_options)\n pypi = PyPIRepository(pip_options=pip_options, session=session)\n\n if verbose:\n logging.log.verbose = True\n\n results = []\n resolved_tree = set()\n\n resolver = Resolver(constraints=constraints, repository=pypi, clear_caches=clear, prereleases=pre)\n # pre-resolve instead of iterating to avoid asking pypi for hashes of editable packages\n try:\n resolved_tree.update(resolver.resolve(max_rounds=PIPENV_MAX_ROUNDS))\n except (NoCandidateFound, DistributionNotFound, HTTPError) as e:\n click.echo(\n 
'{0}: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.\\n '\n 'You can use {1} to bypass this mechanism, then run {2} to inspect the situation.'\n ''.format(\n crayons.red('Warning', bold=True),\n crayons.red('$ pipenv install --skip-lock'),\n crayons.red('$ pipenv graph')\n ),\n err=True)\n click.echo(crayons.blue(e))\n sys.exit(1)\n\n for result in resolved_tree:\n if not result.editable:\n name = pep423_name(result.name)\n version = clean_pkg_version(result.specifier)\n index = index_lookup.get(result.name)\n\n if not markers_lookup.get(result.name):\n markers = str(result.markers) if result.markers and 'extra' not in str(result.markers) else None\n else:\n markers = markers_lookup.get(result.name)\n\n collected_hashes = []\n if 'python.org' in '|'.join([source['url'] for source in sources]):\n try:\n # Grab the hashes from the new warehouse API.\n r = requests.get('https://pypi.org/pypi/{0}/json'.format(name), timeout=10)\n api_releases = r.json()['releases']\n\n cleaned_releases = {}\n for api_version, api_info in api_releases.items():\n cleaned_releases[clean_pkg_version(api_version)] = api_info\n\n for release in cleaned_releases[version]:\n collected_hashes.append(release['digests']['sha256'])\n\n collected_hashes = ['sha256:' + s for s in collected_hashes]\n\n # Collect un-collectable hashes.\n if not collected_hashes:\n collected_hashes = list(list(resolver.resolve_hashes([result]).items())[0][1])\n\n except (ValueError, KeyError):\n if verbose:\n print('Error fetching {}'.format(name))\n\n d = {'name': name, 'version': version, 'hashes': collected_hashes}\n\n if index:\n d.update({'index': index})\n\n if markers:\n d.update({'markers': markers.replace('\"', \"'\")})\n\n results.append(d)\n\n return results\n\n\ndef multi_split(s, split):\n \"\"\"Splits on multiple given separators.\"\"\"\n\n for r in split:\n s = s.replace(r, '|')\n\n return [i for i in s.split('|') if len(i) > 0]\n\n\ndef convert_deps_from_pip(dep):\n \"\"\"\"Converts a pip-formatted dependency to a Pipfile-formatted one.\"\"\"\n\n dependency = {}\n\n req = [r for r in requirements.parse(dep)][0]\n extras = {'extras': req.extras}\n\n # File installs.\n if (req.uri or (os.path.isfile(req.path) if req.path else False) or\n os.path.isfile(req.name)) and not req.vcs:\n # Assign a package name to the file, last 7 of it's sha256 hex digest.\n if not req.uri and not req.path:\n req.path = os.path.abspath(req.name)\n\n hashable_path = req.uri if req.uri else req.path\n req.name = hashlib.sha256(hashable_path.encode('utf-8')).hexdigest()\n req.name = req.name[len(req.name) - 7:]\n\n # {path: uri} TOML (spec 4 I guess...)\n if req.uri:\n dependency[req.name] = {'file': hashable_path}\n else:\n dependency[req.name] = {'path': hashable_path}\n\n # Add --editable if applicable\n if req.editable:\n dependency[req.name].update({'editable': True})\n\n # VCS Installs. Extra check for unparsed git over SSH\n if req.vcs or is_vcs(req.path):\n if req.name is None:\n raise ValueError('pipenv requires an #egg fragment for version controlled '\n 'dependencies. Please install remote dependency '\n 'in the form {0}#egg=<package-name>.'.format(req.uri))\n\n # Extras: e.g. 
#egg=requests[security]\n if req.extras:\n dependency[req.name] = extras\n\n # Set up this requirement as a proper VCS requirement if it was not\n if not req.vcs and req.path.startswith(VCS_LIST):\n req.vcs = [vcs for vcs in VCS_LIST if req.path.startswith(vcs)][0]\n req.uri = '{0}'.format(req.path)\n req.path = None\n\n # Crop off the git+, etc part.\n dependency.setdefault(req.name, {}).update({req.vcs: req.uri[len(req.vcs) + 1:]})\n\n # Add --editable, if it's there.\n if req.editable:\n dependency[req.name].update({'editable': True})\n\n # Add subdirectory, if it's there\n if req.subdirectory:\n dependency[req.name].update({'subdirectory': req.subdirectory})\n\n # Add the specifier, if it was provided.\n if req.revision:\n dependency[req.name].update({'ref': req.revision})\n\n elif req.extras or req.specs:\n\n specs = None\n # Comparison operators: e.g. Django>1.10\n if req.specs:\n r = multi_split(dep, '!=<>~')\n specs = dep[len(r[0]):]\n dependency[req.name] = specs\n\n # Extras: e.g. requests[socks]\n if req.extras:\n dependency[req.name] = extras\n\n if specs:\n dependency[req.name].update({'version': specs})\n\n # Bare dependencies: e.g. requests\n else:\n dependency[dep] = '*'\n\n # Cleanup when there's multiple values, e.g. -e.\n if len(dependency) > 1:\n for key in dependency.copy():\n if not hasattr(dependency[key], 'keys'):\n del dependency[key]\n\n return dependency\n\n\ndef convert_deps_to_pip(deps, project=None, r=True, include_index=False):\n \"\"\"\"Converts a Pipfile-formatted dependency to a pip-formatted one.\"\"\"\n\n dependencies = []\n\n for dep in deps.keys():\n\n # Default (e.g. '>1.10').\n extra = deps[dep] if isinstance(deps[dep], six.string_types) else ''\n version = ''\n index = ''\n\n # Get rid of '*'.\n if deps[dep] == '*' or str(extra) == '{}':\n extra = ''\n\n hash = ''\n # Support for single hash (spec 1).\n if 'hash' in deps[dep]:\n hash = ' --hash={0}'.format(deps[dep]['hash'])\n\n # Support for multiple hashes (spec 2).\n if 'hashes' in deps[dep]:\n hash = '{0} '.format(''.join([' --hash={0} '.format(h) for h in deps[dep]['hashes']]))\n\n # Support for extras (e.g. 
requests[socks])\n if 'extras' in deps[dep]:\n extra = '[{0}]'.format(deps[dep]['extras'][0])\n\n if 'version' in deps[dep]:\n if not deps[dep]['version'] == '*':\n version = deps[dep]['version']\n\n # For lockfile format.\n if 'markers' in deps[dep]:\n specs = '; {0}'.format(deps[dep]['markers'])\n else:\n # For pipfile format.\n specs = []\n for specifier in specifiers:\n if specifier in deps[dep]:\n if not deps[dep][specifier] == '*':\n specs.append('{0} {1}'.format(specifier, deps[dep][specifier]))\n if specs:\n specs = '; {0}'.format(' and '.join(specs))\n else:\n specs = ''\n\n if include_index:\n if 'index' in deps[dep]:\n pip_args = prepare_pip_source_args([project.get_source(deps[dep]['index'])])\n index = ' '.join(pip_args)\n\n # Support for version control\n maybe_vcs = [vcs for vcs in VCS_LIST if vcs in deps[dep]]\n vcs = maybe_vcs[0] if maybe_vcs else None\n\n # Support for files.\n if 'file' in deps[dep]:\n extra = '{1}{0}'.format(extra, deps[dep]['file']).strip()\n\n # Flag the file as editable if it is a local relative path\n if 'editable' in deps[dep]:\n dep = '-e '\n else:\n dep = ''\n\n # Support for paths.\n elif 'path' in deps[dep]:\n extra = '{1}{0}'.format(extra, deps[dep]['path']).strip()\n\n # Flag the file as editable if it is a local relative path\n if 'editable' in deps[dep]:\n dep = '-e '\n else:\n dep = ''\n\n if vcs:\n extra = '{0}+{1}'.format(vcs, deps[dep][vcs])\n\n # Support for @refs.\n if 'ref' in deps[dep]:\n extra += '@{0}'.format(deps[dep]['ref'])\n\n extra += '#egg={0}'.format(dep)\n\n # Support for subdirectory\n if 'subdirectory' in deps[dep]:\n extra += '&subdirectory={0}'.format(deps[dep]['subdirectory'])\n\n # Support for editable.\n if 'editable' in deps[dep]:\n # Support for --egg.\n dep = '-e '\n else:\n dep = ''\n\n s = '{0}{1}{2}{3}{4} {5}'.format(dep, extra, version, specs, hash, index).strip()\n dependencies.append(s)\n if not r:\n return dependencies\n\n # Write requirements.txt to tmp directory.\n f = tempfile.NamedTemporaryFile(suffix='-requirements.txt', delete=False)\n f.write('\\n'.join(dependencies).encode('utf-8'))\n return f.name\n\n\ndef mkdir_p(newdir):\n \"\"\"works the way a good mkdir should :)\n - already exists, silently complete\n - regular file in the way, raise an exception\n - parent directory(ies) does not exist, make them as well\n From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/\n \"\"\"\n\n if os.path.isdir(newdir):\n pass\n elif os.path.isfile(newdir):\n raise OSError(\"a file with the same name as the desired dir, '{0}', already exists.\".format(newdir))\n else:\n head, tail = os.path.split(newdir)\n if head and not os.path.isdir(head):\n mkdir_p(head)\n if tail:\n os.mkdir(newdir)\n\n\ndef is_required_version(version, specified_version):\n \"\"\"Check to see if there's a hard requirement for version\n number provided in the Pipfile.\n \"\"\"\n\n # Certain packages may be defined with multiple values.\n if isinstance(specified_version, dict):\n specified_version = specified_version.get('version', '')\n if specified_version.startswith('=='):\n return version.strip() == specified_version.split('==')[1].strip()\n return True\n\n\ndef is_vcs(pipfile_entry):\n \"\"\"Determine if dictionary entry from Pipfile is for a vcs dependency.\"\"\"\n\n if hasattr(pipfile_entry, 'keys'):\n return any(key for key in pipfile_entry.keys() if key in VCS_LIST)\n elif isinstance(pipfile_entry, six.string_types):\n return pipfile_entry.startswith(VCS_LIST)\n return False\n\n\ndef is_file(package):\n \"\"\"Determine if 
a package name is for a File dependency.\"\"\"\n if hasattr(package, 'keys'):\n return any(key for key in package.keys() if key in ['file', 'path'])\n\n if os.path.exists(str(package)):\n return True\n\n for start in FILE_LIST:\n if str(package).startswith(start):\n return True\n\n return False\n\n\ndef pep440_version(version):\n \"\"\"Normalize version to PEP 440 standards\"\"\"\n\n # Use pip built-in version parser.\n return str(pip.index.parse_version(version))\n\n\ndef pep423_name(name):\n \"\"\"Normalize package name to PEP 423 style standard.\"\"\"\n name = name.lower()\n if any(i not in name for i in (VCS_LIST+FILE_LIST)):\n return name.replace('_', '-')\n else:\n return name\n\n\ndef proper_case(package_name):\n \"\"\"Properly case project name from pypi.org.\"\"\"\n\n # Hit the simple API.\n r = requests.get('https://pypi.org/pypi/{0}/json'.format(package_name), timeout=0.3, stream=True)\n if not r.ok:\n raise IOError('Unable to find package {0} in PyPI repository.'.format(package_name))\n\n r = parse.parse('https://pypi.org/pypi/{name}/json', r.url)\n good_name = r['name']\n\n return good_name\n\n\ndef split_vcs(split_file):\n \"\"\"Split VCS dependencies out from file.\"\"\"\n\n if 'packages' in split_file or 'dev-packages' in split_file:\n sections = ('packages', 'dev-packages')\n elif 'default' in split_file or 'develop' in split_file:\n sections = ('default', 'develop')\n\n # For each vcs entry in a given section, move it to section-vcs.\n for section in sections:\n entries = split_file.get(section, {})\n vcs_dict = dict((k, entries.pop(k)) for k in list(entries.keys()) if is_vcs(entries[k]))\n split_file[section + '-vcs'] = vcs_dict\n\n return split_file\n\n\ndef recase_file(file_dict):\n \"\"\"Recase file before writing to output.\"\"\"\n\n if 'packages' in file_dict or 'dev-packages' in file_dict:\n sections = ('packages', 'dev-packages')\n elif 'default' in file_dict or 'develop' in file_dict:\n sections = ('default', 'develop')\n\n for section in sections:\n file_section = file_dict.get(section, {})\n\n # Try to properly case each key if we can.\n for key in list(file_section.keys()):\n try:\n cased_key = proper_case(key)\n except IOError:\n cased_key = key\n file_section[cased_key] = file_section.pop(key)\n\n return file_dict\n\n\ndef get_windows_path(*args):\n \"\"\"Sanitize a path for windows environments\n\n Accepts an arbitrary list of arguments and makes a clean windows path\"\"\"\n return os.path.normpath(os.path.join(*args))\n\n\ndef find_windows_executable(bin_path, exe_name):\n \"\"\"Given an executable name, search the given location for an executable\"\"\"\n requested_path = get_windows_path(bin_path, exe_name)\n if os.path.exists(requested_path):\n return requested_path\n\n # Ensure we aren't adding two layers of file extensions\n exe_name = os.path.splitext(exe_name)[0]\n files = ['{0}.{1}'.format(exe_name, ext) for ext in ['', 'py', 'exe', 'bat']]\n exec_paths = [get_windows_path(bin_path, f) for f in files]\n exec_files = [filename for filename in exec_paths if os.path.isfile(filename)]\n return exec_files[0]\n\n\ndef walk_up(bottom):\n \"\"\"Mimic os.walk, but walk 'up' instead of down the directory tree.\n From: https://gist.github.com/zdavkeos/1098474\n \"\"\"\n\n bottom = os.path.realpath(bottom)\n\n # Get files in current dir.\n try:\n names = os.listdir(bottom)\n except Exception:\n return\n\n dirs, nondirs = [], []\n for name in names:\n if os.path.isdir(os.path.join(bottom, name)):\n dirs.append(name)\n else:\n nondirs.append(name)\n\n yield 
bottom, dirs, nondirs\n\n new_path = os.path.realpath(os.path.join(bottom, '..'))\n\n # See if we are at the top.\n if new_path == bottom:\n return\n\n for x in walk_up(new_path):\n yield x\n\n\ndef find_requirements(max_depth=3):\n \"\"\"Returns the path of a Pipfile in parent directories.\"\"\"\n\n i = 0\n for c, d, f in walk_up(os.getcwd()):\n i += 1\n\n if i < max_depth:\n if 'requirements.txt':\n r = os.path.join(c, 'requirements.txt')\n if os.path.isfile(r):\n return r\n raise RuntimeError('No requirements.txt found!')\n\n\n# Borrowed from pew to avoid importing pew which imports psutil\n# See https://github.com/berdario/pew/blob/master/pew/_utils.py#L82\n@contextmanager\ndef temp_environ():\n \"\"\"Allow the ability to set os.environ temporarily\"\"\"\n environ = dict(os.environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(environ)\n\ndef is_valid_url(url):\n \"\"\"Checks if a given string is an url\"\"\"\n pieces = urlparse(url)\n return all([pieces.scheme, pieces.netloc])\n\n\ndef download_file(url, filename):\n \"\"\"Downloads file from url to a path with filename\"\"\"\n r = requests.get(url, stream=True)\n if not r.ok:\n raise IOError('Unable to download file')\n\n r.raw.decode_content = True\n with open(filename, 'wb') as f:\n shutil.copyfileobj(r.raw, f)\n", "path": "pipenv/utils.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nimport os\nimport hashlib\nimport tempfile\nimport sys\nimport shutil\nimport logging\n\nimport click\nimport crayons\nimport delegator\nimport pip\nimport parse\nimport requirements\nimport fuzzywuzzy.process\nimport requests\nimport six\n\nlogging.basicConfig(level=logging.ERROR)\n\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\nfrom contextlib import contextmanager\nfrom piptools.resolver import Resolver\nfrom piptools.repositories.pypi import PyPIRepository\nfrom piptools.scripts.compile import get_pip_command\nfrom piptools import logging\nfrom piptools.exceptions import NoCandidateFound\nfrom pip.exceptions import DistributionNotFound\nfrom requests.exceptions import HTTPError\n\nfrom .pep508checker import lookup\nfrom .environments import SESSION_IS_INTERACTIVE, PIPENV_MAX_ROUNDS\n\nspecifiers = [k for k in lookup.keys()]\n\n# List of version control systems we support.\nVCS_LIST = ('git', 'svn', 'hg', 'bzr')\nFILE_LIST = ('http://', 'https://', 'ftp://', 'file:///')\n\nrequests = requests.Session()\n\npackages = [\n 'simplejson', 'six', 'botocore', 'python-dateutil', 'pyasn1', 'setuptools',\n 'requests', 'pyyaml', 'docutils', 's3transfer', 'futures', 'pip',\n 'jmespath', 'awscli', 'rsa', 'colorama', 'idna', 'certifi', 'urllib3',\n 'chardet', 'cffi', 'awscli-cwlogs', 'wheel', 'pycparser', 'enum34', 'pbr',\n 'cryptography', 'virtualenv', 'pytz', 'setuptools-scm', 'jinja2',\n 'ipaddress', 'markupsafe', 'boto3', 'asn1crypto', 'boto', 'paramiko',\n 'ptyprocess', 'pexpect', 'pytest-runner', 'psutil', 'flask', 'werkzeug',\n 'bcrypt', 'pynacl', 'sqlalchemy', 'click', 'numpy', 'pyparsing', 'lxml',\n 'pyopenssl', 'future', 'decorator', 'vcversioner', 'mock', 'argparse',\n 'pyasn1-modules', 'jsonschema', 'funcsigs', 'nose', 'tornado', 'httplib2',\n 'protobuf', 'pandas', 'coverage', 'psycopg2', 'pygments', 'oauth2client',\n 'singledispatch', 'itsdangerous', 'pytest', 'functools32', 'docopt',\n 'mccabe', 'babel', 'pillow', 'grpcio', 'backports-abc', 'public',\n 'query-string', 'redis', 'zope-interface',\n 'pyflakes', 'pycrypto', 'wrapt', 'django', 'selenium', 'flake8',\n 'html5lib', 'elasticsearch', 'markdown', 'pycodestyle',\n 'backports-ssl-match-hostname', 'scipy', 'websocket-client', 'lockfile',\n 'ipython', 'beautifulsoup4', 'gevent', 'uritemplate', 'pymysql',\n 'configparser', 'kombu', 'arrow', 'scikit-learn', 'greenlet', 'amqp',\n 'wcwidth', 'googleapis-common-protos', 'bleach',\n 'google-api-python-client', 'gunicorn', 'gitpython', 'typing',\n 'prompt-toolkit', 'google-cloud-core', 'google-gax', 'requests-oauthlib',\n 'stevedore', 'ordereddict', 'traitlets', 'packaging', 'pymongo',\n 'ipython-genutils', 'appdirs', 'celery', 'google-auth', 'cython',\n 'billiard', 'xmltodict', 'pickleshare', 'unittest2', 'simplegeneric',\n 'msgpack-python', 'snowballstemmer', 'sphinx', 'matplotlib', 'pep8',\n 'pylint', 'netaddr', 'flask-restful', 'oauthlib', 'linecache2', 'ply',\n 'traceback2', 'alabaster', 'monotonic', 'olefile', 'isort', 'astroid',\n 'pyjwt', 'lazy-object-proxy', 'imagesize', 'smmap2', 'gitdb2',\n 'incremental', 'contextlib2', 'ndg-httpsclient', 'ujson', 'unidecode',\n 'raven', 'blessings', 'docker-pycreds', 'ansible', 'vine', 'mako',\n 'netifaces', 'retrying', 'attrs', 'requests-toolbelt', 'supervisor',\n 'python-daemon', 'sqlparse', 'prettytable', 'iso8601', 'pytest-cov',\n 'cycler', 'cachetools', 'pyzmq', 'tabulate', 'google-cloud-logging',\n 'tqdm', 'mozsystemmonitor', 
'gapic-google-cloud-logging-v2',\n 'blobuploader', 'tzlocal', 'tox', 'pluggy', 'xlrd', 'configobj',\n 'djangorestframework', 'webencodings', 'unicodecsv', 'grpcio-tools',\n 'pystache', 'meld3', 'mysql-python', 'uwsgi', 'oslo-utils',\n 'grpc-google-cloud-logging-v2', 'oslo-i18n', 'nbformat', 'statsd',\n 'debtcollector', 'docker-py', 'oslo-config', 'sphinxcontrib-websupport',\n 'pathlib2', 'parsedatetime', 'ecdsa', 'oslo-serialization',\n 'configargparse', 'backports-weakref', 'backports-functools-lru-cache',\n 'alembic', 'jupyter-core', 'cached-property', 'scandir', 'rfc3986',\n 'frida', 'subprocess32', 'keystoneauth1', 'thrift', 'jedi', 'ccxt',\n 'fabric', 'mistune', 'dnspython', 'service-identity', 'datadog',\n 'python-magic', 'altgraph', 'twisted', 'openpyxl', 'webob', 'macholib',\n 'docker', 'regex', 'python-keystoneclient',\n 'backports-shutil-get-terminal-size', 'zope-component', 'python-editor',\n 'zope-event', 'isodate', 'tensorflow', 'pika', 'anyjson', 'tldextract',\n 'tensorflow-tensorboard', 'pyrfc3339', 'requests-file', 'networkx',\n 'easyprocess', 'dockerpty', 'texttable', 'positional', 'python-augeas',\n 'acme', 'jdcal', 'mmh3', 'dill', 'certbot', 'termcolor', 'nbconvert',\n 'certbot-apache', 'ipykernel', 'python-mimeparse', 'ruamel-yaml',\n 'et-xmlfile', 'letsencrypt', 'opencv-python', 'cmd2', 'w3lib', 'cliff',\n 'jupyter-client', 'ipywidgets', 'passlib', 'gcloud', 'cssselect',\n 'notebook', 'python-swiftclient', 'widgetsnbextension', 'entrypoints',\n 'flask-sqlalchemy', 'kazoo', 'defusedxml', 'pandocfilters', 'python-gflags',\n 'testpath', 'python-memcached', 'keras', 'jsonpatch', 'python-novaclient',\n 'sympy', 'qtconsole', 'freezegun', 'whichcraft', 'docker-compose',\n 'binaryornot', 'blinker', 'cookiecutter', 'azure-common', 'jinja2-time',\n 'poyo', 'certbot-nginx', 'nltk', 'google-cloud-storage', 'sklearn',\n 'pyhocon', 'django-extensions', 'ua-parser', 'os-client-config',\n 'jupyter-console', 'inflection', 'newrelic', 'tempita', 'azure-nspkg',\n 'codecov', 'argh', 'sqlalchemy-migrate', 'requestsexceptions', 'geopy',\n 'azure-storage', 'pytest-xdist', 'jupyter', 'grpc-google-pubsub-v1',\n 'faker', 'execnet', 'constantly', 'grpc-google-logging-v2', 'automat',\n 'argcomplete', 'apipkg', 'wtforms', 'sphinx-rtd-theme', 'aiohttp',\n 'hyperlink', 'py4j', 'multidict', 'django-filter', 'coala', 'crcmod',\n 'jsonpointer', 'pytesseract', 'gax-google-pubsub-v1',\n 'gax-google-logging-v2', 'distribute', 'patsy', 'flask-wtf', 'waitress',\n 'coveralls', 'pyaml', 'bz2file', 'hjson', 'fake-useragent', 'terminado',\n 'pyperclip', 'repoze-lru', 'mysqlclient', 'smart-open', 'theano', 'pycurl',\n 'sqlobject', 'python-glanceclient', 'paste', 'python-cinderclient',\n 'pathspec', 'watchdog', 'testtools', 'plotly', 'python-openstackclient',\n 'scrapy-crawlera', 'pathtools', 'azure', 'flask-login', 'aniso8601',\n 'google-resumable-media', 'python-jenkins', 'slacker', 'xlsxwriter',\n 'async-timeout', 'pyserial', 'openstacksdk', 'python-jose', 'tenacity',\n 'python-slugify', 'keyring', 'pkginfo', 'pastedeploy', 'seaborn',\n 'eventlet', 'google-cloud-bigquery', 'h5py', 'aws-requests-auth',\n 'maxminddb', 's3cmd', 'django-debug-toolbar', 'flask-script',\n 'multi-key-dict', 'fuzzywuzzy', 'fasteners', 'youtube-dl',\n 'pycryptodome', 'smmap', 'gitdb', 'setuptools-git', 'pager',\n 'python-subunit', 'warlock', 'extras', 'capstone', 'httpretty',\n 'factory_boy', 'webtest', 'django-cors-headers', 'codeintel', 'suds',\n 'pyodbc', 'geoip2', 'filechunkio', 'fixtures', 'pysocks', 'statsmodels',\n 
'google-auth-httplib2', 'kafka-python', 'applicationinsights', 'yarl',\n 'cassandra-driver', 'azure-mgmt-compute', 'pathlib', 'python-jwt', 'sh',\n 'flask-cors', 'shapely', 'twine', 'taskcluster', 'enum-compat',\n 'python-twitter', 'cookiejar', 'cookies', 'semantic-version', 'slugid',\n 'suds-jurko', 'joblib', 'azure-mgmt-network', 'azure-mgmt-resource',\n 'hiredis', 'pyhawk-with-a-single-extra-commit', 'jws', 'moto', 'bokeh',\n 'ipaddr', 'invoke', 'azure-mgmt-storage', 'pyxdg', 'azure-mgmt-nspkg',\n 'pytest-mock', 'google-cloud-pubsub', 'send2trash', 'yarg', 'subliminal',\n 'pydevd', 'xlwt', 'user-agents', 'python-fanart', 'bs4', 'rtorrent-python',\n 'django-storages', 'tmdbsimple', 'autopep8', 'pysftp', 'ipdb',\n 'setproctitle', 'osc-lib', 'importlib', 'validate-email', 'django-appconf',\n 'bottle', 'hgtools', 'stripe', 'azure-servicebus', 'marshmallow',\n 'voluptuous', 'ptvsd', 'jsonpickle', 'reportlab', 'python-geohash',\n 'dicttoxml', 'ddt', 'secretstorage', 'pytest-django', 'flexget',\n 'httpagentparser', 'beautifulsoup', 'azure-mgmt', 'haversine',\n 'flower', 'sortedcontainers', 'requests-mock',\n 'azure-servicemanagement-legacy', 'flask-migrate', 'pyinotify',\n 'carbon', 'zc-buildout', 'unittest-xml-reporting', 'parse', 'hacking',\n 'mxnet', 'qds-sdk', 'twilio', 'gspread', 'oslo-log', 'pytest-timeout',\n 'python-heatclient', 'oslo-context', 'numexpr', 'toolz', 'adal',\n 'troposphere', 'humanfriendly', 'path-py', 'dogpile-cache', 'plumbum',\n 'gapic-google-cloud-pubsub-v1', 'graphite-web', 'grpc-google-iam-v1',\n 'deprecation', 'mpmath', 'oslo-concurrency', 'feedparser', 'python-ldap',\n 'proto-google-cloud-pubsub-v1', 'pyzabbix', 'humanize', 'colorlog',\n 'msrestazure', 'msrest', 'python-ironicclient', 'pycountry',\n 'email-validator', 'hypothesis', 'coala-bears', 'phonenumbers',\n 'dj-database-url', 'elasticsearch-dsl', 'responses',\n 'python-neutronclient', 'sasl', 'django-nose', 'munch', 'pydns',\n 'proto-google-cloud-datastore-v1', 'apscheduler', 'django-redis',\n 'pytest-forked', 'python-levenshtein', 'dateparser',\n 'google-cloud-datastore', 'pytimeparse', 'pytest-html',\n 'virtualenv-clone', 'zope-deprecation', 'django-rest-swagger',\n 'whitenoise', 'gensim', 'python-consul', 'pypdf2', 'pydispatcher',\n 'scp', 'requires', 'cement', 'cx-oracle', 'graphviz', 'slackclient',\n 'hponeview', 'croniter', 'cssutils', 'appier', 'jsonpath-rw',\n 'requests-futures', 'mrjob', 'cachet', 'influxdb', 'virtualenvwrapper',\n 'appnope', 'pymssql', 'testfixtures', 'glob2', 'django-model-utils',\n 'awsebcli', 'tweepy', 'gapic-google-cloud-datastore-v1', 'coreapi',\n 'bkcharts', 'requests-ntlm', 'sqlalchemy-utils', 'more-itertools',\n 'testrepository', 'blessed', 'jsonfield', 'logilab-common',\n 'flake8-import-order', 'parse-type', 'clint', 'queuelib', 'robotframework',\n 'python-gnupg', 'tensorflow-gpu', 'jira', 'gcdt-bundler',\n 'azure-mgmt-redis', 'avro', 'args', 'pythonwhois', 'pyhamcrest',\n 'scrapy', 'ruamel-ordereddict', 'retry', 'azure-mgmt-batch',\n 'azure-batch', 'junit-xml', 'django-compressor', 'pyvirtualdisplay',\n 'python-openid', 'itypes', 'flask-cache', 'azure-mgmt-keyvault',\n 'pip-tools', 'apache-libcloud', 'inflect', 'django-celery', 'routes',\n 'google-apputils', 'bitarray', 'websockets', 'cherrypy', 'pyhive',\n 'os-testr', 'whoosh', 'django-braces', 'findspark', 'parsel',\n 'zope-exceptions', 'coreschema', 'ntlm-auth', 'fake-factory',\n 'enum', 'googleads', 'iptools', 'google-cloud-translate',\n 'google-cloud', 'pywinrm', 'google-cloud-vision', 'google-cloud-language',\n 
'brotlipy', 'google-cloud-bigtable', 'google-cloud-error-reporting',\n 'oslo-messaging', 'zope-testrunner', 'google-cloud-monitoring', 'awacs',\n 'pydocstyle', 'lmdb', 'django-crispy-forms', 'jellyfish',\n 'google-cloud-speech', 'google-cloud-runtimeconfig', 'testscenarios',\n 'first', 'py-zabbix', 'bcdoc', 'azure-mgmt-web', 'google-cloud-dns',\n 'google-cloud-resource-manager', 'google-compute-engine', 'oslo-db',\n 'autobahn', 'ldap3', 'azure-mgmt-monitor', 'proto-google-cloud-logging-v2',\n 'azure-mgmt-trafficmanager', 'pypiwin32', 'azure-mgmt-cdn',\n 'oslo-middleware', 'azure-mgmt-authorization', 'google-cloud-spanner',\n 'python-json-logger', 'datetime', 'eggtestinfo', 'thriftpy', 'nosexcover',\n 'falcon', 'csvkit', 'ggplot', 'pyramid', 'pg8000', 'munkres', 'futurist',\n 'ciso8601', 'azure-graphrbac', 'python-dotenv', 'py2-ipaddress', 'peewee',\n 'brewer2mpl', 'dulwich', 'zeep', 'azure-mgmt-cognitiveservices',\n 'translationstring', 'sendgrid', 'xgboost', 'aws', 'prometheus-client',\n 'runcython', 'azure-mgmt-sql', 'kubernetes', 'oslo-service', 'annoy',\n 'oauth2', 'dbfread', 'mox3', 'wincertstore', 'initools', 'scikit-image',\n 'backport-collections', 'commonmark', 'pyproj', 'behave', 'qrcode',\n 'azure-mgmt-dns', 'azure-datalake-store',\n 'gapic-google-cloud-error-reporting-v1beta1', 'requests-aws4auth',\n 'flask-admin', 'pygame', 'cov-core', 'gapic-google-cloud-spanner-v1',\n 'agate', 'gapic-google-cloud-spanner-admin-database-v1',\n 'openstackdocstheme', 'azure-mgmt-containerregistry',\n 'djangorestframework-jwt',\n 'proto-google-cloud-error-reporting-v1beta1',\n 'proto-google-cloud-spanner-admin-database-v1',\n 'gapic-google-cloud-spanner-admin-instance-v1',\n 'azure-mgmt-datalake-store', 'proto-google-cloud-spanner-v1',\n 'proto-google-cloud-spanner-admin-instance-v1', 'runtime',\n 'azure-mgmt-datalake-analytics', 'oslotest', 'txaio', 'django-mptt',\n 'azure-keyvault', 'azure-mgmt-iothub', 'azure-mgmt-documentdb',\n 'oslo-policy', 'shade', 'pywavelets', 'flask-mail',\n 'azure-mgmt-devtestlabs', 'atx', 'azure-mgmt-scheduler', 'wand',\n 'azure-mgmt-datalake-nspkg', 'azure-mgmt-rdbms', 'empy',\n 'azure-mgmt-common', 'venusian', 'cairocffi', 'pysubnettree',\n 'agate-excel', 'toml', 'pyvmomi', 'oslosphinx', 'cchardet',\n 'requesocks', 'agate-dbf', 'openapi-codec', 'pylibmc', 'reno',\n 'httpbin', 'google-cloud-videointelligence', 'udatetime', 'pyroute2',\n 'flake8-docstrings', 'autograd', 'nodeenv', 'logutils', 'rq',\n 'azure-servicefabric', 'mongoengine', 'pycryptodomex', 'azure-mgmt-logic',\n 'leather', 'agate-sql', 'python-logstash', 'delorean', 'thrift-sasl',\n 'jpype1', 'shutit', 'wordsegment', 'flufl-enum', 'rjsmin', 'html2text',\n 'watchtower', 'pymeta3', 'netius', 'cairosvg', 'pybars3', 'recommonmark',\n 'uritemplate-py', 'fakeredis', 'python3-openid', 'filelock', 'jsmin',\n 'pipenv', 'django-environ', 'pyhs2', 'pep8-naming', 'typed-ast', 'pyusb',\n 'dedupe', 'dateutils', 'tablib', 'luigi', 'pysnmp', 'prettyplotlib',\n 'pre-commit', 'polib', 'jenkinsapi', 'rcssmin', 'ptable', 'multiprocess',\n 'pymc', 'pytest-metadata', 'django-oauth-toolkit', 'django-allauth',\n 'pygithub', 'python-crfsuite', 'python-cdb', 'pydas', 'pytest-cache',\n 'pyspin', 'pypi-publisher', 'pika-pool', 'pulp', 'pyinstaller',\n 'profilehooks', 'jenkins-job-builder', 'clickclick', 'urwid', 'pep257',\n 'sirepo', 'bandit', 'google-apitools', 'zope-proxy', 'cvxopt',\n 'pytest-catchlog', 'pybrain', 'gdata', 'toil', 'mypy',\n 'python2-pythondialog', 'pypng', 'sure', 'yamllint',\n 
'robotframework-selenium2library', 'll-xist', 'tempora', 'webassets',\n 'pycadf', 'dropbox', 'pypandoc', 'django-taggit', 'paho-mqtt',\n 'keystonemiddleware', 'livereload', 'psycogreen', 'geocoder', 'ftfy',\n 'yapf', 'glances', 'grequests', 'coloredlogs', 'python-http-client',\n 'parsley', 'nose-exclude', 'transaction', 'flask-swagger', 'homeassistant',\n 'hvac', 'vcrpy', 'github3-py', 'schematics', 'tinycss',\n 'swagger-spec-validator', 'progressbar2', 'pydot', 'backoff', 'pytsite',\n 'scapy', 'attrdict', 'shellescape', 'impyla', 'flatten-dict',\n 'requests-kerberos', 'pykerberos', 'repoze-who', 'mxnet-mkl', 'cssmin',\n 'dask', 'cheroot', 'flake8-polyfill', 'pyotp', 'python-designateclient',\n 'simple-salesforce', 'hupper', 'neutron-lib', 'wavefront-cli', 'deepdiff',\n 'connexion', 'phonenumberslite', 'natsort', 'tox-travis', 'btrees',\n 'rednose', 'flask-testing', 'premailer', 'shortuuid', 'django-countries',\n 'ocflib', 'pylint-plugin-utils', 'pyenchant', 'logging', 'pysmi',\n 'appier-extras', 'zc-recipe-egg', 'oslo-rootwrap', 'flaky', 'libsass',\n 'oslo-versionedobjects', 'ipy', 'pecan', 'diff-match-patch',\n 'oslo-reports', 'google', 'aspen', 'rollbar', 'cobra',\n 'restructuredtext-lint', 'pythonnet', 'line-profiler', 'trollius',\n 'django-bootstrap3', 'pygeoip', 'django-picklefield', 'django-reversion',\n 'cytoolz', 'beaker', 'tooz', 'flask-assets', 'uuid', 'osprofiler',\n 'bitstring', 'naked', 'flask-babel', 'plac', 'semver', 'django-formtools',\n 'python-snappy', 'persistent', 'terminaltables', 'taskflow', 'boxsdk',\n 'cerberus', 'flask-principal', 'thinc', 'spacy', 'pycares', 'pylru',\n 'kafka', 'pkgconfig', 'couchbase', 'python-utils', 'django-localflavor',\n 'django-redis-cache', 'webapp2', 'sqlalchemy-redshift', 'salt',\n 'structlog', 'mandrill', 'googlemaps', 'easy-thumbnails', 'automaton',\n 'webcolors'\n]\n\n\ndef cleanup_toml(tml):\n toml = tml.split('\\n')\n new_toml = []\n\n # Remove all empty lines from TOML.\n for line in toml:\n if line.strip():\n new_toml.append(line)\n\n toml = '\\n'.join(new_toml)\n new_toml = []\n\n # Add newlines between TOML sections.\n for i, line in enumerate(toml.split('\\n')):\n after = False\n # Skip the first line.\n if line.startswith('['):\n if i > 0:\n # Insert a newline before the heading.\n new_toml.append('\\n')\n after = True\n\n new_toml.append(line)\n # Insert a newline after the heading.\n if after:\n new_toml.append('')\n \n # adding new line at the end of the TOML file\n new_toml.append('')\n toml = '\\n'.join(new_toml)\n return toml\n\n\ndef suggest_package(package):\n \"\"\"Suggests a package name, given a package name.\"\"\"\n if SESSION_IS_INTERACTIVE:\n\n if ('-' in package) or ('[' in package) or ('+' in package):\n THRESHOLD = 90\n else:\n THRESHOLD = 86\n\n # Bypass for speed.\n if package in packages:\n return package\n\n result = fuzzywuzzy.process.extractOne(package, packages)\n\n if result[1] > THRESHOLD:\n return result[0]\n\n\ndef python_version(path_to_python):\n if not path_to_python:\n return None\n\n try:\n c = delegator.run([path_to_python, '--version'], block=False)\n except Exception:\n return None\n output = c.out.strip() or c.err.strip()\n\n @parse.with_pattern(r'.*')\n def allow_empty(text):\n return text\n\n TEMPLATE = 'Python {}.{}.{:d}{:AllowEmpty}'\n parsed = parse.parse(TEMPLATE, output, dict(AllowEmpty=allow_empty))\n if parsed:\n parsed = parsed.fixed\n else:\n return None\n\n return u\"{v[0]}.{v[1]}.{v[2]}\".format(v=parsed)\n\n\ndef shellquote(s):\n \"\"\"Prepares a string for the shell (on 
Windows too!)\"\"\"\n if s is None:\n return None\n # Additional escaping for windows paths\n if os.name == 'nt':\n s = \"{}\".format(s.replace(\"\\\\\", \"\\\\\\\\\"))\n\n return '\"' + s.replace(\"'\", \"'\\\\''\") + '\"'\n\n\ndef clean_pkg_version(version):\n \"\"\"Uses pip to prepare a package version string, from our internal version.\"\"\"\n return six.u(pep440_version(str(version).replace('==', '')))\n\n\nclass HackedPythonVersion(object):\n \"\"\"A Beautiful hack, which allows us to tell pip which version of Python we're using.\"\"\"\n def __init__(self, python_version, python_path):\n self.python_version = python_version\n self.python_path = python_path\n\n def __enter__(self):\n os.environ['PIP_PYTHON_VERSION'] = str(self.python_version)\n os.environ['PIP_PYTHON_PATH'] = str(self.python_path)\n\n def __exit__(self, *args):\n # Restore original Python version information.\n del os.environ['PIP_PYTHON_VERSION']\n\n\ndef prepare_pip_source_args(sources, pip_args=None):\n if pip_args is None:\n pip_args = []\n\n if sources:\n # Add the source to pip.\n pip_args.extend(['-i', sources[0]['url']])\n\n # Trust the host if it's not verified.\n if not sources[0].get('verify_ssl', True):\n pip_args.extend(['--trusted-host', urlparse(sources[0]['url']).netloc.split(':')[0]])\n\n # Add additional sources as extra indexes.\n if len(sources) > 1:\n for source in sources[1:]:\n pip_args.extend(['--extra-index-url', source['url']])\n\n # Trust the host if it's not verified.\n if not source.get('verify_ssl', True):\n pip_args.extend(['--trusted-host', urlparse(source['url']).netloc.split(':')[0]])\n\n return pip_args\n\n\ndef resolve_deps(deps, which, which_pip, project, sources=None, verbose=False, python=False, clear=False, pre=False):\n \"\"\"Given a list of dependencies, return a resolved list of dependencies,\n using pip-tools -- and their hashes, using the warehouse API / pip.\n \"\"\"\n\n index_lookup = {}\n markers_lookup = {}\n\n python_path = which('python')\n\n with HackedPythonVersion(python_version=python, python_path=python_path):\n\n class PipCommand(pip.basecommand.Command):\n \"\"\"Needed for pip-tools.\"\"\"\n name = 'PipCommand'\n\n constraints = []\n\n for dep in deps:\n t = tempfile.mkstemp(prefix='pipenv-', suffix='-requirement.txt')[1]\n with open(t, 'w') as f:\n f.write(dep)\n\n if dep.startswith('-e '):\n constraint = pip.req.InstallRequirement.from_editable(dep[len('-e '):])\n else:\n constraint = [c for c in pip.req.parse_requirements(t, session=pip._vendor.requests)][0]\n # extra_constraints = []\n\n if ' -i ' in dep:\n index_lookup[constraint.name] = project.get_source(url=dep.split(' -i ')[1]).get('name')\n\n if constraint.markers:\n markers_lookup[constraint.name] = str(constraint.markers).replace('\"', \"'\")\n\n constraints.append(constraint)\n\n pip_command = get_pip_command()\n\n pip_args = []\n\n if sources:\n pip_args = prepare_pip_source_args(sources, pip_args)\n\n if verbose:\n print('Using pip: {0}'.format(' '.join(pip_args)))\n\n pip_options, _ = pip_command.parse_args(pip_args)\n\n session = pip_command._build_session(pip_options)\n pypi = PyPIRepository(pip_options=pip_options, session=session)\n\n if verbose:\n logging.log.verbose = True\n\n results = []\n resolved_tree = set()\n\n resolver = Resolver(constraints=constraints, repository=pypi, clear_caches=clear, prereleases=pre)\n # pre-resolve instead of iterating to avoid asking pypi for hashes of editable packages\n try:\n resolved_tree.update(resolver.resolve(max_rounds=PIPENV_MAX_ROUNDS))\n except 
(NoCandidateFound, DistributionNotFound, HTTPError) as e:\n click.echo(\n '{0}: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.\\n '\n 'You can use {1} to bypass this mechanism, then run {2} to inspect the situation.'\n ''.format(\n crayons.red('Warning', bold=True),\n crayons.red('$ pipenv install --skip-lock'),\n crayons.red('$ pipenv graph')\n ),\n err=True)\n click.echo(crayons.blue(e))\n sys.exit(1)\n\n for result in resolved_tree:\n if not result.editable:\n name = pep423_name(result.name)\n version = clean_pkg_version(result.specifier)\n index = index_lookup.get(result.name)\n\n if not markers_lookup.get(result.name):\n markers = str(result.markers) if result.markers and 'extra' not in str(result.markers) else None\n else:\n markers = markers_lookup.get(result.name)\n\n collected_hashes = []\n if 'python.org' in '|'.join([source['url'] for source in sources]):\n try:\n # Grab the hashes from the new warehouse API.\n r = requests.get('https://pypi.org/pypi/{0}/json'.format(name), timeout=10)\n api_releases = r.json()['releases']\n\n cleaned_releases = {}\n for api_version, api_info in api_releases.items():\n cleaned_releases[clean_pkg_version(api_version)] = api_info\n\n for release in cleaned_releases[version]:\n collected_hashes.append(release['digests']['sha256'])\n\n collected_hashes = ['sha256:' + s for s in collected_hashes]\n\n # Collect un-collectable hashes.\n if not collected_hashes:\n collected_hashes = list(list(resolver.resolve_hashes([result]).items())[0][1])\n\n except (ValueError, KeyError):\n if verbose:\n print('Error fetching {}'.format(name))\n\n d = {'name': name, 'version': version, 'hashes': collected_hashes}\n\n if index:\n d.update({'index': index})\n\n if markers:\n d.update({'markers': markers.replace('\"', \"'\")})\n\n results.append(d)\n\n return results\n\n\ndef multi_split(s, split):\n \"\"\"Splits on multiple given separators.\"\"\"\n\n for r in split:\n s = s.replace(r, '|')\n\n return [i for i in s.split('|') if len(i) > 0]\n\n\ndef convert_deps_from_pip(dep):\n \"\"\"\"Converts a pip-formatted dependency to a Pipfile-formatted one.\"\"\"\n\n dependency = {}\n\n req = [r for r in requirements.parse(dep)][0]\n extras = {'extras': req.extras}\n\n # File installs.\n if (req.uri or (os.path.isfile(req.path) if req.path else False) or\n os.path.isfile(req.name)) and not req.vcs:\n # Assign a package name to the file, last 7 of it's sha256 hex digest.\n if not req.uri and not req.path:\n req.path = os.path.abspath(req.name)\n\n hashable_path = req.uri if req.uri else req.path\n req.name = hashlib.sha256(hashable_path.encode('utf-8')).hexdigest()\n req.name = req.name[len(req.name) - 7:]\n\n # {path: uri} TOML (spec 4 I guess...)\n if req.uri:\n dependency[req.name] = {'file': hashable_path}\n else:\n dependency[req.name] = {'path': hashable_path}\n\n # Add --editable if applicable\n if req.editable:\n dependency[req.name].update({'editable': True})\n\n # VCS Installs. Extra check for unparsed git over SSH\n if req.vcs or is_vcs(req.path):\n if req.name is None:\n raise ValueError('pipenv requires an #egg fragment for version controlled '\n 'dependencies. Please install remote dependency '\n 'in the form {0}#egg=<package-name>.'.format(req.uri))\n\n # Extras: e.g. 
#egg=requests[security]\n if req.extras:\n dependency[req.name] = extras\n\n # Set up this requirement as a proper VCS requirement if it was not\n if not req.vcs and req.path.startswith(VCS_LIST):\n req.vcs = [vcs for vcs in VCS_LIST if req.path.startswith(vcs)][0]\n req.uri = '{0}'.format(req.path)\n req.path = None\n\n # Crop off the git+, etc part.\n dependency.setdefault(req.name, {}).update({req.vcs: req.uri[len(req.vcs) + 1:]})\n\n # Add --editable, if it's there.\n if req.editable:\n dependency[req.name].update({'editable': True})\n\n # Add subdirectory, if it's there\n if req.subdirectory:\n dependency[req.name].update({'subdirectory': req.subdirectory})\n\n # Add the specifier, if it was provided.\n if req.revision:\n dependency[req.name].update({'ref': req.revision})\n\n elif req.extras or req.specs:\n\n specs = None\n # Comparison operators: e.g. Django>1.10\n if req.specs:\n r = multi_split(dep, '!=<>~')\n specs = dep[len(r[0]):]\n dependency[req.name] = specs\n\n # Extras: e.g. requests[socks]\n if req.extras:\n dependency[req.name] = extras\n\n if specs:\n dependency[req.name].update({'version': specs})\n\n # Bare dependencies: e.g. requests\n else:\n dependency[dep] = '*'\n\n # Cleanup when there's multiple values, e.g. -e.\n if len(dependency) > 1:\n for key in dependency.copy():\n if not hasattr(dependency[key], 'keys'):\n del dependency[key]\n\n return dependency\n\n\ndef convert_deps_to_pip(deps, project=None, r=True, include_index=False):\n \"\"\"\"Converts a Pipfile-formatted dependency to a pip-formatted one.\"\"\"\n\n dependencies = []\n\n for dep in deps.keys():\n\n # Default (e.g. '>1.10').\n extra = deps[dep] if isinstance(deps[dep], six.string_types) else ''\n version = ''\n index = ''\n\n # Get rid of '*'.\n if deps[dep] == '*' or str(extra) == '{}':\n extra = ''\n\n hash = ''\n # Support for single hash (spec 1).\n if 'hash' in deps[dep]:\n hash = ' --hash={0}'.format(deps[dep]['hash'])\n\n # Support for multiple hashes (spec 2).\n if 'hashes' in deps[dep]:\n hash = '{0} '.format(''.join([' --hash={0} '.format(h) for h in deps[dep]['hashes']]))\n\n # Support for extras (e.g. 
requests[socks])\n if 'extras' in deps[dep]:\n extra = '[{0}]'.format(deps[dep]['extras'][0])\n\n if 'version' in deps[dep]:\n if not deps[dep]['version'] == '*':\n version = deps[dep]['version']\n\n # For lockfile format.\n if 'markers' in deps[dep]:\n specs = '; {0}'.format(deps[dep]['markers'])\n else:\n # For pipfile format.\n specs = []\n for specifier in specifiers:\n if specifier in deps[dep]:\n if not deps[dep][specifier] == '*':\n specs.append('{0} {1}'.format(specifier, deps[dep][specifier]))\n if specs:\n specs = '; {0}'.format(' and '.join(specs))\n else:\n specs = ''\n\n if include_index:\n if 'index' in deps[dep]:\n pip_args = prepare_pip_source_args([project.get_source(deps[dep]['index'])])\n index = ' '.join(pip_args)\n\n # Support for version control\n maybe_vcs = [vcs for vcs in VCS_LIST if vcs in deps[dep]]\n vcs = maybe_vcs[0] if maybe_vcs else None\n\n # Support for files.\n if 'file' in deps[dep]:\n extra = '{1}{0}'.format(extra, deps[dep]['file']).strip()\n\n # Flag the file as editable if it is a local relative path\n if 'editable' in deps[dep]:\n dep = '-e '\n else:\n dep = ''\n\n # Support for paths.\n elif 'path' in deps[dep]:\n extra = '{1}{0}'.format(extra, deps[dep]['path']).strip()\n\n # Flag the file as editable if it is a local relative path\n if 'editable' in deps[dep]:\n dep = '-e '\n else:\n dep = ''\n\n if vcs:\n extra = '{0}+{1}'.format(vcs, deps[dep][vcs])\n\n # Support for @refs.\n if 'ref' in deps[dep]:\n extra += '@{0}'.format(deps[dep]['ref'])\n\n extra += '#egg={0}'.format(dep)\n\n # Support for subdirectory\n if 'subdirectory' in deps[dep]:\n extra += '&subdirectory={0}'.format(deps[dep]['subdirectory'])\n\n # Support for editable.\n if 'editable' in deps[dep]:\n # Support for --egg.\n dep = '-e '\n else:\n dep = ''\n\n s = '{0}{1}{2}{3}{4} {5}'.format(dep, extra, version, specs, hash, index).strip()\n dependencies.append(s)\n if not r:\n return dependencies\n\n # Write requirements.txt to tmp directory.\n f = tempfile.NamedTemporaryFile(suffix='-requirements.txt', delete=False)\n f.write('\\n'.join(dependencies).encode('utf-8'))\n return f.name\n\n\ndef mkdir_p(newdir):\n \"\"\"works the way a good mkdir should :)\n - already exists, silently complete\n - regular file in the way, raise an exception\n - parent directory(ies) does not exist, make them as well\n From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/\n \"\"\"\n\n if os.path.isdir(newdir):\n pass\n elif os.path.isfile(newdir):\n raise OSError(\"a file with the same name as the desired dir, '{0}', already exists.\".format(newdir))\n else:\n head, tail = os.path.split(newdir)\n if head and not os.path.isdir(head):\n mkdir_p(head)\n if tail:\n os.mkdir(newdir)\n\n\ndef is_required_version(version, specified_version):\n \"\"\"Check to see if there's a hard requirement for version\n number provided in the Pipfile.\n \"\"\"\n\n # Certain packages may be defined with multiple values.\n if isinstance(specified_version, dict):\n specified_version = specified_version.get('version', '')\n if specified_version.startswith('=='):\n return version.strip() == specified_version.split('==')[1].strip()\n return True\n\n\ndef is_vcs(pipfile_entry):\n \"\"\"Determine if dictionary entry from Pipfile is for a vcs dependency.\"\"\"\n\n if hasattr(pipfile_entry, 'keys'):\n return any(key for key in pipfile_entry.keys() if key in VCS_LIST)\n elif isinstance(pipfile_entry, six.string_types):\n return pipfile_entry.startswith(VCS_LIST)\n return False\n\n\ndef is_file(package):\n \"\"\"Determine if 
a package name is for a File dependency.\"\"\"\n if hasattr(package, 'keys'):\n return any(key for key in package.keys() if key in ['file', 'path'])\n\n if os.path.exists(str(package)):\n return True\n\n for start in FILE_LIST:\n if str(package).startswith(start):\n return True\n\n return False\n\n\ndef pep440_version(version):\n \"\"\"Normalize version to PEP 440 standards\"\"\"\n\n # Use pip built-in version parser.\n return str(pip.index.parse_version(version))\n\n\ndef pep423_name(name):\n \"\"\"Normalize package name to PEP 423 style standard.\"\"\"\n name = name.lower()\n if any(i not in name for i in (VCS_LIST+FILE_LIST)):\n return name.replace('_', '-')\n else:\n return name\n\n\ndef proper_case(package_name):\n \"\"\"Properly case project name from pypi.org.\"\"\"\n\n # Hit the simple API.\n r = requests.get('https://pypi.org/pypi/{0}/json'.format(package_name), timeout=0.3, stream=True)\n if not r.ok:\n raise IOError('Unable to find package {0} in PyPI repository.'.format(package_name))\n\n r = parse.parse('https://pypi.org/pypi/{name}/json', r.url)\n good_name = r['name']\n\n return good_name\n\n\ndef split_vcs(split_file):\n \"\"\"Split VCS dependencies out from file.\"\"\"\n\n if 'packages' in split_file or 'dev-packages' in split_file:\n sections = ('packages', 'dev-packages')\n elif 'default' in split_file or 'develop' in split_file:\n sections = ('default', 'develop')\n\n # For each vcs entry in a given section, move it to section-vcs.\n for section in sections:\n entries = split_file.get(section, {})\n vcs_dict = dict((k, entries.pop(k)) for k in list(entries.keys()) if is_vcs(entries[k]))\n split_file[section + '-vcs'] = vcs_dict\n\n return split_file\n\n\ndef recase_file(file_dict):\n \"\"\"Recase file before writing to output.\"\"\"\n\n if 'packages' in file_dict or 'dev-packages' in file_dict:\n sections = ('packages', 'dev-packages')\n elif 'default' in file_dict or 'develop' in file_dict:\n sections = ('default', 'develop')\n\n for section in sections:\n file_section = file_dict.get(section, {})\n\n # Try to properly case each key if we can.\n for key in list(file_section.keys()):\n try:\n cased_key = proper_case(key)\n except IOError:\n cased_key = key\n file_section[cased_key] = file_section.pop(key)\n\n return file_dict\n\n\ndef get_windows_path(*args):\n \"\"\"Sanitize a path for windows environments\n\n Accepts an arbitrary list of arguments and makes a clean windows path\"\"\"\n return os.path.normpath(os.path.join(*args))\n\n\ndef find_windows_executable(bin_path, exe_name):\n \"\"\"Given an executable name, search the given location for an executable\"\"\"\n requested_path = get_windows_path(bin_path, exe_name)\n if os.path.exists(requested_path):\n return requested_path\n\n # Ensure we aren't adding two layers of file extensions\n exe_name = os.path.splitext(exe_name)[0]\n files = ['{0}.{1}'.format(exe_name, ext) for ext in ['', 'py', 'exe', 'bat']]\n exec_paths = [get_windows_path(bin_path, f) for f in files]\n exec_files = [filename for filename in exec_paths if os.path.isfile(filename)]\n return exec_files[0]\n\n\ndef walk_up(bottom):\n \"\"\"Mimic os.walk, but walk 'up' instead of down the directory tree.\n From: https://gist.github.com/zdavkeos/1098474\n \"\"\"\n\n bottom = os.path.realpath(bottom)\n\n # Get files in current dir.\n try:\n names = os.listdir(bottom)\n except Exception:\n return\n\n dirs, nondirs = [], []\n for name in names:\n if os.path.isdir(os.path.join(bottom, name)):\n dirs.append(name)\n else:\n nondirs.append(name)\n\n yield 
bottom, dirs, nondirs\n\n new_path = os.path.realpath(os.path.join(bottom, '..'))\n\n # See if we are at the top.\n if new_path == bottom:\n return\n\n for x in walk_up(new_path):\n yield x\n\n\ndef find_requirements(max_depth=3):\n \"\"\"Returns the path of a Pipfile in parent directories.\"\"\"\n\n i = 0\n for c, d, f in walk_up(os.getcwd()):\n i += 1\n\n if i < max_depth:\n if 'requirements.txt':\n r = os.path.join(c, 'requirements.txt')\n if os.path.isfile(r):\n return r\n raise RuntimeError('No requirements.txt found!')\n\n\n# Borrowed from pew to avoid importing pew which imports psutil\n# See https://github.com/berdario/pew/blob/master/pew/_utils.py#L82\n@contextmanager\ndef temp_environ():\n \"\"\"Allow the ability to set os.environ temporarily\"\"\"\n environ = dict(os.environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(environ)\n\ndef is_valid_url(url):\n \"\"\"Checks if a given string is an url\"\"\"\n pieces = urlparse(url)\n return all([pieces.scheme, pieces.netloc])\n\n\ndef download_file(url, filename):\n \"\"\"Downloads file from url to a path with filename\"\"\"\n r = requests.get(url, stream=True)\n if not r.ok:\n raise IOError('Unable to download file')\n\n r.raw.decode_content = True\n with open(filename, 'wb') as f:\n shutil.copyfileobj(r.raw, f)\n", "path": "pipenv/utils.py" } ]
diff --git a/pipenv/utils.py b/pipenv/utils.py index bb118b7cc1..bf99cdd578 100644 --- a/pipenv/utils.py +++ b/pipenv/utils.py @@ -290,7 +290,9 @@ def cleanup_toml(tml): # Insert a newline after the heading. if after: new_toml.append('') - + + # adding new line at the end of the TOML file + new_toml.append('') toml = '\n'.join(new_toml) return toml diff --git a/tests/test_utils.py b/tests/test_utils.py index b094a34658..4922d2949b 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -186,3 +186,21 @@ def test_download_file(self): pipenv.utils.download_file(url, output) assert os.path.exists(output) os.remove(output) + + def test_new_line_end_of_toml_file(this): + # toml file that needs clean up + toml = """ +[dev-packages] + +"flake8" = ">=3.3.0,<4" +pytest = "*" +mock = "*" +sphinx = "<=1.5.5" +"-e ." = "*" +twine = "*" +"sphinx-click" = "*" +"pytest-xdist" = "*" + """ + new_toml = pipenv.utils.cleanup_toml(toml) + # testing if the end of the generated file contains a newline + assert new_toml[-1] == '\n'
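The change above is easy to reason about: `cleanup_toml` rebuilds the file as a list of lines, so unless an empty entry is appended before the final `'\n'.join(...)`, the result ends without a trailing newline, which is exactly what the new test asserts (`new_toml[-1] == '\n'`). A minimal standalone sketch of the joining behaviour (toy code, not the actual pipenv function):

```python
# Toy illustration: appending one empty entry before joining leaves the
# cleaned-up TOML text with a trailing newline.
lines = ['[dev-packages]', 'pytest = "*"']

without_trailing = '\n'.join(lines)
with_trailing = '\n'.join(lines + [''])   # what the patch effectively does

print(repr(without_trailing))      # '[dev-packages]\npytest = "*"'
print(repr(with_trailing))         # '[dev-packages]\npytest = "*"\n'
print(with_trailing[-1] == '\n')   # True, matching the new test's assertion
```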
benoitc__gunicorn-1806
I get an error in this package: AttributeError: 'NoneType' object has no attribute 'add_extra_file'

Hi everyone, when I try to deploy a Keras model to Google Cloud I get this error:

```py
File "/home/falahgs07/keras/env/lib/python3.5/site-packages/gunicorn/workers/base.py", line 126, in init_process
    self.load_wsgi()
File "/home/falahgs07/keras/env/lib/python3.5/site-packages/gunicorn/workers/base.py", line 148, in load_wsgi
    self.reloader.add_extra_file(exc_val.filename)
AttributeError: 'NoneType' object has no attribute 'add_extra_file'
```
[ { "content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nfrom random import randint\nimport signal\nfrom ssl import SSLError\nimport sys\nimport time\nimport traceback\n\nfrom gunicorn import six\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import reloader_engines\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.errors import InvalidSchemeHeaders\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.pid = \"[booting]\"\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. 
You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid,\n initgroups=self.cfg.initgroups)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n for s in self.sockets:\n util.close_on_exec(s)\n util.close_on_exec(self.tmp.fileno())\n\n self.wait_fds = self.sockets + [self.PIPE[0]]\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n self.alive = False\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n reloader_cls = reloader_engines[self.cfg.reload_engine]\n self.reloader = reloader_cls(extra_files=self.cfg.reload_extra_files,\n callback=changed)\n self.reloader.start()\n\n self.load_wsgi()\n self.cfg.post_worker_init(self)\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if self.cfg.reload == 'off':\n raise\n\n self.log.exception(e)\n\n # fix from PR #1228\n # storing the traceback into exc_tb will create a circular reference.\n # per https://docs.python.org/2/library/sys.html#sys.exc_info warning,\n # delete the traceback after use.\n try:\n _, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = six.StringIO()\n traceback.print_tb(exc_tb, file=tb_string)\n self.wsgi = util.make_fail_app(tb_string.getvalue())\n finally:\n del exc_tb\n\n def init_signals(self):\n # reset signaling\n for s in self.SIGNALS:\n signal.signal(s, signal.SIG_DFL)\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n if hasattr(signal, 'set_wakeup_fd'):\n signal.set_wakeup_fd(self.PIPE[1])\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, 
ForbiddenProxyRequest,\n InvalidSchemeHeaders,\n SSLError)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n elif isinstance(exc, InvalidSchemeHeaders):\n mesg = \"%s\" % str(exc)\n elif isinstance(exc, SSLError):\n reason = \"Forbidden\"\n mesg = \"'%s'\" % str(exc)\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n if hasattr(req, \"uri\"):\n self.log.exception(\"Error handling request %s\", req.uri)\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n self.log.debug(\"worker: SIGWINCH ignored.\")\n", "path": "gunicorn/workers/base.py" } ]
[ { "content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nfrom random import randint\nimport signal\nfrom ssl import SSLError\nimport sys\nimport time\nimport traceback\n\nfrom gunicorn import six\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import reloader_engines\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.errors import InvalidSchemeHeaders\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.pid = \"[booting]\"\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. 
You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid,\n initgroups=self.cfg.initgroups)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n for s in self.sockets:\n util.close_on_exec(s)\n util.close_on_exec(self.tmp.fileno())\n\n self.wait_fds = self.sockets + [self.PIPE[0]]\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n self.alive = False\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n reloader_cls = reloader_engines[self.cfg.reload_engine]\n self.reloader = reloader_cls(extra_files=self.cfg.reload_extra_files,\n callback=changed)\n self.reloader.start()\n\n self.load_wsgi()\n self.cfg.post_worker_init(self)\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if not self.cfg.reload:\n raise\n\n self.log.exception(e)\n\n # fix from PR #1228\n # storing the traceback into exc_tb will create a circular reference.\n # per https://docs.python.org/2/library/sys.html#sys.exc_info warning,\n # delete the traceback after use.\n try:\n _, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = six.StringIO()\n traceback.print_tb(exc_tb, file=tb_string)\n self.wsgi = util.make_fail_app(tb_string.getvalue())\n finally:\n del exc_tb\n\n def init_signals(self):\n # reset signaling\n for s in self.SIGNALS:\n signal.signal(s, signal.SIG_DFL)\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n if hasattr(signal, 'set_wakeup_fd'):\n signal.set_wakeup_fd(self.PIPE[1])\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, 
ForbiddenProxyRequest,\n InvalidSchemeHeaders,\n SSLError)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n elif isinstance(exc, InvalidSchemeHeaders):\n mesg = \"%s\" % str(exc)\n elif isinstance(exc, SSLError):\n reason = \"Forbidden\"\n mesg = \"'%s'\" % str(exc)\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n if hasattr(req, \"uri\"):\n self.log.exception(\"Error handling request %s\", req.uri)\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n self.log.debug(\"worker: SIGWINCH ignored.\")\n", "path": "gunicorn/workers/base.py" } ]
diff --git a/docs/source/news.rst b/docs/source/news.rst index 3d90aead2..8f61b9c87 100644 --- a/docs/source/news.rst +++ b/docs/source/news.rst @@ -2,6 +2,13 @@ Changelog ========= +19.x / not released +=================== + +- fix: prevent raising :exc:`AttributeError` when ``--reload`` is not passed + in case of a :exc:`SyntaxError` raised from the WSGI application. + (:issue:`1805`, :pr:`1806`) + 19.8.1 / 2018/04/30 =================== diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py index ce40796f5..881efa0f0 100644 --- a/gunicorn/workers/base.py +++ b/gunicorn/workers/base.py @@ -137,7 +137,7 @@ def load_wsgi(self): try: self.wsgi = self.app.wsgi() except SyntaxError as e: - if self.cfg.reload == 'off': + if not self.cfg.reload: raise self.log.exception(e)
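The one-line change above targets the root cause of the report: `self.reloader` is only created when `--reload` is enabled, yet the old guard compared the setting to the string `'off'`. Because `cfg.reload` holds a boolean-like value, that comparison never matched, the `SyntaxError` was not re-raised, and the handler went on to call `add_extra_file` on a reloader that was still `None`. A small sketch of the two guards follows; treating the config value as a plain boolean is an assumption of this sketch, not the full gunicorn `Config` object.

```python
# Sketch: why the old guard never fired when --reload was not passed.
reload_enabled = False  # assumed stand-in for cfg.reload without --reload

# Old guard: a boolean never equals the string 'off', so this stayed False
# and execution reached self.reloader.add_extra_file() with reloader = None.
print(reload_enabled == 'off')  # False

# New guard: re-raise the SyntaxError whenever reloading is disabled,
# before the missing reloader is ever touched.
print(not reload_enabled)       # True
```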
frappe__frappe-16129
no_copy option not available in customize form field

### Observed result
`no_copy` is a field property in DocType, but in Customize Form the No Copy option is not available for the field.

### Expected result
Make `no_copy` an available property of the field in Customize Form.
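The fix, visible in the `customize_form.py` listings that follow, is to declare `no_copy` in the module-level `docfield_properties` map so Customize Form exposes the checkbox and persists it as a Property Setter. Below is a simplified, hypothetical sketch of how such a map gates which docfield properties are customizable; it is not the actual Frappe code path.

```python
# Hypothetical simplification: Customize Form only loads and saves the
# docfield properties listed in this map (property name -> property type).
docfield_properties = {
    'bold': 'Check',
    'hidden': 'Check',
    'read_only': 'Check',
    'no_copy': 'Check',  # the added entry that makes "No Copy" customizable
}

def is_customizable(prop: str) -> bool:
    """Return True if Customize Form would expose and persist this property."""
    return prop in docfield_properties

print(is_customizable('no_copy'))  # True once the entry above exists
```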
[ { "content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. See license.txt\n\nfrom __future__ import unicode_literals\n\"\"\"\n\tCustomize Form is a Single DocType used to mask the Property Setter\n\tThus providing a better UI from user perspective\n\"\"\"\nimport json\nimport frappe\nimport frappe.translate\nfrom frappe import _\nfrom frappe.utils import cint\nfrom frappe.model.document import Document\nfrom frappe.model import no_value_fields, core_doctypes_list\nfrom frappe.core.doctype.doctype.doctype import validate_fields_for_doctype, check_email_append_to\nfrom frappe.custom.doctype.custom_field.custom_field import create_custom_field\nfrom frappe.custom.doctype.property_setter.property_setter import delete_property_setter\nfrom frappe.model.docfield import supports_translation\nfrom frappe.core.doctype.doctype.doctype import validate_series\n\nclass CustomizeForm(Document):\n\tdef on_update(self):\n\t\tfrappe.db.sql(\"delete from tabSingles where doctype='Customize Form'\")\n\t\tfrappe.db.sql(\"delete from `tabCustomize Form Field`\")\n\n\[email protected]()\n\tdef fetch_to_customize(self):\n\t\tself.clear_existing_doc()\n\t\tif not self.doc_type:\n\t\t\treturn\n\n\t\tmeta = frappe.get_meta(self.doc_type)\n\n\t\tself.validate_doctype(meta)\n\n\t\t# load the meta properties on the customize (self) object\n\t\tself.load_properties(meta)\n\n\t\t# load custom translation\n\t\ttranslation = self.get_name_translation()\n\t\tself.label = translation.translated_text if translation else ''\n\n\t\tself.create_auto_repeat_custom_field_if_required(meta)\n\n\t\t# NOTE doc (self) is sent to clientside by run_method\n\n\tdef validate_doctype(self, meta):\n\t\t'''\n\t\tCheck if the doctype is allowed to be customized.\n\t\t'''\n\t\tif self.doc_type in core_doctypes_list:\n\t\t\tfrappe.throw(_(\"Core DocTypes cannot be customized.\"))\n\n\t\tif meta.issingle:\n\t\t\tfrappe.throw(_(\"Single DocTypes cannot be customized.\"))\n\n\t\tif meta.custom:\n\t\t\tfrappe.throw(_(\"Only standard DocTypes are allowed to be customized from Customize Form.\"))\n\n\tdef load_properties(self, meta):\n\t\t'''\n\t\tLoad the customize object (this) with the metadata properties\n\t\t'''\n\t\t# doctype properties\n\t\tfor prop in doctype_properties:\n\t\t\tself.set(prop, meta.get(prop))\n\n\t\tfor d in meta.get(\"fields\"):\n\t\t\tnew_d = {\"fieldname\": d.fieldname, \"is_custom_field\": d.get(\"is_custom_field\"), \"name\": d.name}\n\t\t\tfor prop in docfield_properties:\n\t\t\t\tnew_d[prop] = d.get(prop)\n\t\t\tself.append(\"fields\", new_d)\n\n\t\tfor fieldname in ('links', 'actions'):\n\t\t\tfor d in meta.get(fieldname):\n\t\t\t\tself.append(fieldname, d)\n\n\tdef create_auto_repeat_custom_field_if_required(self, meta):\n\t\t'''\n\t\tCreate auto repeat custom field if it's not already present\n\t\t'''\n\t\tif self.allow_auto_repeat:\n\t\t\tall_fields = [df.fieldname for df in meta.fields]\n\n\t\t\tif \"auto_repeat\" in all_fields:\n\t\t\t\treturn\n\n\t\t\tinsert_after = self.fields[len(self.fields) - 1].fieldname\n\t\t\tcreate_custom_field(self.doc_type, dict(\n\t\t\t\tfieldname='auto_repeat',\n\t\t\t\tlabel='Auto Repeat',\n\t\t\t\tfieldtype='Link',\n\t\t\t\toptions='Auto Repeat',\n\t\t\t\tinsert_after=insert_after,\n\t\t\t\tread_only=1, no_copy=1, print_hide=1\n\t\t\t))\n\n\n\tdef get_name_translation(self):\n\t\t'''Get translation object if exists of current doctype name in the default language'''\n\t\treturn frappe.get_value('Translation', {\n\t\t\t\t'source_text': 
self.doc_type,\n\t\t\t\t'language': frappe.local.lang or 'en'\n\t\t\t}, ['name', 'translated_text'], as_dict=True)\n\n\tdef set_name_translation(self):\n\t\t'''Create, update custom translation for this doctype'''\n\t\tcurrent = self.get_name_translation()\n\t\tif not self.label:\n\t\t\tif current:\n\t\t\t\t# clear translation\n\t\t\t\tfrappe.delete_doc('Translation', current.name)\n\t\t\treturn\n\n\t\tif not current:\n\t\t\tfrappe.get_doc(\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": 'Translation',\n\t\t\t\t\t\"source_text\": self.doc_type,\n\t\t\t\t\t\"translated_text\": self.label,\n\t\t\t\t\t\"language_code\": frappe.local.lang or 'en'\n\t\t\t\t}\n\t\t\t).insert()\n\t\t\treturn\n\n\t\tif self.label != current.translated_text:\n\t\t\tfrappe.db.set_value('Translation', current.name, 'translated_text', self.label)\n\t\t\tfrappe.translate.clear_cache()\n\n\tdef clear_existing_doc(self):\n\t\tdoc_type = self.doc_type\n\n\t\tfor fieldname in self.meta.get_valid_columns():\n\t\t\tself.set(fieldname, None)\n\n\t\tfor df in self.meta.get_table_fields():\n\t\t\tself.set(df.fieldname, [])\n\n\t\tself.doc_type = doc_type\n\t\tself.name = \"Customize Form\"\n\n\[email protected]()\n\tdef save_customization(self):\n\t\tif not self.doc_type:\n\t\t\treturn\n\t\tvalidate_series(self, self.autoname, self.doc_type)\n\t\tself.flags.update_db = False\n\t\tself.flags.rebuild_doctype_for_global_search = False\n\t\tself.set_property_setters()\n\t\tself.update_custom_fields()\n\t\tself.set_name_translation()\n\t\tvalidate_fields_for_doctype(self.doc_type)\n\t\tcheck_email_append_to(self)\n\n\t\tif self.flags.update_db:\n\t\t\tfrappe.db.updatedb(self.doc_type)\n\n\t\tif not hasattr(self, 'hide_success') or not self.hide_success:\n\t\t\tfrappe.msgprint(_(\"{0} updated\").format(_(self.doc_type)), alert=True)\n\t\tfrappe.clear_cache(doctype=self.doc_type)\n\t\tself.fetch_to_customize()\n\n\t\tif self.flags.rebuild_doctype_for_global_search:\n\t\t\tfrappe.enqueue('frappe.utils.global_search.rebuild_for_doctype',\n\t\t\t\tnow=True, doctype=self.doc_type)\n\n\tdef set_property_setters(self):\n\t\tmeta = frappe.get_meta(self.doc_type)\n\n\t\t# doctype\n\t\tself.set_property_setters_for_doctype(meta)\n\n\t\t# docfield\n\t\tfor df in self.get(\"fields\"):\n\t\t\tmeta_df = meta.get(\"fields\", {\"fieldname\": df.fieldname})\n\t\t\tif not meta_df or meta_df[0].get(\"is_custom_field\"):\n\t\t\t\tcontinue\n\t\t\tself.set_property_setters_for_docfield(meta, df, meta_df)\n\n\t\t# action and links\n\t\tself.set_property_setters_for_actions_and_links(meta)\n\n\tdef set_property_setters_for_doctype(self, meta):\n\t\tfor prop, prop_type in doctype_properties.items():\n\t\t\tif self.get(prop) != meta.get(prop):\n\t\t\t\tself.make_property_setter(prop, self.get(prop), prop_type)\n\n\tdef set_property_setters_for_docfield(self, meta, df, meta_df):\n\t\tfor prop, prop_type in docfield_properties.items():\n\t\t\tif prop != \"idx\" and (df.get(prop) or '') != (meta_df[0].get(prop) or ''):\n\t\t\t\tif not self.allow_property_change(prop, meta_df, df):\n\t\t\t\t\tcontinue\n\n\t\t\t\tself.make_property_setter(prop, df.get(prop), prop_type,\n\t\t\t\t\tfieldname=df.fieldname)\n\n\tdef allow_property_change(self, prop, meta_df, df):\n\t\tif prop == \"fieldtype\":\n\t\t\tself.validate_fieldtype_change(df, meta_df[0].get(prop), df.get(prop))\n\n\t\telif prop == \"length\":\n\t\t\told_value_length = cint(meta_df[0].get(prop))\n\t\t\tnew_value_length = cint(df.get(prop))\n\n\t\t\tif new_value_length and (old_value_length > 
new_value_length):\n\t\t\t\tself.check_length_for_fieldtypes.append({'df': df, 'old_value': meta_df[0].get(prop)})\n\t\t\t\tself.validate_fieldtype_length()\n\t\t\telse:\n\t\t\t\tself.flags.update_db = True\n\n\t\telif prop == \"allow_on_submit\" and df.get(prop):\n\t\t\tif not frappe.db.get_value(\"DocField\",\n\t\t\t\t{\"parent\": self.doc_type, \"fieldname\": df.fieldname}, \"allow_on_submit\"):\n\t\t\t\tfrappe.msgprint(_(\"Row {0}: Not allowed to enable Allow on Submit for standard fields\")\\\n\t\t\t\t\t.format(df.idx))\n\t\t\t\treturn False\n\n\t\telif prop == \"reqd\" and \\\n\t\t\t((frappe.db.get_value(\"DocField\",\n\t\t\t\t{\"parent\":self.doc_type,\"fieldname\":df.fieldname}, \"reqd\") == 1) \\\n\t\t\t\tand (df.get(prop) == 0)):\n\t\t\tfrappe.msgprint(_(\"Row {0}: Not allowed to disable Mandatory for standard fields\")\\\n\t\t\t\t\t.format(df.idx))\n\t\t\treturn False\n\n\t\telif prop == \"in_list_view\" and df.get(prop) \\\n\t\t\tand df.fieldtype!=\"Attach Image\" and df.fieldtype in no_value_fields:\n\t\t\t\t\tfrappe.msgprint(_(\"'In List View' not allowed for type {0} in row {1}\")\n\t\t\t\t\t\t.format(df.fieldtype, df.idx))\n\t\t\t\t\treturn False\n\n\t\telif prop == \"precision\" and cint(df.get(\"precision\")) > 6 \\\n\t\t\t\tand cint(df.get(\"precision\")) > cint(meta_df[0].get(\"precision\")):\n\t\t\tself.flags.update_db = True\n\n\t\telif prop == \"unique\":\n\t\t\tself.flags.update_db = True\n\n\t\telif (prop == \"read_only\" and cint(df.get(\"read_only\"))==0\n\t\t\t\tand frappe.db.get_value(\"DocField\", {\"parent\": self.doc_type,\n\t\t\t\t\"fieldname\": df.fieldname}, \"read_only\")==1):\n\t\t\t# if docfield has read_only checked and user is trying to make it editable, don't allow it\n\t\t\tfrappe.msgprint(_(\"You cannot unset 'Read Only' for field {0}\").format(df.label))\n\t\t\treturn False\n\n\t\telif prop == \"options\" and df.get(\"fieldtype\") not in ALLOWED_OPTIONS_CHANGE:\n\t\t\tfrappe.msgprint(_(\"You can't set 'Options' for field {0}\").format(df.label))\n\t\t\treturn False\n\n\t\telif prop == 'translatable' and not supports_translation(df.get('fieldtype')):\n\t\t\tfrappe.msgprint(_(\"You can't set 'Translatable' for field {0}\").format(df.label))\n\t\t\treturn False\n\n\t\telif (prop == 'in_global_search' and\n\t\t\tdf.in_global_search != meta_df[0].get(\"in_global_search\")):\n\t\t\tself.flags.rebuild_doctype_for_global_search = True\n\n\t\treturn True\n\n\tdef set_property_setters_for_actions_and_links(self, meta):\n\t\t'''\n\t\tApply property setters or create custom records for DocType Action and DocType Link\n\t\t'''\n\t\tfor doctype, fieldname, field_map in (\n\t\t\t\t('DocType Link', 'links', doctype_link_properties),\n\t\t\t\t('DocType Action', 'actions', doctype_action_properties)\n\t\t\t):\n\t\t\thas_custom = False\n\t\t\titems = []\n\t\t\tfor i, d in enumerate(self.get(fieldname) or []):\n\t\t\t\td.idx = i\n\t\t\t\tif frappe.db.exists(doctype, d.name) and not d.custom:\n\t\t\t\t\t# check property and apply property setter\n\t\t\t\t\toriginal = frappe.get_doc(doctype, d.name)\n\t\t\t\t\tfor prop, prop_type in field_map.items():\n\t\t\t\t\t\tif d.get(prop) != original.get(prop):\n\t\t\t\t\t\t\tself.make_property_setter(prop, d.get(prop), prop_type,\n\t\t\t\t\t\t\t\tapply_on=doctype, row_name=d.name)\n\t\t\t\t\titems.append(d.name)\n\t\t\t\telse:\n\t\t\t\t\t# custom - just insert/update\n\t\t\t\t\td.parent = self.doc_type\n\t\t\t\t\td.custom = 1\n\t\t\t\t\td.save(ignore_permissions=True)\n\t\t\t\t\thas_custom = 
True\n\t\t\t\t\titems.append(d.name)\n\n\t\t\tself.update_order_property_setter(has_custom, fieldname)\n\t\t\tself.clear_removed_items(doctype, items)\n\n\tdef update_order_property_setter(self, has_custom, fieldname):\n\t\t'''\n\t\tWe need to maintain the order of the link/actions if the user has shuffled them.\n\t\tSo we create a new property (ex `links_order`) to keep a list of items.\n\t\t'''\n\t\tproperty_name = '{}_order'.format(fieldname)\n\t\tif has_custom:\n\t\t\t# save the order of the actions and links\n\t\t\tself.make_property_setter(property_name,\n\t\t\t\tjson.dumps([d.name for d in self.get(fieldname)]), 'Small Text')\n\t\telse:\n\t\t\tfrappe.db.delete('Property Setter', dict(property=property_name,\n\t\t\t\tdoc_type=self.doc_type))\n\n\n\tdef clear_removed_items(self, doctype, items):\n\t\t'''\n\t\tClear rows that do not appear in `items`. These have been removed by the user.\n\t\t'''\n\t\tif items:\n\t\t\tfrappe.db.delete(doctype, dict(parent=self.doc_type, custom=1,\n\t\t\t\tname=('not in', items)))\n\t\telse:\n\t\t\tfrappe.db.delete(doctype, dict(parent=self.doc_type, custom=1))\n\n\tdef update_custom_fields(self):\n\t\tfor i, df in enumerate(self.get(\"fields\")):\n\t\t\tif df.get(\"is_custom_field\"):\n\t\t\t\tif not frappe.db.exists('Custom Field', {'dt': self.doc_type, 'fieldname': df.fieldname}):\n\t\t\t\t\tself.add_custom_field(df, i)\n\t\t\t\t\tself.flags.update_db = True\n\t\t\t\telse:\n\t\t\t\t\tself.update_in_custom_field(df, i)\n\n\t\tself.delete_custom_fields()\n\n\tdef add_custom_field(self, df, i):\n\t\td = frappe.new_doc(\"Custom Field\")\n\n\t\td.dt = self.doc_type\n\n\t\tfor prop in docfield_properties:\n\t\t\td.set(prop, df.get(prop))\n\n\t\tif i!=0:\n\t\t\td.insert_after = self.fields[i-1].fieldname\n\t\td.idx = i\n\n\t\td.insert()\n\t\tdf.fieldname = d.fieldname\n\n\tdef update_in_custom_field(self, df, i):\n\t\tmeta = frappe.get_meta(self.doc_type)\n\t\tmeta_df = meta.get(\"fields\", {\"fieldname\": df.fieldname})\n\t\tif not (meta_df and meta_df[0].get(\"is_custom_field\")):\n\t\t\t# not a custom field\n\t\t\treturn\n\n\t\tcustom_field = frappe.get_doc(\"Custom Field\", meta_df[0].name)\n\t\tchanged = False\n\t\tfor prop in docfield_properties:\n\t\t\tif df.get(prop) != custom_field.get(prop):\n\t\t\t\tif prop == \"fieldtype\":\n\t\t\t\t\tself.validate_fieldtype_change(df, meta_df[0].get(prop), df.get(prop))\n\n\t\t\t\tcustom_field.set(prop, df.get(prop))\n\t\t\t\tchanged = True\n\n\t\t# check and update `insert_after` property\n\t\tif i!=0:\n\t\t\tinsert_after = self.fields[i-1].fieldname\n\t\t\tif custom_field.insert_after != insert_after:\n\t\t\t\tcustom_field.insert_after = insert_after\n\t\t\t\tcustom_field.idx = i\n\t\t\t\tchanged = True\n\n\t\tif changed:\n\t\t\tcustom_field.db_update()\n\t\t\tself.flags.update_db = True\n\t\t\t#custom_field.save()\n\n\tdef delete_custom_fields(self):\n\t\tmeta = frappe.get_meta(self.doc_type)\n\t\tfields_to_remove = (set([df.fieldname for df in meta.get(\"fields\")])\n\t\t\t- set(df.fieldname for df in self.get(\"fields\")))\n\n\t\tfor fieldname in fields_to_remove:\n\t\t\tdf = meta.get(\"fields\", {\"fieldname\": fieldname})[0]\n\t\t\tif df.get(\"is_custom_field\"):\n\t\t\t\tfrappe.delete_doc(\"Custom Field\", df.name)\n\n\tdef make_property_setter(self, prop, value, property_type, fieldname=None,\n\t\tapply_on=None, row_name = None):\n\t\tdelete_property_setter(self.doc_type, prop, fieldname, row_name)\n\n\t\tproperty_value = self.get_existing_property_value(prop, fieldname)\n\n\t\tif 
property_value==value:\n\t\t\treturn\n\n\t\tif not apply_on:\n\t\t\tapply_on = \"DocField\" if fieldname else \"DocType\"\n\n\t\t# create a new property setter\n\t\tfrappe.make_property_setter({\n\t\t\t\"doctype\": self.doc_type,\n\t\t\t\"doctype_or_field\": apply_on,\n\t\t\t\"fieldname\": fieldname,\n\t\t\t\"row_name\": row_name,\n\t\t\t\"property\": prop,\n\t\t\t\"value\": value,\n\t\t\t\"property_type\": property_type\n\t\t})\n\n\tdef get_existing_property_value(self, property_name, fieldname=None):\n\t\t# check if there is any need to make property setter!\n\t\tif fieldname:\n\t\t\tproperty_value = frappe.db.get_value(\"DocField\", {\"parent\": self.doc_type,\n\t\t\t\t\"fieldname\": fieldname}, property_name)\n\t\telse:\n\t\t\tif frappe.db.has_column(\"DocType\", property_name):\n\t\t\t\tproperty_value = frappe.db.get_value(\"DocType\", self.doc_type, property_name)\n\t\t\telse:\n\t\t\t\tproperty_value = None\n\n\t\treturn property_value\n\n\tdef validate_fieldtype_change(self, df, old_value, new_value):\n\t\tallowed = self.allow_fieldtype_change(old_value, new_value)\n\t\tif allowed:\n\t\t\told_value_length = cint(frappe.db.type_map.get(old_value)[1])\n\t\t\tnew_value_length = cint(frappe.db.type_map.get(new_value)[1])\n\n\t\t\t# Ignore fieldtype check validation if new field type has unspecified maxlength\n\t\t\t# Changes like DATA to TEXT, where new_value_lenth equals 0 will not be validated\n\t\t\tif new_value_length and (old_value_length > new_value_length):\n\t\t\t\tself.check_length_for_fieldtypes.append({'df': df, 'old_value': old_value})\n\t\t\t\tself.validate_fieldtype_length()\n\t\t\telse:\n\t\t\t\tself.flags.update_db = True\n\t\tif not allowed:\n\t\t\tfrappe.throw(_(\"Fieldtype cannot be changed from {0} to {1} in row {2}\").format(old_value, new_value, df.idx))\n\n\tdef validate_fieldtype_length(self):\n\t\tfor field in self.check_length_for_fieldtypes:\n\t\t\tdf = field.get('df')\n\t\t\tmax_length = cint(frappe.db.type_map.get(df.fieldtype)[1])\n\t\t\tfieldname = df.fieldname\n\t\t\tdocs = frappe.db.sql('''\n\t\t\t\tSELECT name, {fieldname}, LENGTH({fieldname}) AS len\n\t\t\t\tFROM `tab{doctype}`\n\t\t\t\tWHERE LENGTH({fieldname}) > {max_length}\n\t\t\t'''.format(\n\t\t\t\tfieldname=fieldname,\n\t\t\t\tdoctype=self.doc_type,\n\t\t\t\tmax_length=max_length\n\t\t\t), as_dict=True)\n\t\t\tlinks = []\n\t\t\tlabel = df.label\n\t\t\tfor doc in docs:\n\t\t\t\tlinks.append(frappe.utils.get_link_to_form(self.doc_type, doc.name))\n\t\t\tlinks_str = ', '.join(links)\n\n\t\t\tif docs:\n\t\t\t\tfrappe.throw(_('Value for field {0} is too long in {1}. 
Length should be lesser than {2} characters')\n\t\t\t\t\t.format(\n\t\t\t\t\t\tfrappe.bold(label),\n\t\t\t\t\t\tlinks_str,\n\t\t\t\t\t\tfrappe.bold(max_length)\n\t\t\t\t\t), title=_('Data Too Long'), is_minimizable=len(docs) > 1)\n\n\t\tself.flags.update_db = True\n\n\[email protected]()\n\tdef reset_to_defaults(self):\n\t\tif not self.doc_type:\n\t\t\treturn\n\n\t\treset_customization(self.doc_type)\n\t\tself.fetch_to_customize()\n\n\t@classmethod\n\tdef allow_fieldtype_change(self, old_type: str, new_type: str) -> bool:\n\t\t\"\"\" allow type change, if both old_type and new_type are in same field group.\n\t\tfield groups are defined in ALLOWED_FIELDTYPE_CHANGE variables.\n\t\t\"\"\"\n\t\tin_field_group = lambda group: (old_type in group) and (new_type in group)\n\t\treturn any(map(in_field_group, ALLOWED_FIELDTYPE_CHANGE))\n\ndef reset_customization(doctype):\n\tsetters = frappe.get_all(\"Property Setter\", filters={\n\t\t'doc_type': doctype,\n\t\t'field_name': ['!=', 'naming_series'],\n\t\t'property': ['!=', 'options']\n\t}, pluck='name')\n\n\tfor setter in setters:\n\t\tfrappe.delete_doc(\"Property Setter\", setter)\n\n\tfrappe.clear_cache(doctype=doctype)\n\ndoctype_properties = {\n\t'search_fields': 'Data',\n\t'title_field': 'Data',\n\t'image_field': 'Data',\n\t'sort_field': 'Data',\n\t'sort_order': 'Data',\n\t'default_print_format': 'Data',\n\t'allow_copy': 'Check',\n\t'istable': 'Check',\n\t'quick_entry': 'Check',\n\t'editable_grid': 'Check',\n\t'max_attachments': 'Int',\n\t'track_changes': 'Check',\n\t'track_views': 'Check',\n\t'allow_auto_repeat': 'Check',\n\t'allow_import': 'Check',\n\t'show_preview_popup': 'Check',\n\t'default_email_template': 'Data',\n\t'email_append_to': 'Check',\n\t'subject_field': 'Data',\n\t'sender_field': 'Data',\n\t'autoname': 'Data'\n}\n\ndocfield_properties = {\n\t'idx': 'Int',\n\t'label': 'Data',\n\t'fieldtype': 'Select',\n\t'options': 'Text',\n\t'fetch_from': 'Small Text',\n\t'fetch_if_empty': 'Check',\n\t'permlevel': 'Int',\n\t'width': 'Data',\n\t'print_width': 'Data',\n\t'non_negative': 'Check',\n\t'reqd': 'Check',\n\t'unique': 'Check',\n\t'ignore_user_permissions': 'Check',\n\t'in_list_view': 'Check',\n\t'in_standard_filter': 'Check',\n\t'in_global_search': 'Check',\n\t'in_preview': 'Check',\n\t'bold': 'Check',\n\t'hidden': 'Check',\n\t'collapsible': 'Check',\n\t'collapsible_depends_on': 'Data',\n\t'print_hide': 'Check',\n\t'print_hide_if_no_value': 'Check',\n\t'report_hide': 'Check',\n\t'allow_on_submit': 'Check',\n\t'translatable': 'Check',\n\t'mandatory_depends_on': 'Data',\n\t'read_only_depends_on': 'Data',\n\t'depends_on': 'Data',\n\t'description': 'Text',\n\t'default': 'Text',\n\t'precision': 'Select',\n\t'read_only': 'Check',\n\t'length': 'Int',\n\t'columns': 'Int',\n\t'remember_last_selected_value': 'Check',\n\t'allow_bulk_edit': 'Check',\n\t'auto_repeat': 'Link',\n\t'allow_in_quick_entry': 'Check',\n\t'hide_border': 'Check',\n\t'hide_days': 'Check',\n\t'hide_seconds': 'Check'\n}\n\ndoctype_link_properties = {\n\t'link_doctype': 'Link',\n\t'link_fieldname': 'Data',\n\t'group': 'Data',\n\t'hidden': 'Check'\n}\n\ndoctype_action_properties = {\n\t'label': 'Link',\n\t'action_type': 'Select',\n\t'action': 'Small Text',\n\t'group': 'Data',\n\t'hidden': 'Check'\n}\n\n\nALLOWED_FIELDTYPE_CHANGE = (\n\t('Currency', 'Float', 'Percent'),\n\t('Small Text', 'Data'),\n\t('Text', 'Data'),\n\t('Text', 'Text Editor', 'Code', 'Signature', 'HTML Editor'),\n\t('Data', 'Select'),\n\t('Text', 'Small Text'),\n\t('Text', 'Data', 'Barcode'),\n\t('Code', 
'Geolocation'),\n\t('Table', 'Table MultiSelect'))\n\nALLOWED_OPTIONS_CHANGE = ('Read Only', 'HTML', 'Select', 'Data')\n", "path": "frappe/custom/doctype/customize_form/customize_form.py" } ]
[ { "content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. See license.txt\n\nfrom __future__ import unicode_literals\n\"\"\"\n\tCustomize Form is a Single DocType used to mask the Property Setter\n\tThus providing a better UI from user perspective\n\"\"\"\nimport json\nimport frappe\nimport frappe.translate\nfrom frappe import _\nfrom frappe.utils import cint\nfrom frappe.model.document import Document\nfrom frappe.model import no_value_fields, core_doctypes_list\nfrom frappe.core.doctype.doctype.doctype import validate_fields_for_doctype, check_email_append_to\nfrom frappe.custom.doctype.custom_field.custom_field import create_custom_field\nfrom frappe.custom.doctype.property_setter.property_setter import delete_property_setter\nfrom frappe.model.docfield import supports_translation\nfrom frappe.core.doctype.doctype.doctype import validate_series\n\nclass CustomizeForm(Document):\n\tdef on_update(self):\n\t\tfrappe.db.sql(\"delete from tabSingles where doctype='Customize Form'\")\n\t\tfrappe.db.sql(\"delete from `tabCustomize Form Field`\")\n\n\[email protected]()\n\tdef fetch_to_customize(self):\n\t\tself.clear_existing_doc()\n\t\tif not self.doc_type:\n\t\t\treturn\n\n\t\tmeta = frappe.get_meta(self.doc_type)\n\n\t\tself.validate_doctype(meta)\n\n\t\t# load the meta properties on the customize (self) object\n\t\tself.load_properties(meta)\n\n\t\t# load custom translation\n\t\ttranslation = self.get_name_translation()\n\t\tself.label = translation.translated_text if translation else ''\n\n\t\tself.create_auto_repeat_custom_field_if_required(meta)\n\n\t\t# NOTE doc (self) is sent to clientside by run_method\n\n\tdef validate_doctype(self, meta):\n\t\t'''\n\t\tCheck if the doctype is allowed to be customized.\n\t\t'''\n\t\tif self.doc_type in core_doctypes_list:\n\t\t\tfrappe.throw(_(\"Core DocTypes cannot be customized.\"))\n\n\t\tif meta.issingle:\n\t\t\tfrappe.throw(_(\"Single DocTypes cannot be customized.\"))\n\n\t\tif meta.custom:\n\t\t\tfrappe.throw(_(\"Only standard DocTypes are allowed to be customized from Customize Form.\"))\n\n\tdef load_properties(self, meta):\n\t\t'''\n\t\tLoad the customize object (this) with the metadata properties\n\t\t'''\n\t\t# doctype properties\n\t\tfor prop in doctype_properties:\n\t\t\tself.set(prop, meta.get(prop))\n\n\t\tfor d in meta.get(\"fields\"):\n\t\t\tnew_d = {\"fieldname\": d.fieldname, \"is_custom_field\": d.get(\"is_custom_field\"), \"name\": d.name}\n\t\t\tfor prop in docfield_properties:\n\t\t\t\tnew_d[prop] = d.get(prop)\n\t\t\tself.append(\"fields\", new_d)\n\n\t\tfor fieldname in ('links', 'actions'):\n\t\t\tfor d in meta.get(fieldname):\n\t\t\t\tself.append(fieldname, d)\n\n\tdef create_auto_repeat_custom_field_if_required(self, meta):\n\t\t'''\n\t\tCreate auto repeat custom field if it's not already present\n\t\t'''\n\t\tif self.allow_auto_repeat:\n\t\t\tall_fields = [df.fieldname for df in meta.fields]\n\n\t\t\tif \"auto_repeat\" in all_fields:\n\t\t\t\treturn\n\n\t\t\tinsert_after = self.fields[len(self.fields) - 1].fieldname\n\t\t\tcreate_custom_field(self.doc_type, dict(\n\t\t\t\tfieldname='auto_repeat',\n\t\t\t\tlabel='Auto Repeat',\n\t\t\t\tfieldtype='Link',\n\t\t\t\toptions='Auto Repeat',\n\t\t\t\tinsert_after=insert_after,\n\t\t\t\tread_only=1, no_copy=1, print_hide=1\n\t\t\t))\n\n\n\tdef get_name_translation(self):\n\t\t'''Get translation object if exists of current doctype name in the default language'''\n\t\treturn frappe.get_value('Translation', {\n\t\t\t\t'source_text': 
self.doc_type,\n\t\t\t\t'language': frappe.local.lang or 'en'\n\t\t\t}, ['name', 'translated_text'], as_dict=True)\n\n\tdef set_name_translation(self):\n\t\t'''Create, update custom translation for this doctype'''\n\t\tcurrent = self.get_name_translation()\n\t\tif not self.label:\n\t\t\tif current:\n\t\t\t\t# clear translation\n\t\t\t\tfrappe.delete_doc('Translation', current.name)\n\t\t\treturn\n\n\t\tif not current:\n\t\t\tfrappe.get_doc(\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": 'Translation',\n\t\t\t\t\t\"source_text\": self.doc_type,\n\t\t\t\t\t\"translated_text\": self.label,\n\t\t\t\t\t\"language_code\": frappe.local.lang or 'en'\n\t\t\t\t}\n\t\t\t).insert()\n\t\t\treturn\n\n\t\tif self.label != current.translated_text:\n\t\t\tfrappe.db.set_value('Translation', current.name, 'translated_text', self.label)\n\t\t\tfrappe.translate.clear_cache()\n\n\tdef clear_existing_doc(self):\n\t\tdoc_type = self.doc_type\n\n\t\tfor fieldname in self.meta.get_valid_columns():\n\t\t\tself.set(fieldname, None)\n\n\t\tfor df in self.meta.get_table_fields():\n\t\t\tself.set(df.fieldname, [])\n\n\t\tself.doc_type = doc_type\n\t\tself.name = \"Customize Form\"\n\n\[email protected]()\n\tdef save_customization(self):\n\t\tif not self.doc_type:\n\t\t\treturn\n\t\tvalidate_series(self, self.autoname, self.doc_type)\n\t\tself.flags.update_db = False\n\t\tself.flags.rebuild_doctype_for_global_search = False\n\t\tself.set_property_setters()\n\t\tself.update_custom_fields()\n\t\tself.set_name_translation()\n\t\tvalidate_fields_for_doctype(self.doc_type)\n\t\tcheck_email_append_to(self)\n\n\t\tif self.flags.update_db:\n\t\t\tfrappe.db.updatedb(self.doc_type)\n\n\t\tif not hasattr(self, 'hide_success') or not self.hide_success:\n\t\t\tfrappe.msgprint(_(\"{0} updated\").format(_(self.doc_type)), alert=True)\n\t\tfrappe.clear_cache(doctype=self.doc_type)\n\t\tself.fetch_to_customize()\n\n\t\tif self.flags.rebuild_doctype_for_global_search:\n\t\t\tfrappe.enqueue('frappe.utils.global_search.rebuild_for_doctype',\n\t\t\t\tnow=True, doctype=self.doc_type)\n\n\tdef set_property_setters(self):\n\t\tmeta = frappe.get_meta(self.doc_type)\n\n\t\t# doctype\n\t\tself.set_property_setters_for_doctype(meta)\n\n\t\t# docfield\n\t\tfor df in self.get(\"fields\"):\n\t\t\tmeta_df = meta.get(\"fields\", {\"fieldname\": df.fieldname})\n\t\t\tif not meta_df or meta_df[0].get(\"is_custom_field\"):\n\t\t\t\tcontinue\n\t\t\tself.set_property_setters_for_docfield(meta, df, meta_df)\n\n\t\t# action and links\n\t\tself.set_property_setters_for_actions_and_links(meta)\n\n\tdef set_property_setters_for_doctype(self, meta):\n\t\tfor prop, prop_type in doctype_properties.items():\n\t\t\tif self.get(prop) != meta.get(prop):\n\t\t\t\tself.make_property_setter(prop, self.get(prop), prop_type)\n\n\tdef set_property_setters_for_docfield(self, meta, df, meta_df):\n\t\tfor prop, prop_type in docfield_properties.items():\n\t\t\tif prop != \"idx\" and (df.get(prop) or '') != (meta_df[0].get(prop) or ''):\n\t\t\t\tif not self.allow_property_change(prop, meta_df, df):\n\t\t\t\t\tcontinue\n\n\t\t\t\tself.make_property_setter(prop, df.get(prop), prop_type,\n\t\t\t\t\tfieldname=df.fieldname)\n\n\tdef allow_property_change(self, prop, meta_df, df):\n\t\tif prop == \"fieldtype\":\n\t\t\tself.validate_fieldtype_change(df, meta_df[0].get(prop), df.get(prop))\n\n\t\telif prop == \"length\":\n\t\t\told_value_length = cint(meta_df[0].get(prop))\n\t\t\tnew_value_length = cint(df.get(prop))\n\n\t\t\tif new_value_length and (old_value_length > 
new_value_length):\n\t\t\t\tself.check_length_for_fieldtypes.append({'df': df, 'old_value': meta_df[0].get(prop)})\n\t\t\t\tself.validate_fieldtype_length()\n\t\t\telse:\n\t\t\t\tself.flags.update_db = True\n\n\t\telif prop == \"allow_on_submit\" and df.get(prop):\n\t\t\tif not frappe.db.get_value(\"DocField\",\n\t\t\t\t{\"parent\": self.doc_type, \"fieldname\": df.fieldname}, \"allow_on_submit\"):\n\t\t\t\tfrappe.msgprint(_(\"Row {0}: Not allowed to enable Allow on Submit for standard fields\")\\\n\t\t\t\t\t.format(df.idx))\n\t\t\t\treturn False\n\n\t\telif prop == \"reqd\" and \\\n\t\t\t((frappe.db.get_value(\"DocField\",\n\t\t\t\t{\"parent\":self.doc_type,\"fieldname\":df.fieldname}, \"reqd\") == 1) \\\n\t\t\t\tand (df.get(prop) == 0)):\n\t\t\tfrappe.msgprint(_(\"Row {0}: Not allowed to disable Mandatory for standard fields\")\\\n\t\t\t\t\t.format(df.idx))\n\t\t\treturn False\n\n\t\telif prop == \"in_list_view\" and df.get(prop) \\\n\t\t\tand df.fieldtype!=\"Attach Image\" and df.fieldtype in no_value_fields:\n\t\t\t\t\tfrappe.msgprint(_(\"'In List View' not allowed for type {0} in row {1}\")\n\t\t\t\t\t\t.format(df.fieldtype, df.idx))\n\t\t\t\t\treturn False\n\n\t\telif prop == \"precision\" and cint(df.get(\"precision\")) > 6 \\\n\t\t\t\tand cint(df.get(\"precision\")) > cint(meta_df[0].get(\"precision\")):\n\t\t\tself.flags.update_db = True\n\n\t\telif prop == \"unique\":\n\t\t\tself.flags.update_db = True\n\n\t\telif (prop == \"read_only\" and cint(df.get(\"read_only\"))==0\n\t\t\t\tand frappe.db.get_value(\"DocField\", {\"parent\": self.doc_type,\n\t\t\t\t\"fieldname\": df.fieldname}, \"read_only\")==1):\n\t\t\t# if docfield has read_only checked and user is trying to make it editable, don't allow it\n\t\t\tfrappe.msgprint(_(\"You cannot unset 'Read Only' for field {0}\").format(df.label))\n\t\t\treturn False\n\n\t\telif prop == \"options\" and df.get(\"fieldtype\") not in ALLOWED_OPTIONS_CHANGE:\n\t\t\tfrappe.msgprint(_(\"You can't set 'Options' for field {0}\").format(df.label))\n\t\t\treturn False\n\n\t\telif prop == 'translatable' and not supports_translation(df.get('fieldtype')):\n\t\t\tfrappe.msgprint(_(\"You can't set 'Translatable' for field {0}\").format(df.label))\n\t\t\treturn False\n\n\t\telif (prop == 'in_global_search' and\n\t\t\tdf.in_global_search != meta_df[0].get(\"in_global_search\")):\n\t\t\tself.flags.rebuild_doctype_for_global_search = True\n\n\t\treturn True\n\n\tdef set_property_setters_for_actions_and_links(self, meta):\n\t\t'''\n\t\tApply property setters or create custom records for DocType Action and DocType Link\n\t\t'''\n\t\tfor doctype, fieldname, field_map in (\n\t\t\t\t('DocType Link', 'links', doctype_link_properties),\n\t\t\t\t('DocType Action', 'actions', doctype_action_properties)\n\t\t\t):\n\t\t\thas_custom = False\n\t\t\titems = []\n\t\t\tfor i, d in enumerate(self.get(fieldname) or []):\n\t\t\t\td.idx = i\n\t\t\t\tif frappe.db.exists(doctype, d.name) and not d.custom:\n\t\t\t\t\t# check property and apply property setter\n\t\t\t\t\toriginal = frappe.get_doc(doctype, d.name)\n\t\t\t\t\tfor prop, prop_type in field_map.items():\n\t\t\t\t\t\tif d.get(prop) != original.get(prop):\n\t\t\t\t\t\t\tself.make_property_setter(prop, d.get(prop), prop_type,\n\t\t\t\t\t\t\t\tapply_on=doctype, row_name=d.name)\n\t\t\t\t\titems.append(d.name)\n\t\t\t\telse:\n\t\t\t\t\t# custom - just insert/update\n\t\t\t\t\td.parent = self.doc_type\n\t\t\t\t\td.custom = 1\n\t\t\t\t\td.save(ignore_permissions=True)\n\t\t\t\t\thas_custom = 
True\n\t\t\t\t\titems.append(d.name)\n\n\t\t\tself.update_order_property_setter(has_custom, fieldname)\n\t\t\tself.clear_removed_items(doctype, items)\n\n\tdef update_order_property_setter(self, has_custom, fieldname):\n\t\t'''\n\t\tWe need to maintain the order of the link/actions if the user has shuffled them.\n\t\tSo we create a new property (ex `links_order`) to keep a list of items.\n\t\t'''\n\t\tproperty_name = '{}_order'.format(fieldname)\n\t\tif has_custom:\n\t\t\t# save the order of the actions and links\n\t\t\tself.make_property_setter(property_name,\n\t\t\t\tjson.dumps([d.name for d in self.get(fieldname)]), 'Small Text')\n\t\telse:\n\t\t\tfrappe.db.delete('Property Setter', dict(property=property_name,\n\t\t\t\tdoc_type=self.doc_type))\n\n\n\tdef clear_removed_items(self, doctype, items):\n\t\t'''\n\t\tClear rows that do not appear in `items`. These have been removed by the user.\n\t\t'''\n\t\tif items:\n\t\t\tfrappe.db.delete(doctype, dict(parent=self.doc_type, custom=1,\n\t\t\t\tname=('not in', items)))\n\t\telse:\n\t\t\tfrappe.db.delete(doctype, dict(parent=self.doc_type, custom=1))\n\n\tdef update_custom_fields(self):\n\t\tfor i, df in enumerate(self.get(\"fields\")):\n\t\t\tif df.get(\"is_custom_field\"):\n\t\t\t\tif not frappe.db.exists('Custom Field', {'dt': self.doc_type, 'fieldname': df.fieldname}):\n\t\t\t\t\tself.add_custom_field(df, i)\n\t\t\t\t\tself.flags.update_db = True\n\t\t\t\telse:\n\t\t\t\t\tself.update_in_custom_field(df, i)\n\n\t\tself.delete_custom_fields()\n\n\tdef add_custom_field(self, df, i):\n\t\td = frappe.new_doc(\"Custom Field\")\n\n\t\td.dt = self.doc_type\n\n\t\tfor prop in docfield_properties:\n\t\t\td.set(prop, df.get(prop))\n\n\t\tif i!=0:\n\t\t\td.insert_after = self.fields[i-1].fieldname\n\t\td.idx = i\n\n\t\td.insert()\n\t\tdf.fieldname = d.fieldname\n\n\tdef update_in_custom_field(self, df, i):\n\t\tmeta = frappe.get_meta(self.doc_type)\n\t\tmeta_df = meta.get(\"fields\", {\"fieldname\": df.fieldname})\n\t\tif not (meta_df and meta_df[0].get(\"is_custom_field\")):\n\t\t\t# not a custom field\n\t\t\treturn\n\n\t\tcustom_field = frappe.get_doc(\"Custom Field\", meta_df[0].name)\n\t\tchanged = False\n\t\tfor prop in docfield_properties:\n\t\t\tif df.get(prop) != custom_field.get(prop):\n\t\t\t\tif prop == \"fieldtype\":\n\t\t\t\t\tself.validate_fieldtype_change(df, meta_df[0].get(prop), df.get(prop))\n\n\t\t\t\tcustom_field.set(prop, df.get(prop))\n\t\t\t\tchanged = True\n\n\t\t# check and update `insert_after` property\n\t\tif i!=0:\n\t\t\tinsert_after = self.fields[i-1].fieldname\n\t\t\tif custom_field.insert_after != insert_after:\n\t\t\t\tcustom_field.insert_after = insert_after\n\t\t\t\tcustom_field.idx = i\n\t\t\t\tchanged = True\n\n\t\tif changed:\n\t\t\tcustom_field.db_update()\n\t\t\tself.flags.update_db = True\n\t\t\t#custom_field.save()\n\n\tdef delete_custom_fields(self):\n\t\tmeta = frappe.get_meta(self.doc_type)\n\t\tfields_to_remove = (set([df.fieldname for df in meta.get(\"fields\")])\n\t\t\t- set(df.fieldname for df in self.get(\"fields\")))\n\n\t\tfor fieldname in fields_to_remove:\n\t\t\tdf = meta.get(\"fields\", {\"fieldname\": fieldname})[0]\n\t\t\tif df.get(\"is_custom_field\"):\n\t\t\t\tfrappe.delete_doc(\"Custom Field\", df.name)\n\n\tdef make_property_setter(self, prop, value, property_type, fieldname=None,\n\t\tapply_on=None, row_name = None):\n\t\tdelete_property_setter(self.doc_type, prop, fieldname, row_name)\n\n\t\tproperty_value = self.get_existing_property_value(prop, fieldname)\n\n\t\tif 
property_value==value:\n\t\t\treturn\n\n\t\tif not apply_on:\n\t\t\tapply_on = \"DocField\" if fieldname else \"DocType\"\n\n\t\t# create a new property setter\n\t\tfrappe.make_property_setter({\n\t\t\t\"doctype\": self.doc_type,\n\t\t\t\"doctype_or_field\": apply_on,\n\t\t\t\"fieldname\": fieldname,\n\t\t\t\"row_name\": row_name,\n\t\t\t\"property\": prop,\n\t\t\t\"value\": value,\n\t\t\t\"property_type\": property_type\n\t\t})\n\n\tdef get_existing_property_value(self, property_name, fieldname=None):\n\t\t# check if there is any need to make property setter!\n\t\tif fieldname:\n\t\t\tproperty_value = frappe.db.get_value(\"DocField\", {\"parent\": self.doc_type,\n\t\t\t\t\"fieldname\": fieldname}, property_name)\n\t\telse:\n\t\t\tif frappe.db.has_column(\"DocType\", property_name):\n\t\t\t\tproperty_value = frappe.db.get_value(\"DocType\", self.doc_type, property_name)\n\t\t\telse:\n\t\t\t\tproperty_value = None\n\n\t\treturn property_value\n\n\tdef validate_fieldtype_change(self, df, old_value, new_value):\n\t\tallowed = self.allow_fieldtype_change(old_value, new_value)\n\t\tif allowed:\n\t\t\told_value_length = cint(frappe.db.type_map.get(old_value)[1])\n\t\t\tnew_value_length = cint(frappe.db.type_map.get(new_value)[1])\n\n\t\t\t# Ignore fieldtype check validation if new field type has unspecified maxlength\n\t\t\t# Changes like DATA to TEXT, where new_value_lenth equals 0 will not be validated\n\t\t\tif new_value_length and (old_value_length > new_value_length):\n\t\t\t\tself.check_length_for_fieldtypes.append({'df': df, 'old_value': old_value})\n\t\t\t\tself.validate_fieldtype_length()\n\t\t\telse:\n\t\t\t\tself.flags.update_db = True\n\t\tif not allowed:\n\t\t\tfrappe.throw(_(\"Fieldtype cannot be changed from {0} to {1} in row {2}\").format(old_value, new_value, df.idx))\n\n\tdef validate_fieldtype_length(self):\n\t\tfor field in self.check_length_for_fieldtypes:\n\t\t\tdf = field.get('df')\n\t\t\tmax_length = cint(frappe.db.type_map.get(df.fieldtype)[1])\n\t\t\tfieldname = df.fieldname\n\t\t\tdocs = frappe.db.sql('''\n\t\t\t\tSELECT name, {fieldname}, LENGTH({fieldname}) AS len\n\t\t\t\tFROM `tab{doctype}`\n\t\t\t\tWHERE LENGTH({fieldname}) > {max_length}\n\t\t\t'''.format(\n\t\t\t\tfieldname=fieldname,\n\t\t\t\tdoctype=self.doc_type,\n\t\t\t\tmax_length=max_length\n\t\t\t), as_dict=True)\n\t\t\tlinks = []\n\t\t\tlabel = df.label\n\t\t\tfor doc in docs:\n\t\t\t\tlinks.append(frappe.utils.get_link_to_form(self.doc_type, doc.name))\n\t\t\tlinks_str = ', '.join(links)\n\n\t\t\tif docs:\n\t\t\t\tfrappe.throw(_('Value for field {0} is too long in {1}. 
Length should be lesser than {2} characters')\n\t\t\t\t\t.format(\n\t\t\t\t\t\tfrappe.bold(label),\n\t\t\t\t\t\tlinks_str,\n\t\t\t\t\t\tfrappe.bold(max_length)\n\t\t\t\t\t), title=_('Data Too Long'), is_minimizable=len(docs) > 1)\n\n\t\tself.flags.update_db = True\n\n\[email protected]()\n\tdef reset_to_defaults(self):\n\t\tif not self.doc_type:\n\t\t\treturn\n\n\t\treset_customization(self.doc_type)\n\t\tself.fetch_to_customize()\n\n\t@classmethod\n\tdef allow_fieldtype_change(self, old_type: str, new_type: str) -> bool:\n\t\t\"\"\" allow type change, if both old_type and new_type are in same field group.\n\t\tfield groups are defined in ALLOWED_FIELDTYPE_CHANGE variables.\n\t\t\"\"\"\n\t\tin_field_group = lambda group: (old_type in group) and (new_type in group)\n\t\treturn any(map(in_field_group, ALLOWED_FIELDTYPE_CHANGE))\n\ndef reset_customization(doctype):\n\tsetters = frappe.get_all(\"Property Setter\", filters={\n\t\t'doc_type': doctype,\n\t\t'field_name': ['!=', 'naming_series'],\n\t\t'property': ['!=', 'options']\n\t}, pluck='name')\n\n\tfor setter in setters:\n\t\tfrappe.delete_doc(\"Property Setter\", setter)\n\n\tfrappe.clear_cache(doctype=doctype)\n\ndoctype_properties = {\n\t'search_fields': 'Data',\n\t'title_field': 'Data',\n\t'image_field': 'Data',\n\t'sort_field': 'Data',\n\t'sort_order': 'Data',\n\t'default_print_format': 'Data',\n\t'allow_copy': 'Check',\n\t'istable': 'Check',\n\t'quick_entry': 'Check',\n\t'editable_grid': 'Check',\n\t'max_attachments': 'Int',\n\t'track_changes': 'Check',\n\t'track_views': 'Check',\n\t'allow_auto_repeat': 'Check',\n\t'allow_import': 'Check',\n\t'show_preview_popup': 'Check',\n\t'default_email_template': 'Data',\n\t'email_append_to': 'Check',\n\t'subject_field': 'Data',\n\t'sender_field': 'Data',\n\t'autoname': 'Data'\n}\n\ndocfield_properties = {\n\t'idx': 'Int',\n\t'label': 'Data',\n\t'fieldtype': 'Select',\n\t'options': 'Text',\n\t'fetch_from': 'Small Text',\n\t'fetch_if_empty': 'Check',\n\t'permlevel': 'Int',\n\t'width': 'Data',\n\t'print_width': 'Data',\n\t'non_negative': 'Check',\n\t'reqd': 'Check',\n\t'unique': 'Check',\n\t'ignore_user_permissions': 'Check',\n\t'in_list_view': 'Check',\n\t'in_standard_filter': 'Check',\n\t'in_global_search': 'Check',\n\t'in_preview': 'Check',\n\t'bold': 'Check',\n\t'no_copy': 'Check',\n\t'hidden': 'Check',\n\t'collapsible': 'Check',\n\t'collapsible_depends_on': 'Data',\n\t'print_hide': 'Check',\n\t'print_hide_if_no_value': 'Check',\n\t'report_hide': 'Check',\n\t'allow_on_submit': 'Check',\n\t'translatable': 'Check',\n\t'mandatory_depends_on': 'Data',\n\t'read_only_depends_on': 'Data',\n\t'depends_on': 'Data',\n\t'description': 'Text',\n\t'default': 'Text',\n\t'precision': 'Select',\n\t'read_only': 'Check',\n\t'length': 'Int',\n\t'columns': 'Int',\n\t'remember_last_selected_value': 'Check',\n\t'allow_bulk_edit': 'Check',\n\t'auto_repeat': 'Link',\n\t'allow_in_quick_entry': 'Check',\n\t'hide_border': 'Check',\n\t'hide_days': 'Check',\n\t'hide_seconds': 'Check'\n}\n\ndoctype_link_properties = {\n\t'link_doctype': 'Link',\n\t'link_fieldname': 'Data',\n\t'group': 'Data',\n\t'hidden': 'Check'\n}\n\ndoctype_action_properties = {\n\t'label': 'Link',\n\t'action_type': 'Select',\n\t'action': 'Small Text',\n\t'group': 'Data',\n\t'hidden': 'Check'\n}\n\n\nALLOWED_FIELDTYPE_CHANGE = (\n\t('Currency', 'Float', 'Percent'),\n\t('Small Text', 'Data'),\n\t('Text', 'Data'),\n\t('Text', 'Text Editor', 'Code', 'Signature', 'HTML Editor'),\n\t('Data', 'Select'),\n\t('Text', 'Small Text'),\n\t('Text', 'Data', 
'Barcode'),\n\t('Code', 'Geolocation'),\n\t('Table', 'Table MultiSelect'))\n\nALLOWED_OPTIONS_CHANGE = ('Read Only', 'HTML', 'Select', 'Data')\n", "path": "frappe/custom/doctype/customize_form/customize_form.py" } ]
diff --git a/frappe/custom/doctype/customize_form/customize_form.py b/frappe/custom/doctype/customize_form/customize_form.py index 53626a75217b..faa8e74c2feb 100644 --- a/frappe/custom/doctype/customize_form/customize_form.py +++ b/frappe/custom/doctype/customize_form/customize_form.py @@ -533,6 +533,7 @@ def reset_customization(doctype): 'in_global_search': 'Check', 'in_preview': 'Check', 'bold': 'Check', + 'no_copy': 'Check', 'hidden': 'Check', 'collapsible': 'Check', 'collapsible_depends_on': 'Data', diff --git a/frappe/custom/doctype/customize_form/test_customize_form.py b/frappe/custom/doctype/customize_form/test_customize_form.py index 7d87fd0f4c4f..1aa33e7ce90a 100644 --- a/frappe/custom/doctype/customize_form/test_customize_form.py +++ b/frappe/custom/doctype/customize_form/test_customize_form.py @@ -98,13 +98,17 @@ def test_save_customization_custom_field_property(self): custom_field = d.get("fields", {"fieldname": "test_custom_field"})[0] custom_field.reqd = 1 + custom_field.no_copy = 1 d.run_method("save_customization") - self.assertEquals(frappe.db.get_value("Custom Field", "Event-test_custom_field", "reqd"), 1) + self.assertEqual(frappe.db.get_value("Custom Field", "Event-test_custom_field", "reqd"), 1) + self.assertEqual(frappe.db.get_value("Custom Field", "Event-test_custom_field", "no_copy"), 1) custom_field = d.get("fields", {"is_custom_field": True})[0] custom_field.reqd = 0 + custom_field.no_copy = 0 d.run_method("save_customization") - self.assertEquals(frappe.db.get_value("Custom Field", "Event-test_custom_field", "reqd"), 0) + self.assertEqual(frappe.db.get_value("Custom Field", "Event-test_custom_field", "reqd"), 0) + self.assertEqual(frappe.db.get_value("Custom Field", "Event-test_custom_field", "no_copy"), 0) def test_save_customization_new_field(self): d = self.get_customize_form("Event") diff --git a/frappe/custom/doctype/customize_form_field/customize_form_field.json b/frappe/custom/doctype/customize_form_field/customize_form_field.json index 0a456b102605..9b2986d9e620 100644 --- a/frappe/custom/doctype/customize_form_field/customize_form_field.json +++ b/frappe/custom/doctype/customize_form_field/customize_form_field.json @@ -19,6 +19,7 @@ "in_global_search", "in_preview", "bold", + "no_copy", "allow_in_quick_entry", "translatable", "column_break_7", @@ -422,13 +423,19 @@ "fieldname": "non_negative", "fieldtype": "Check", "label": "Non Negative" + }, + { + "default": "0", + "fieldname": "no_copy", + "fieldtype": "Check", + "label": "No Copy" } ], "idx": 1, "index_web_pages_for_search": 1, "istable": 1, "links": [], - "modified": "2021-07-10 21:57:24.479749", + "modified": "2022-02-08 19:38:16.111199", "modified_by": "Administrator", "module": "Custom", "name": "Customize Form Field", @@ -436,4 +443,4 @@ "permissions": [], "sort_field": "modified", "sort_order": "ASC" -} \ No newline at end of file +}
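The `allow_fieldtype_change` helper in the file above states its rule in its docstring: a fieldtype change is allowed only when both the old and the new type sit in the same group of `ALLOWED_FIELDTYPE_CHANGE`. A minimal standalone sketch of that check, using a trimmed copy of the groups purely for illustration:

```python
# Trimmed copy of ALLOWED_FIELDTYPE_CHANGE, for illustration only.
FIELD_GROUPS = (
    ("Currency", "Float", "Percent"),
    ("Small Text", "Data"),
    ("Text", "Data", "Barcode"),
)

def allow_fieldtype_change(old_type: str, new_type: str) -> bool:
    # Allowed only if some group contains both the old and the new type.
    return any(old_type in group and new_type in group for group in FIELD_GROUPS)

print(allow_fieldtype_change("Currency", "Percent"))  # True  - same group
print(allow_fieldtype_change("Currency", "Data"))     # False - no shared group
```

Even when the change is allowed, `validate_fieldtype_change` above still compares the column lengths from `frappe.db.type_map` and queues a length validation, so "allowed" does not mean "unchecked".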
carltongibson__django-filter-568
Import of rest_framework backend can fail
In the DRF docs, they give an example which looks like:
```
import django_filters
# ...

class ProductFilter(django_filters.rest_framework.FilterSet):
    # ...
```
This does not work, however, with the way `django_filters/__init__.py` is set up. Using this example, I get:
`AttributeError: 'module' object has no attribute 'rest_framework'`
I have found a fix for it by adding the following to `django_filters/__init__.py`:
`from . import rest_framework`
[ { "content": "# flake8: noqa\nfrom __future__ import absolute_import\nfrom .constants import STRICTNESS\nfrom .filterset import FilterSet\nfrom .filters import *\n\n__version__ = '1.0.0'\n\n\ndef parse_version(version):\n '''\n '0.1.2-dev' -> (0, 1, 2, 'dev')\n '0.1.2' -> (0, 1, 2)\n '''\n v = version.split('.')\n v = v[:-1] + v[-1].split('-')\n ret = []\n for p in v:\n if p.isdigit():\n ret.append(int(p))\n else:\n ret.append(p)\n return tuple(ret)\n\nVERSION = parse_version(__version__)\n", "path": "django_filters/__init__.py" } ]
[ { "content": "# flake8: noqa\nfrom __future__ import absolute_import\nfrom .constants import STRICTNESS\nfrom .filterset import FilterSet\nfrom .filters import *\n\n# We make the `rest_framework` module available without an additional import.\n# If DRF is not installed we simply set None.\ntry:\n from . import rest_framework\nexcept ImportError:\n rest_framework = None\n\n__version__ = '1.0.0'\n\n\ndef parse_version(version):\n '''\n '0.1.2-dev' -> (0, 1, 2, 'dev')\n '0.1.2' -> (0, 1, 2)\n '''\n v = version.split('.')\n v = v[:-1] + v[-1].split('-')\n ret = []\n for p in v:\n if p.isdigit():\n ret.append(int(p))\n else:\n ret.append(p)\n return tuple(ret)\n\nVERSION = parse_version(__version__)\n", "path": "django_filters/__init__.py" } ]
diff --git a/django_filters/__init__.py b/django_filters/__init__.py index 6f2400721..b9f928131 100644 --- a/django_filters/__init__.py +++ b/django_filters/__init__.py @@ -4,6 +4,13 @@ from .filterset import FilterSet from .filters import * +# We make the `rest_framework` module available without an additional import. +# If DRF is not installed we simply set None. +try: + from . import rest_framework +except ImportError: + rest_framework = None + __version__ = '1.0.0'
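With the guarded import from this patch in place, the attribute access from the DRF docs example resolves instead of raising `AttributeError`. A quick hedged check (it assumes a configured Django project with both django-filter and djangorestframework installed; per the patch, `django_filters.rest_framework` is `None` only when DRF itself is missing):

```python
import django_filters

# The submodule is now reachable as a package attribute, so the DRF-docs
# style base class lookup used by ProductFilter in the issue works.
print(django_filters.rest_framework)            # <module 'django_filters.rest_framework' ...>
print(django_filters.rest_framework.FilterSet)  # DRF-aware FilterSet base class
```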
ivy-llc__ivy-13273
unravel_index
[ { "content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n\n\n@to_ivy_arrays_and_back\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diag_indices(n, ndim=2):\n idx = ivy.arange(n, dtype=int)\n return (idx,) * ndim\n\n\n# take_along_axis\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis, mode=\"fill\"):\n return ivy.take_along_axis(arr, indices, axis, mode=mode)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n_rows, n_cols=None, k=0):\n return ivy.tril_indices(n_rows, n_cols, k)\n\n\n@to_ivy_arrays_and_back\ndef triu_indices(n, k=0, m=None):\n return ivy.triu_indices(n, m, k)\n\n\n@to_ivy_arrays_and_back\ndef triu_indices_from(arr, k=0):\n return ivy.triu_indices(arr.shape[-2], arr.shape[-1], k)\n\n\ndef tril_indices_from(arr, k=0):\n return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)\n", "path": "ivy/functional/frontends/jax/numpy/indexing.py" } ]
[ { "content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n\n\n@to_ivy_arrays_and_back\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diag_indices(n, ndim=2):\n idx = ivy.arange(n, dtype=int)\n return (idx,) * ndim\n\n\n# take_along_axis\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis, mode=\"fill\"):\n return ivy.take_along_axis(arr, indices, axis, mode=mode)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n_rows, n_cols=None, k=0):\n return ivy.tril_indices(n_rows, n_cols, k)\n\n\n@to_ivy_arrays_and_back\ndef triu_indices(n, k=0, m=None):\n return ivy.triu_indices(n, m, k)\n\n\n@to_ivy_arrays_and_back\ndef triu_indices_from(arr, k=0):\n return ivy.triu_indices(arr.shape[-2], arr.shape[-1], k)\n\n\ndef tril_indices_from(arr, k=0):\n return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)\n\n\n# unravel_index\n@to_ivy_arrays_and_back\ndef unravel_index(indices, shape):\n ret = [x.astype(\"int64\") for x in ivy.unravel_index(indices, shape)]\n return tuple(ret)\n", "path": "ivy/functional/frontends/jax/numpy/indexing.py" } ]
diff --git a/ivy/functional/frontends/jax/numpy/indexing.py b/ivy/functional/frontends/jax/numpy/indexing.py index 10df7c6e7d956..0594d21259f97 100644 --- a/ivy/functional/frontends/jax/numpy/indexing.py +++ b/ivy/functional/frontends/jax/numpy/indexing.py @@ -44,3 +44,10 @@ def triu_indices_from(arr, k=0): def tril_indices_from(arr, k=0): return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k) + + +# unravel_index +@to_ivy_arrays_and_back +def unravel_index(indices, shape): + ret = [x.astype("int64") for x in ivy.unravel_index(indices, shape)] + return tuple(ret) diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_indexing.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_indexing.py index d3a6a9025b487..9817ca2a0589e 100644 --- a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_indexing.py +++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_indexing.py @@ -293,3 +293,48 @@ def test_jax_numpy_tril_indices_from( arr=x[0], k=k, ) + + +# unravel_index [email protected] +def max_value_as_shape_prod(draw): + shape = draw( + helpers.get_shape( + min_num_dims=1, + max_num_dims=5, + min_dim_size=1, + max_dim_size=5, + ) + ) + dtype_and_x = draw( + helpers.dtype_values_axis( + available_dtypes=["int32", "int64"], + min_value=0, + max_value=np.prod(shape) - 1, + ) + ) + return dtype_and_x, shape +@handle_frontend_test( + fn_tree="jax.numpy.unravel_index", + dtype_x_shape=max_value_as_shape_prod(), + test_with_out=st.just(False), +) +def test_jax_numpy_unravel_index( + *, + dtype_x_shape, + test_flags, + frontend, + fn_tree, + on_device, +): + dtype_and_x, shape = dtype_x_shape + input_dtype, x = dtype_and_x[0], dtype_and_x[1] + helpers.test_frontend_function( + input_dtypes=input_dtype, + test_flags=test_flags, + frontend=frontend, + fn_tree=fn_tree, + on_device=on_device, + indices=x[0], + shape=shape, + )
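For context on what the new frontend mirrors: `unravel_index` maps flat indices back into per-dimension coordinates for a given shape. The reference behaviour is shown here with plain NumPy only because that is the easiest way to illustrate it; the new function itself is called through ivy's `jax.numpy` frontend and, per the diff above, additionally casts its outputs to `int64`:

```python
import numpy as np

# A flat index i into an array of shape (7, 6) corresponds to row i // 6
# and column i % 6.
print(np.unravel_index(22, (7, 6)))           # (3, 4)   because 3 * 6 + 4 == 22
print(np.unravel_index([0, 22, 41], (7, 6)))  # (array([0, 3, 6]), array([0, 4, 5]))
```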
deeppavlov__DeepPavlov-76
What is "'Chainer' object has no attribute 'infer' 2018-03-04 14:09:23,638 (util.py:64 WorkerThread2) ERROR - TeleBot: "AttributeError occurred, args=("'Chainer' object has no attribute 'infer'",) Traceback (most recent call last): File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py", line 58, in run task(*args, **kwargs) File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 48, in handle_inference pred = model.infer(context) AttributeError: 'Chainer' object has no attribute 'infer' " 2018-03-04 14:09:23.638 ERROR in 'TeleBot'['util'] at line 64: AttributeError occurred, args=("'Chainer' object has no attribute 'infer'",) Traceback (most recent call last): File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py", line 58, in run task(*args, **kwargs) File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 48, in handle_inference pred = model.infer(context) AttributeError: 'Chainer' object has no attribute 'infer' Traceback (most recent call last): File "deep.py", line 60, in <module> main() File "deep.py", line 56, in main interact_model_by_telegram(pipeline_config_path, token) File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 58, in interact_model_by_telegram init_bot_for_model(token, model) File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 52, in init_bot_for_model bot.polling() File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/__init__.py", line 264, in polling self.__threaded_polling(none_stop, interval, timeout) File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/__init__.py", line 288, in __threaded_polling self.worker_pool.raise_exceptions() File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py", line 107, in raise_exceptions six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2]) File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/six.py", line 693, in reraise raise value File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py", line 58, in run task(*args, **kwargs) File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 48, in handle_inference pred = model.infer(context) AttributeError: 'Chainer' object has no attribute 'infer'
[ { "content": "\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport telebot\n\nfrom deeppavlov.core.common.file import read_json\nfrom deeppavlov.core.commands.infer import build_model_from_config\n\n\ndef init_bot_for_model(token, model):\n bot = telebot.TeleBot(token)\n\n model_name = type(model).__name__\n models_info = read_json('../telegram_utils/models_info.json')\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n\n @bot.message_handler(commands=['start'])\n def send_start_message(message):\n chat_id = message.chat.id\n out_message = model_info['start_message']\n if hasattr(model, 'reset'):\n model.reset()\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler(commands=['help'])\n def send_help_message(message):\n chat_id = message.chat.id\n out_message = model_info['help_message']\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler()\n def handle_inference(message):\n chat_id = message.chat.id\n context = message.text\n\n pred = model.infer(context)\n reply_message = str(pred)\n bot.send_message(chat_id, reply_message)\n\n bot.polling()\n\n\ndef interact_model_by_telegram(config_path, token):\n config = read_json(config_path)\n model = build_model_from_config(config)\n init_bot_for_model(token, model)\n", "path": "telegram_utils/telegram_ui.py" } ]
[ { "content": "\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport telebot\n\nfrom deeppavlov.core.common.file import read_json\nfrom deeppavlov.core.commands.infer import build_model_from_config\n\n\ndef init_bot_for_model(token, model):\n bot = telebot.TeleBot(token)\n\n model_name = type(model).__name__\n models_info = read_json('../telegram_utils/models_info.json')\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n\n @bot.message_handler(commands=['start'])\n def send_start_message(message):\n chat_id = message.chat.id\n out_message = model_info['start_message']\n if hasattr(model, 'reset'):\n model.reset()\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler(commands=['help'])\n def send_help_message(message):\n chat_id = message.chat.id\n out_message = model_info['help_message']\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler()\n def handle_inference(message):\n chat_id = message.chat.id\n context = message.text\n\n pred = model(context)\n reply_message = str(pred)\n bot.send_message(chat_id, reply_message)\n\n bot.polling()\n\n\ndef interact_model_by_telegram(config_path, token):\n config = read_json(config_path)\n model = build_model_from_config(config)\n init_bot_for_model(token, model)\n", "path": "telegram_utils/telegram_ui.py" } ]
diff --git a/telegram_utils/telegram_ui.py b/telegram_utils/telegram_ui.py index 3841847438..1606180af6 100644 --- a/telegram_utils/telegram_ui.py +++ b/telegram_utils/telegram_ui.py @@ -45,7 +45,7 @@ def handle_inference(message): chat_id = message.chat.id context = message.text - pred = model.infer(context) + pred = model(context) reply_message = str(pred) bot.send_message(chat_id, reply_message)
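The patch works because the `Chainer` pipeline returned by `build_model_from_config` is callable rather than exposing an `infer` method, so the handler has to invoke `model(context)` directly. If a bot had to keep supporting older model classes that only expose `.infer()`, one hedged way to bridge both styles (not part of this patch) would be:

```python
def run_model(model, context):
    # Prefer the legacy .infer() interface when it exists, otherwise call the
    # model directly (Chainer pipelines are plain callables).
    infer = getattr(model, "infer", None)
    if callable(infer):
        return infer(context)
    return model(context)

# inside handle_inference:
#   pred = run_model(model, message.text)
```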
microsoft__botbuilder-python-2057
4.14.6 CloudAdapter fails to send Typing Activity in Teams ## Version botbuilder-core 4.14.6 botbuilder-integration-aiohttp 4.14.6 botbuilder-schema 4.14.6 ## Describe the bug I am unable to send typing indicators with the `ShowTypingMiddleware` middleware, `turn_context.send_activity`, and `turn_context.send_activities`. ## To Reproduce Create a bot ``` cfg = DefaultConfig() adapter = CloudAdapter(ConfigurationBotFrameworkAuthentication(cfg)) bot = Bot() ``` define on_message_activity [From documentation](https://learn.microsoft.com/en-us/azure/bot-service/bot-builder-howto-send-messages?view=azure-bot-service-4.0&tabs=python) ``` async def on_message_activity(self, turn_context: TurnContext): # pylint: disable=unused-argument if turn_context.activity.text == "wait": return await turn_context.send_activities([ Activity( type=ActivityTypes.typing ), Activity( type="delay", value=3000 ), Activity( type=ActivityTypes.message, text="Finished Typing" ) ]) else: return await turn_context.send_activity( f"You said {turn_context.activity.text}. Say 'wait' to watch me type." ) ``` Publish in azure, set up MS Teams channel. send 'wait' via Microsoft Teams stacktrace: ``` Traceback (most recent call last): File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/bot_adapter.py", line 174, in run_pipeline return await self._middleware.receive_activity_with_status( File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/middleware_set.py", line 69, in receive_activity_with_status return await self.receive_activity_internal(context, callback) File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/middleware_set.py", line 79, in receive_activity_internal return await callback(context) File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/activity_handler.py", line 70, in on_turn await self.on_message_activity(turn_context) File "/home/josh/ctrlstack/babelfish/askbot/microsoft-teams/src/bot.py", line 78, in on_message_activity return await turn_context.send_activities([ File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/turn_context.py", line 225, in send_activities return await self._emit(self._on_send_activities, output, logic()) File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/turn_context.py", line 303, in _emit return await logic File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/turn_context.py", line 220, in logic responses = await self.adapter.send_activities(self, output) File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/cloud_adapter_base.py", line 103, in send_activities response = response or ResourceResponse(activity.id or "") ``` ## Expected behavior the typing indicator for 3 seconds.
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom abc import ABC\nfrom asyncio import sleep\nfrom copy import Error\nfrom http import HTTPStatus\nfrom typing import Awaitable, Callable, List, Union\nfrom uuid import uuid4\n\nfrom botbuilder.core.invoke_response import InvokeResponse\n\nfrom botbuilder.schema import (\n Activity,\n ActivityEventNames,\n ActivityTypes,\n ConversationAccount,\n ConversationReference,\n ConversationResourceResponse,\n ConversationParameters,\n DeliveryModes,\n ExpectedReplies,\n ResourceResponse,\n)\nfrom botframework.connector import Channels, ConnectorClient\nfrom botframework.connector.auth import (\n AuthenticationConstants,\n BotFrameworkAuthentication,\n ClaimsIdentity,\n)\nfrom botframework.connector.auth.authenticate_request_result import (\n AuthenticateRequestResult,\n)\nfrom botframework.connector.auth.connector_factory import ConnectorFactory\nfrom botframework.connector.auth.user_token_client import UserTokenClient\nfrom .bot_adapter import BotAdapter\nfrom .conversation_reference_extension import get_continuation_activity\nfrom .turn_context import TurnContext\n\n\nclass CloudAdapterBase(BotAdapter, ABC):\n CONNECTOR_FACTORY_KEY = \"ConnectorFactory\"\n USER_TOKEN_CLIENT_KEY = \"UserTokenClient\"\n\n def __init__(\n self, bot_framework_authentication: BotFrameworkAuthentication\n ) -> None:\n super().__init__()\n\n if not bot_framework_authentication:\n raise TypeError(\"Expected BotFrameworkAuthentication but got None instead\")\n\n self.bot_framework_authentication = bot_framework_authentication\n\n async def send_activities(\n self, context: TurnContext, activities: List[Activity]\n ) -> List[ResourceResponse]:\n if not context:\n raise TypeError(\"Expected TurnContext but got None instead\")\n\n if activities is None:\n raise TypeError(\"Expected Activities list but got None instead\")\n\n if len(activities) == 0:\n raise TypeError(\"Expecting one or more activities, but the list was empty.\")\n\n responses = []\n\n for activity in activities:\n activity.id = None\n\n response = ResourceResponse()\n\n if activity.type == \"delay\":\n delay_time = int((activity.value or 1000) / 1000)\n await sleep(delay_time)\n elif activity.type == ActivityTypes.invoke_response:\n context.turn_state[self._INVOKE_RESPONSE_KEY] = activity\n elif (\n activity.type == ActivityTypes.trace\n and activity.channel_id != Channels.emulator\n ):\n # no-op\n pass\n else:\n connector_client: ConnectorClient = context.turn_state.get(\n self.BOT_CONNECTOR_CLIENT_KEY\n )\n if not connector_client:\n raise Error(\"Unable to extract ConnectorClient from turn context.\")\n\n if activity.reply_to_id:\n response = await connector_client.conversations.reply_to_activity(\n activity.conversation.id, activity.reply_to_id, activity\n )\n else:\n response = (\n await connector_client.conversations.send_to_conversation(\n activity.conversation.id, activity\n )\n )\n\n response = response or ResourceResponse(activity.id or \"\")\n\n responses.append(response)\n\n return responses\n\n async def update_activity(self, context: TurnContext, activity: Activity):\n if not context:\n raise TypeError(\"Expected TurnContext but got None instead\")\n\n if activity is None:\n raise TypeError(\"Expected Activity but got None instead\")\n\n connector_client: ConnectorClient = context.turn_state.get(\n self.BOT_CONNECTOR_CLIENT_KEY\n )\n if not connector_client:\n raise Error(\"Unable to extract ConnectorClient from turn 
context.\")\n\n response = await connector_client.conversations.update_activity(\n activity.conversation.id, activity.reply_to_id, activity\n )\n\n response_id = response.id if response and response.id else None\n\n return ResourceResponse(id=response_id) if response_id else None\n\n async def delete_activity(\n self, context: TurnContext, reference: ConversationReference\n ):\n if not context:\n raise TypeError(\"Expected TurnContext but got None instead\")\n\n if not reference:\n raise TypeError(\"Expected ConversationReference but got None instead\")\n\n connector_client: ConnectorClient = context.turn_state.get(\n self.BOT_CONNECTOR_CLIENT_KEY\n )\n if not connector_client:\n raise Error(\"Unable to extract ConnectorClient from turn context.\")\n\n await connector_client.conversations.delete_activity(\n reference.conversation.id, reference.activity_id\n )\n\n async def continue_conversation( # pylint: disable=arguments-differ\n self,\n reference: ConversationReference,\n callback: Callable,\n bot_app_id: str,\n ):\n \"\"\"\n Sends a proactive message to a conversation.\n Call this method to proactively send a message to a conversation.\n Most channels require a user to initiate a conversation with a bot before the bot can send activities\n to the user.\n\n :param reference: A reference to the conversation to continue.\n :type reference: :class:`botbuilder.schema.ConversationReference`\n :param callback: The method to call for the resulting bot turn.\n :type callback: :class:`typing.Callable`\n :param bot_app_id: The application Id of the bot. This is the appId returned by the Azure portal registration,\n and is generally found in the `MicrosoftAppId` parameter in `config.py`.\n :type bot_app_id: :class:`typing.str`\n \"\"\"\n return await self.process_proactive(\n self.create_claims_identity(bot_app_id),\n get_continuation_activity(reference),\n None,\n callback,\n )\n\n async def continue_conversation_with_claims(\n self,\n claims_identity: ClaimsIdentity,\n reference: ConversationReference,\n audience: str,\n logic: Callable[[TurnContext], Awaitable],\n ):\n return await self.process_proactive(\n claims_identity, get_continuation_activity(reference), audience, logic\n )\n\n async def create_conversation( # pylint: disable=arguments-differ\n self,\n bot_app_id: str,\n callback: Callable[[TurnContext], Awaitable] = None,\n conversation_parameters: ConversationParameters = None,\n channel_id: str = None,\n service_url: str = None,\n audience: str = None,\n ):\n if not service_url:\n raise TypeError(\n \"CloudAdapter.create_conversation(): service_url is required.\"\n )\n if not conversation_parameters:\n raise TypeError(\n \"CloudAdapter.create_conversation(): conversation_parameters is required.\"\n )\n if not callback:\n raise TypeError(\"CloudAdapter.create_conversation(): callback is required.\")\n\n # Create a ClaimsIdentity, to create the connector and for adding to the turn context.\n claims_identity = self.create_claims_identity(bot_app_id)\n claims_identity.claims[AuthenticationConstants.SERVICE_URL_CLAIM] = service_url\n\n # create the connectror factory\n connector_factory = self.bot_framework_authentication.create_connector_factory(\n claims_identity\n )\n\n # Create the connector client to use for outbound requests.\n connector_client = await connector_factory.create(service_url, audience)\n\n # Make the actual create conversation call using the connector.\n create_conversation_result = (\n await connector_client.conversations.create_conversation(\n 
conversation_parameters\n )\n )\n\n # Create the create activity to communicate the results to the application.\n create_activity = self._create_create_activity(\n create_conversation_result, channel_id, service_url, conversation_parameters\n )\n\n # Create a UserTokenClient instance for the application to use. (For example, in the OAuthPrompt.)\n user_token_client = (\n await self.bot_framework_authentication.create_user_token_client(\n claims_identity\n )\n )\n\n # Create a turn context and run the pipeline.\n context = self._create_turn_context(\n create_activity,\n claims_identity,\n None,\n connector_client,\n user_token_client,\n callback,\n connector_factory,\n )\n\n # Run the pipeline\n await self.run_pipeline(context, callback)\n\n async def process_proactive(\n self,\n claims_identity: ClaimsIdentity,\n continuation_activity: Activity,\n audience: str,\n logic: Callable[[TurnContext], Awaitable],\n ):\n # Create the connector factory and the inbound request, extracting parameters and then create a\n # connector for outbound requests.\n connector_factory = self.bot_framework_authentication.create_connector_factory(\n claims_identity\n )\n\n # Create the connector client to use for outbound requests.\n connector_client = await connector_factory.create(\n continuation_activity.service_url, audience\n )\n\n # Create a UserTokenClient instance for the application to use. (For example, in the OAuthPrompt.)\n user_token_client = (\n await self.bot_framework_authentication.create_user_token_client(\n claims_identity\n )\n )\n\n # Create a turn context and run the pipeline.\n context = self._create_turn_context(\n continuation_activity,\n claims_identity,\n audience,\n connector_client,\n user_token_client,\n logic,\n connector_factory,\n )\n\n # Run the pipeline\n await self.run_pipeline(context, logic)\n\n async def process_activity(\n self,\n auth_header_or_authenticate_request_result: Union[\n str, AuthenticateRequestResult\n ],\n activity: Activity,\n logic: Callable[[TurnContext], Awaitable],\n ):\n \"\"\"\n Creates a turn context and runs the middleware pipeline for an incoming activity.\n\n :param auth_header: The HTTP authentication header of the request\n :type auth_header: :class:`typing.Union[typing.str, AuthenticateRequestResult]`\n :param activity: The incoming activity\n :type activity: :class:`Activity`\n :param logic: The logic to execute at the end of the adapter's middleware pipeline.\n :type logic: :class:`typing.Callable`\n\n :return: A task that represents the work queued to execute.\n\n .. remarks::\n This class processes an activity received by the bots web server. This includes any messages\n sent from a user and is the method that drives what's often referred to as the\n bots *reactive messaging* flow.\n Call this method to reactively send a message to a conversation.\n If the task completes successfully, then an :class:`InvokeResponse` is returned;\n otherwise. 
`null` is returned.\n \"\"\"\n # Authenticate the inbound request, extracting parameters and create a ConnectorFactory for creating a\n # Connector for outbound requests.\n authenticate_request_result = (\n await self.bot_framework_authentication.authenticate_request(\n activity, auth_header_or_authenticate_request_result\n )\n if isinstance(auth_header_or_authenticate_request_result, str)\n else auth_header_or_authenticate_request_result\n )\n\n # Set the caller_id on the activity\n activity.caller_id = authenticate_request_result.caller_id\n\n # Create the connector client to use for outbound requests.\n connector_client = (\n await authenticate_request_result.connector_factory.create(\n activity.service_url, authenticate_request_result.audience\n )\n if authenticate_request_result.connector_factory\n else None\n )\n\n if not connector_client:\n raise Error(\"Unable to extract ConnectorClient from turn context.\")\n\n # Create a UserTokenClient instance for the application to use.\n # (For example, it would be used in a sign-in prompt.)\n user_token_client = (\n await self.bot_framework_authentication.create_user_token_client(\n authenticate_request_result.claims_identity\n )\n )\n\n # Create a turn context and run the pipeline.\n context = self._create_turn_context(\n activity,\n authenticate_request_result.claims_identity,\n authenticate_request_result.audience,\n connector_client,\n user_token_client,\n logic,\n authenticate_request_result.connector_factory,\n )\n\n # Run the pipeline\n await self.run_pipeline(context, logic)\n\n # If there are any results they will have been left on the TurnContext.\n return self._process_turn_results(context)\n\n def create_claims_identity(self, bot_app_id: str = \"\") -> ClaimsIdentity:\n return ClaimsIdentity(\n {\n AuthenticationConstants.AUDIENCE_CLAIM: bot_app_id,\n AuthenticationConstants.APP_ID_CLAIM: bot_app_id,\n },\n True,\n )\n\n def _create_create_activity(\n self,\n create_conversation_result: ConversationResourceResponse,\n channel_id: str,\n service_url: str,\n conversation_parameters: ConversationParameters,\n ) -> Activity:\n # Create a conversation update activity to represent the result.\n activity = Activity.create_event_activity()\n activity.name = ActivityEventNames.create_conversation\n activity.channel_id = channel_id\n activity.service_url = service_url\n activity.id = create_conversation_result.activity_id or str(uuid4())\n activity.conversation = ConversationAccount(\n id=create_conversation_result.id,\n tenant_id=conversation_parameters.tenant_id,\n )\n activity.channel_data = conversation_parameters.channel_data\n activity.recipient = conversation_parameters.bot\n\n return activity\n\n def _create_turn_context(\n self,\n activity: Activity,\n claims_identity: ClaimsIdentity,\n oauth_scope: str,\n connector_client: ConnectorClient,\n user_token_client: UserTokenClient,\n logic: Callable[[TurnContext], Awaitable],\n connector_factory: ConnectorFactory,\n ) -> TurnContext:\n context = TurnContext(self, activity)\n\n context.turn_state[self.BOT_IDENTITY_KEY] = claims_identity\n context.turn_state[self.BOT_CONNECTOR_CLIENT_KEY] = connector_client\n context.turn_state[self.USER_TOKEN_CLIENT_KEY] = user_token_client\n\n context.turn_state[self.BOT_CALLBACK_HANDLER_KEY] = logic\n\n context.turn_state[self.CONNECTOR_FACTORY_KEY] = connector_factory\n context.turn_state[self.BOT_OAUTH_SCOPE_KEY] = oauth_scope\n\n return context\n\n def _process_turn_results(self, context: TurnContext) -> InvokeResponse:\n # Handle ExpectedReplies 
scenarios where all activities have been\n # buffered and sent back at once in an invoke response.\n if context.activity.delivery_mode == DeliveryModes.expect_replies:\n return InvokeResponse(\n status=HTTPStatus.OK,\n body=ExpectedReplies(activities=context.buffered_reply_activities),\n )\n\n # Handle Invoke scenarios where the bot will return a specific body and return code.\n if context.activity.type == ActivityTypes.invoke:\n activity_invoke_response: Activity = context.turn_state.get(\n self._INVOKE_RESPONSE_KEY\n )\n if not activity_invoke_response:\n return InvokeResponse(status=HTTPStatus.NOT_IMPLEMENTED)\n\n return activity_invoke_response.value\n\n # No body to return\n return None\n", "path": "libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py" } ]
[ { "content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom abc import ABC\nfrom asyncio import sleep\nfrom copy import Error\nfrom http import HTTPStatus\nfrom typing import Awaitable, Callable, List, Union\nfrom uuid import uuid4\n\nfrom botbuilder.core.invoke_response import InvokeResponse\n\nfrom botbuilder.schema import (\n Activity,\n ActivityEventNames,\n ActivityTypes,\n ConversationAccount,\n ConversationReference,\n ConversationResourceResponse,\n ConversationParameters,\n DeliveryModes,\n ExpectedReplies,\n ResourceResponse,\n)\nfrom botframework.connector import Channels, ConnectorClient\nfrom botframework.connector.auth import (\n AuthenticationConstants,\n BotFrameworkAuthentication,\n ClaimsIdentity,\n)\nfrom botframework.connector.auth.authenticate_request_result import (\n AuthenticateRequestResult,\n)\nfrom botframework.connector.auth.connector_factory import ConnectorFactory\nfrom botframework.connector.auth.user_token_client import UserTokenClient\nfrom .bot_adapter import BotAdapter\nfrom .conversation_reference_extension import get_continuation_activity\nfrom .turn_context import TurnContext\n\n\nclass CloudAdapterBase(BotAdapter, ABC):\n CONNECTOR_FACTORY_KEY = \"ConnectorFactory\"\n USER_TOKEN_CLIENT_KEY = \"UserTokenClient\"\n\n def __init__(\n self, bot_framework_authentication: BotFrameworkAuthentication\n ) -> None:\n super().__init__()\n\n if not bot_framework_authentication:\n raise TypeError(\"Expected BotFrameworkAuthentication but got None instead\")\n\n self.bot_framework_authentication = bot_framework_authentication\n\n async def send_activities(\n self, context: TurnContext, activities: List[Activity]\n ) -> List[ResourceResponse]:\n if not context:\n raise TypeError(\"Expected TurnContext but got None instead\")\n\n if activities is None:\n raise TypeError(\"Expected Activities list but got None instead\")\n\n if len(activities) == 0:\n raise TypeError(\"Expecting one or more activities, but the list was empty.\")\n\n responses = []\n\n for activity in activities:\n activity.id = None\n\n response = ResourceResponse()\n\n if activity.type == \"delay\":\n delay_time = int((activity.value or 1000) / 1000)\n await sleep(delay_time)\n elif activity.type == ActivityTypes.invoke_response:\n context.turn_state[self._INVOKE_RESPONSE_KEY] = activity\n elif (\n activity.type == ActivityTypes.trace\n and activity.channel_id != Channels.emulator\n ):\n # no-op\n pass\n else:\n connector_client: ConnectorClient = context.turn_state.get(\n self.BOT_CONNECTOR_CLIENT_KEY\n )\n if not connector_client:\n raise Error(\"Unable to extract ConnectorClient from turn context.\")\n\n if activity.reply_to_id:\n response = await connector_client.conversations.reply_to_activity(\n activity.conversation.id, activity.reply_to_id, activity\n )\n else:\n response = (\n await connector_client.conversations.send_to_conversation(\n activity.conversation.id, activity\n )\n )\n\n response = response or ResourceResponse(id=activity.id or \"\")\n\n responses.append(response)\n\n return responses\n\n async def update_activity(self, context: TurnContext, activity: Activity):\n if not context:\n raise TypeError(\"Expected TurnContext but got None instead\")\n\n if activity is None:\n raise TypeError(\"Expected Activity but got None instead\")\n\n connector_client: ConnectorClient = context.turn_state.get(\n self.BOT_CONNECTOR_CLIENT_KEY\n )\n if not connector_client:\n raise Error(\"Unable to extract ConnectorClient from turn 
context.\")\n\n response = await connector_client.conversations.update_activity(\n activity.conversation.id, activity.reply_to_id, activity\n )\n\n response_id = response.id if response and response.id else None\n\n return ResourceResponse(id=response_id) if response_id else None\n\n async def delete_activity(\n self, context: TurnContext, reference: ConversationReference\n ):\n if not context:\n raise TypeError(\"Expected TurnContext but got None instead\")\n\n if not reference:\n raise TypeError(\"Expected ConversationReference but got None instead\")\n\n connector_client: ConnectorClient = context.turn_state.get(\n self.BOT_CONNECTOR_CLIENT_KEY\n )\n if not connector_client:\n raise Error(\"Unable to extract ConnectorClient from turn context.\")\n\n await connector_client.conversations.delete_activity(\n reference.conversation.id, reference.activity_id\n )\n\n async def continue_conversation( # pylint: disable=arguments-differ\n self,\n reference: ConversationReference,\n callback: Callable,\n bot_app_id: str,\n ):\n \"\"\"\n Sends a proactive message to a conversation.\n Call this method to proactively send a message to a conversation.\n Most channels require a user to initiate a conversation with a bot before the bot can send activities\n to the user.\n\n :param reference: A reference to the conversation to continue.\n :type reference: :class:`botbuilder.schema.ConversationReference`\n :param callback: The method to call for the resulting bot turn.\n :type callback: :class:`typing.Callable`\n :param bot_app_id: The application Id of the bot. This is the appId returned by the Azure portal registration,\n and is generally found in the `MicrosoftAppId` parameter in `config.py`.\n :type bot_app_id: :class:`typing.str`\n \"\"\"\n return await self.process_proactive(\n self.create_claims_identity(bot_app_id),\n get_continuation_activity(reference),\n None,\n callback,\n )\n\n async def continue_conversation_with_claims(\n self,\n claims_identity: ClaimsIdentity,\n reference: ConversationReference,\n audience: str,\n logic: Callable[[TurnContext], Awaitable],\n ):\n return await self.process_proactive(\n claims_identity, get_continuation_activity(reference), audience, logic\n )\n\n async def create_conversation( # pylint: disable=arguments-differ\n self,\n bot_app_id: str,\n callback: Callable[[TurnContext], Awaitable] = None,\n conversation_parameters: ConversationParameters = None,\n channel_id: str = None,\n service_url: str = None,\n audience: str = None,\n ):\n if not service_url:\n raise TypeError(\n \"CloudAdapter.create_conversation(): service_url is required.\"\n )\n if not conversation_parameters:\n raise TypeError(\n \"CloudAdapter.create_conversation(): conversation_parameters is required.\"\n )\n if not callback:\n raise TypeError(\"CloudAdapter.create_conversation(): callback is required.\")\n\n # Create a ClaimsIdentity, to create the connector and for adding to the turn context.\n claims_identity = self.create_claims_identity(bot_app_id)\n claims_identity.claims[AuthenticationConstants.SERVICE_URL_CLAIM] = service_url\n\n # create the connectror factory\n connector_factory = self.bot_framework_authentication.create_connector_factory(\n claims_identity\n )\n\n # Create the connector client to use for outbound requests.\n connector_client = await connector_factory.create(service_url, audience)\n\n # Make the actual create conversation call using the connector.\n create_conversation_result = (\n await connector_client.conversations.create_conversation(\n 
conversation_parameters\n )\n )\n\n # Create the create activity to communicate the results to the application.\n create_activity = self._create_create_activity(\n create_conversation_result, channel_id, service_url, conversation_parameters\n )\n\n # Create a UserTokenClient instance for the application to use. (For example, in the OAuthPrompt.)\n user_token_client = (\n await self.bot_framework_authentication.create_user_token_client(\n claims_identity\n )\n )\n\n # Create a turn context and run the pipeline.\n context = self._create_turn_context(\n create_activity,\n claims_identity,\n None,\n connector_client,\n user_token_client,\n callback,\n connector_factory,\n )\n\n # Run the pipeline\n await self.run_pipeline(context, callback)\n\n async def process_proactive(\n self,\n claims_identity: ClaimsIdentity,\n continuation_activity: Activity,\n audience: str,\n logic: Callable[[TurnContext], Awaitable],\n ):\n # Create the connector factory and the inbound request, extracting parameters and then create a\n # connector for outbound requests.\n connector_factory = self.bot_framework_authentication.create_connector_factory(\n claims_identity\n )\n\n # Create the connector client to use for outbound requests.\n connector_client = await connector_factory.create(\n continuation_activity.service_url, audience\n )\n\n # Create a UserTokenClient instance for the application to use. (For example, in the OAuthPrompt.)\n user_token_client = (\n await self.bot_framework_authentication.create_user_token_client(\n claims_identity\n )\n )\n\n # Create a turn context and run the pipeline.\n context = self._create_turn_context(\n continuation_activity,\n claims_identity,\n audience,\n connector_client,\n user_token_client,\n logic,\n connector_factory,\n )\n\n # Run the pipeline\n await self.run_pipeline(context, logic)\n\n async def process_activity(\n self,\n auth_header_or_authenticate_request_result: Union[\n str, AuthenticateRequestResult\n ],\n activity: Activity,\n logic: Callable[[TurnContext], Awaitable],\n ):\n \"\"\"\n Creates a turn context and runs the middleware pipeline for an incoming activity.\n\n :param auth_header: The HTTP authentication header of the request\n :type auth_header: :class:`typing.Union[typing.str, AuthenticateRequestResult]`\n :param activity: The incoming activity\n :type activity: :class:`Activity`\n :param logic: The logic to execute at the end of the adapter's middleware pipeline.\n :type logic: :class:`typing.Callable`\n\n :return: A task that represents the work queued to execute.\n\n .. remarks::\n This class processes an activity received by the bots web server. This includes any messages\n sent from a user and is the method that drives what's often referred to as the\n bots *reactive messaging* flow.\n Call this method to reactively send a message to a conversation.\n If the task completes successfully, then an :class:`InvokeResponse` is returned;\n otherwise. 
`null` is returned.\n \"\"\"\n # Authenticate the inbound request, extracting parameters and create a ConnectorFactory for creating a\n # Connector for outbound requests.\n authenticate_request_result = (\n await self.bot_framework_authentication.authenticate_request(\n activity, auth_header_or_authenticate_request_result\n )\n if isinstance(auth_header_or_authenticate_request_result, str)\n else auth_header_or_authenticate_request_result\n )\n\n # Set the caller_id on the activity\n activity.caller_id = authenticate_request_result.caller_id\n\n # Create the connector client to use for outbound requests.\n connector_client = (\n await authenticate_request_result.connector_factory.create(\n activity.service_url, authenticate_request_result.audience\n )\n if authenticate_request_result.connector_factory\n else None\n )\n\n if not connector_client:\n raise Error(\"Unable to extract ConnectorClient from turn context.\")\n\n # Create a UserTokenClient instance for the application to use.\n # (For example, it would be used in a sign-in prompt.)\n user_token_client = (\n await self.bot_framework_authentication.create_user_token_client(\n authenticate_request_result.claims_identity\n )\n )\n\n # Create a turn context and run the pipeline.\n context = self._create_turn_context(\n activity,\n authenticate_request_result.claims_identity,\n authenticate_request_result.audience,\n connector_client,\n user_token_client,\n logic,\n authenticate_request_result.connector_factory,\n )\n\n # Run the pipeline\n await self.run_pipeline(context, logic)\n\n # If there are any results they will have been left on the TurnContext.\n return self._process_turn_results(context)\n\n def create_claims_identity(self, bot_app_id: str = \"\") -> ClaimsIdentity:\n return ClaimsIdentity(\n {\n AuthenticationConstants.AUDIENCE_CLAIM: bot_app_id,\n AuthenticationConstants.APP_ID_CLAIM: bot_app_id,\n },\n True,\n )\n\n def _create_create_activity(\n self,\n create_conversation_result: ConversationResourceResponse,\n channel_id: str,\n service_url: str,\n conversation_parameters: ConversationParameters,\n ) -> Activity:\n # Create a conversation update activity to represent the result.\n activity = Activity.create_event_activity()\n activity.name = ActivityEventNames.create_conversation\n activity.channel_id = channel_id\n activity.service_url = service_url\n activity.id = create_conversation_result.activity_id or str(uuid4())\n activity.conversation = ConversationAccount(\n id=create_conversation_result.id,\n tenant_id=conversation_parameters.tenant_id,\n )\n activity.channel_data = conversation_parameters.channel_data\n activity.recipient = conversation_parameters.bot\n\n return activity\n\n def _create_turn_context(\n self,\n activity: Activity,\n claims_identity: ClaimsIdentity,\n oauth_scope: str,\n connector_client: ConnectorClient,\n user_token_client: UserTokenClient,\n logic: Callable[[TurnContext], Awaitable],\n connector_factory: ConnectorFactory,\n ) -> TurnContext:\n context = TurnContext(self, activity)\n\n context.turn_state[self.BOT_IDENTITY_KEY] = claims_identity\n context.turn_state[self.BOT_CONNECTOR_CLIENT_KEY] = connector_client\n context.turn_state[self.USER_TOKEN_CLIENT_KEY] = user_token_client\n\n context.turn_state[self.BOT_CALLBACK_HANDLER_KEY] = logic\n\n context.turn_state[self.CONNECTOR_FACTORY_KEY] = connector_factory\n context.turn_state[self.BOT_OAUTH_SCOPE_KEY] = oauth_scope\n\n return context\n\n def _process_turn_results(self, context: TurnContext) -> InvokeResponse:\n # Handle ExpectedReplies 
scenarios where all activities have been\n # buffered and sent back at once in an invoke response.\n if context.activity.delivery_mode == DeliveryModes.expect_replies:\n return InvokeResponse(\n status=HTTPStatus.OK,\n body=ExpectedReplies(activities=context.buffered_reply_activities),\n )\n\n # Handle Invoke scenarios where the bot will return a specific body and return code.\n if context.activity.type == ActivityTypes.invoke:\n activity_invoke_response: Activity = context.turn_state.get(\n self._INVOKE_RESPONSE_KEY\n )\n if not activity_invoke_response:\n return InvokeResponse(status=HTTPStatus.NOT_IMPLEMENTED)\n\n return activity_invoke_response.value\n\n # No body to return\n return None\n", "path": "libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py" } ]
diff --git a/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py b/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py index c5eda9589..7e996c90c 100644 --- a/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py +++ b/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py @@ -100,7 +100,7 @@ async def send_activities( ) ) - response = response or ResourceResponse(activity.id or "") + response = response or ResourceResponse(id=activity.id or "") responses.append(response) diff --git a/libraries/botbuilder-core/tests/simple_adapter.py b/libraries/botbuilder-core/tests/simple_adapter.py index ae68dc323..2ba3f31b8 100644 --- a/libraries/botbuilder-core/tests/simple_adapter.py +++ b/libraries/botbuilder-core/tests/simple_adapter.py @@ -75,7 +75,7 @@ async def update_activity(self, context: TurnContext, activity: Activity): if self._call_on_update is not None: self._call_on_update(activity) - return ResourceResponse(activity.id) + return ResourceResponse(id=activity.id) async def process_request(self, activity, handler): context = TurnContext(self, activity) diff --git a/libraries/botbuilder-core/tests/teams/simple_adapter_with_create_conversation.py b/libraries/botbuilder-core/tests/teams/simple_adapter_with_create_conversation.py index d1801d978..cacfbd5ed 100644 --- a/libraries/botbuilder-core/tests/teams/simple_adapter_with_create_conversation.py +++ b/libraries/botbuilder-core/tests/teams/simple_adapter_with_create_conversation.py @@ -76,7 +76,7 @@ async def update_activity(self, context: TurnContext, activity: Activity): if self._call_on_update is not None: self._call_on_update(activity) - return ResourceResponse(activity.id) + return ResourceResponse(id=activity.id) async def process_request(self, activity, handler): context = TurnContext(self, activity)
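The fix looks tiny, but as far as I can tell from the stack trace in the issue, the fallback `response = response or ResourceResponse(...)` is reached whenever the connector returns an empty result (which Teams appears to do for typing activities), and the msrest-generated `ResourceResponse` model takes its fields as keyword-only arguments, so the positional call raised `TypeError` before the typing indicator could be delivered. A stand-in class (not the real schema class) to illustrate the failure mode:

```python
class ResourceResponse:
    # Keyword-only fields, mimicking the msrest-generated model signature.
    def __init__(self, *, id: str = None, **kwargs):
        self.id = id

ResourceResponse(id="abc")        # post-fix call style: fine
try:
    ResourceResponse("abc")       # pre-fix call style
except TypeError as exc:
    print(exc)                    # __init__() takes 1 positional argument but 2 were given
```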
sunpy__sunpy-1365
JSOC download skips first file in Results object
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import\n\nimport os\nimport time\nimport urlparse\nimport warnings\n\nimport requests\nimport numpy as np\nimport astropy.units as u\nimport astropy.time\nimport astropy.table\n\nfrom sunpy import config\nfrom sunpy.time import parse_time, TimeRange\nfrom sunpy.net.download import Downloader\nfrom sunpy.net.vso.vso import Results\nfrom sunpy.net.attr import and_\nfrom sunpy.net.jsoc.attrs import walker\n\n__all__ = ['JSOCClient', 'JSOCResponse']\n\nJSOC_INFO_URL = 'http://jsoc.stanford.edu/cgi-bin/ajax/jsoc_info'\nJSOC_EXPORT_URL = 'http://jsoc.stanford.edu/cgi-bin/ajax/jsoc_fetch'\nBASE_DL_URL = 'http://jsoc.stanford.edu'\n\n\nclass JSOCResponse(object):\n def __init__(self, table=None):\n \"\"\"\n table : astropy.table.Table\n \"\"\"\n\n self.table = table\n self.query_args = None\n self.requestIDs = None\n\n def __str__(self):\n return str(self.table)\n\n def __repr__(self):\n return repr(self.table)\n\n def __len__(self):\n if self.table is None:\n return 0\n else:\n return len(self.table)\n\n def append(self, table):\n if self.table is None:\n self.table = table\n else:\n self.table = astropy.table.vstack([self.table, table])\n\n\nclass JSOCClient(object):\n \"\"\"\n This is a Client to the JSOC Data Export service.\n\n It exposes a similar API to the VSO client, although the underlying model\n is more complex. The JSOC stages data before you can download it, so a JSOC\n query is a three stage process, first you query the JSOC for records,\n a table of these records is returned. Then you can request these records to\n be staged for download and then you can download them.\n The last two stages of this process are bundled together into the `get()`\n method, but they can be seperated if you are performing a large or complex\n query.\n\n .. warning::\n JSOC now requires you to register your email address before requesting\n data. See this site: http://jsoc.stanford.edu/ajax/register_email.html\n\n Notes\n -----\n This Client mocks input to this site: http://jsoc.stanford.edu/ajax/exportdata.html\n Therefore that is a good resource if things are mis-behaving.\n The full list of 'series' is availible through this site: http://jsoc.stanford.edu/\n\n You can build more complex queries by specifiying parameters to POST to JSOC via keyword\n arguments. You can generate these kwargs using the Export Data page at JSOC.\n\n\n Examples\n --------\n\n *Example 1*\n\n Query JSOC for some HMI data at 45 second cadence:\n\n >>> from sunpy.net import jsoc\n >>> client = jsoc.JSOCClient()\n >>> response = client.query(jsoc.Time('2014-01-01T00:00:00', '2014-01-01T01:00:00'),\n ... jsoc.Series('hmi.m_45s'))\n\n the response object holds the records that your query will return:\n\n >>> print response\n DATE TELESCOP INSTRUME ... WAVELNTH WAVEUNIT\n -------------------- -------- ---------- ... -------- ---------------\n 2012-09-05T08:27:19Z SDO/HMI HMI_FRONT2 ... 6173.0 Invalid KeyLink\n 2012-09-05T08:27:20Z SDO/HMI HMI_FRONT2 ... 6173.0 Invalid KeyLink\n\n You can then make the request and download the data:\n\n >>> res = client.get(response)\n Request JSOC_20140724_947 was submitted 6 seconds ago, it is not ready to download.\n Request JSOC_20140724_947 was exported at 2014.07.24_22:08:09_UT and is ready to download.\n 2 URLs found for Download. 
Totalling 30MB\n\n This returns a Results instance which can be used to watch the progress\n of the download.\n\n >>> res.wait(progress=True)\n\n *Example 2*\n\n Query the JSOC for some AIA 171 data, and seperate out the staging and the\n download steps:\n\n >>> import astropy.units as u\n >>> from sunpy.net import jsoc\n >>> client = jsoc.JSOCClient()\n >>> response = client.query(jsoc.Time('2014/1/1T00:00:00', '2014/1/1T00:00:36'),\n jsoc.Series('aia.lev1_euv_12s'), jsoc.Segment('image'),\n jsoc.Wavelength(171*u.AA))\n\n the response object holds the records that your query will return:\n\n >>> print response\n DATE TELESCOP INSTRUME T_OBS WAVELNTH WAVEUNIT\n -------------------- -------- -------- ----------------------- -------- --------\n 2014-01-06T15:07:12Z SDO/AIA AIA_3 2013-12-31T23:59:36.34Z 171 angstrom\n 2014-01-06T15:07:12Z SDO/AIA AIA_3 2013-12-31T23:59:48.34Z 171 angstrom\n 2014-01-07T15:05:10Z SDO/AIA AIA_3 2014-01-01T00:00:00.34Z 171 angstrom\n 2014-01-07T15:05:10Z SDO/AIA AIA_3 2014-01-01T00:00:12.34Z 171 angstrom\n\n You can then make the request:\n\n >>> requestIDs = client.request_data(response)\n [u'JSOC_20140724_952']\n\n This returns a list of all the request identifiers for your query.\n\n You can then check the status of the request, which will print out a status\n message and return you the status code, a code of 1 means it is not ready\n to download and a code of 0 means the request is staged and ready. A code\n of 6 means an error, which is commonly that the request has not had time to\n get into the que.\n\n >>> status = client.check_request(requestIDs)\n Request JSOC_20140724_955 was submitted 10 seconds ago, it is not ready to download.\n\n Once the status code is 0 you can download the data using the `get_request`\n method:\n\n >>> res = client.get_request(requestIDs)\n\n This returns a Results instance which can be used to watch the progress\n of the download.\n\n >>> res.wait(progress=True)\n \"\"\"\n\n def query(self, *query, **kwargs):\n \"\"\"\n Build a JSOC query and submit it to JSOC for processing.\n\n Takes a variable number of :mod:`sunpy.net.jsoc.attrs` as parameters,\n which are chained together using the AND (`&`) operator.\n\n Complex queries to be easily formed using logical operators such as\n `&` and `|`, in the same way as the VSO client.\n\n Examples\n --------\n Request all AIA 304 image data between 2010-01-01T00:00 and\n 2010-01-01T01:00 in rice compressed form.\n\n >>> client.query(jsoc.Time('2010-01-01T00:00', '2010-01-01T01:00'),\n ... jsoc.Series('aia.lev1_euv_12s'), jsoc.Wavelength(304),\n ... 
jsoc.Compression('rice'), jsoc.Segment('image'))\n\n Returns\n -------\n results: JSOCResults object\n A collection of records that the query returns.\n \"\"\"\n\n return_results = JSOCResponse()\n query = and_(*query)\n for block in walker.create(query):\n iargs = kwargs.copy()\n iargs.update(block)\n\n return_results.append(self._lookup_records(iargs))\n\n return_results.query_args = iargs\n\n return return_results\n\n def request_data(self, jsoc_response, **kwargs):\n \"\"\"\n Request that JSOC stages the data for download.\n\n Parameters\n ----------\n jsoc_response : JSOCResponse object\n The results of a query\n\n Returns\n -------\n requestIDs : list of strings\n List of the JSOC request identifiers\n\n \"\"\"\n # A little (hidden) debug feature\n return_responses = kwargs.pop('return_resp', False)\n if len(kwargs):\n raise TypeError(\"request_data got unexpected keyword arguments {0}\".format(kwargs.keys()))\n\n # Do a multi-request for each query block\n responses = self._multi_request(**jsoc_response.query_args)\n for i, response in enumerate(responses):\n if response.status_code != 200:\n warnings.warn(\n Warning(\"Query {0} retuned code {1}\".format(i, response.status_code)))\n responses.pop(i)\n elif response.json()['status'] != 2:\n warnings.warn(\n Warning(\"Query {0} retuned status {1} with error {2}\".format(i,\n response.json()['status'],\n response.json()['error'])))\n responses.pop(i)\n\n # Extract the IDs from the JSON\n requestIDs = [response.json()['requestid'] for response in responses]\n\n if return_responses:\n return responses\n\n return requestIDs\n\n def check_request(self, requestIDs):\n \"\"\"\n Check the status of a request and print out a message about it\n\n Parameters\n ----------\n requestIDs: list or string\n A list of requestIDs to check\n\n Returns\n -------\n status: list\n A list of status' that were returned by JSOC\n \"\"\"\n # Convert IDs to a list if not already\n if not astropy.utils.misc.isiterable(requestIDs) or isinstance(requestIDs, basestring):\n requestIDs = [requestIDs]\n\n allstatus = []\n for request_id in requestIDs:\n u = self._request_status(request_id)\n status = int(u.json()['status'])\n\n if status == 0: # Data ready to download\n print(\n\"Request {0} was exported at {1} and is ready to download.\".format(\n u.json()['requestid'], u.json()['exptime']))\n elif status == 1:\n print(\n\"Request {0} was submitted {1} seconds ago, it is not ready to download.\".format(\n u.json()['requestid'], u.json()['wait']))\n else:\n print(\"Request returned status: {0} with error: {1}\".format(\n u.json()['status'], u.json()['error']))\n\n allstatus.append(status)\n\n return allstatus\n\n def get(self, jsoc_response, path=None, overwrite=False, progress=True,\n max_conn=5, sleep=10):\n \"\"\"\n Make the request for the data in jsoc_response and wait for it to be\n staged and then download the data.\n\n Parameters\n ----------\n jsoc_response: JSOCResponse object\n A response object\n\n path: string\n Path to save data to, defaults to SunPy download dir\n\n overwrite: bool\n Replace files with the same name if True\n\n progress: bool\n Print progress info to terminal\n\n max_conns: int\n Maximum number of download connections.\n\n downloader: sunpy.download.Downloder instance\n A Custom downloader to use\n\n sleep: int\n The number of seconds to wait between calls to JSOC to check the status\n of the request.\n\n Returns\n -------\n results: a :class:`sunpy.net.vso.Results instance`\n A Results object\n \"\"\"\n\n # Make staging request to 
JSOC\n requestIDs = self.request_data(jsoc_response)\n # Add them to the response for good measure\n jsoc_response.requestIDs = requestIDs\n time.sleep(sleep/2.)\n\n while requestIDs:\n for i, request_id in enumerate(requestIDs):\n u = self._request_status(request_id)\n\n if progress:\n self.check_request(request_id)\n\n if u.status_code == 200 and u.json()['status'] == '0':\n rID = requestIDs.pop(i)\n r = self.get_request(rID, path=path, overwrite=overwrite,\n progress=progress)\n\n else:\n time.sleep(sleep)\n\n return r\n\n def get_request(self, requestIDs, path=None, overwrite=False, progress=True,\n max_conn=5, downloader=None, results=None):\n \"\"\"\n Query JSOC to see if request_id is ready for download.\n\n If the request is ready for download, download it.\n\n Parameters\n ----------\n requestIDs: list or string\n One or many requestID strings\n\n path: string\n Path to save data to, defaults to SunPy download dir\n\n overwrite: bool\n Replace files with the same name if True\n\n progress: bool\n Print progress info to terminal\n\n max_conns: int\n Maximum number of download connections.\n\n downloader: sunpy.download.Downloader instance\n A Custom downloader to use\n\n results: Results instance\n A Results manager to use.\n\n Returns\n -------\n res: Results\n A Results instance or None if no URLs to download\n \"\"\"\n\n # Convert IDs to a list if not already\n if not astropy.utils.misc.isiterable(requestIDs) or isinstance(requestIDs, basestring):\n requestIDs = [requestIDs]\n\n if path is None:\n path = config.get('downloads','download_dir')\n path = os.path.expanduser(path)\n\n if downloader is None:\n downloader = Downloader(max_conn=max_conn, max_total=max_conn)\n\n # A Results object tracks the number of downloads requested and the\n # number that have been completed.\n if results is None:\n results = Results(lambda _: downloader.stop())\n\n urls = []\n for request_id in requestIDs:\n u = self._request_status(request_id)\n\n if u.status_code == 200 and u.json()['status'] == '0':\n for ar in u.json()['data']:\n if overwrite or not os.path.isfile(os.path.join(path, ar['filename'])):\n urls.append(urlparse.urljoin(BASE_DL_URL + u.json()['dir'] +\n '/', ar['filename']))\n\n else:\n print(\"Skipping download of file {} as it has already been downloaded\".format(ar['filename']))\n # Add the file on disk to the output\n results.map_.update({ar['filename']:{'path':os.path.join(path, ar['filename'])}})\n\n if progress:\n print(\"{0} URLs found for Download. Totalling {1}MB\".format(\n len(urls), u.json()['size']))\n\n else:\n if progress:\n self.check_request(request_id)\n\n if urls:\n for url in urls:\n downloader.download(url, callback=results.require([url]),\n errback=lambda x: print(x), path=path)\n\n else:\n #Make Results think it has finished.\n results.require([])\n\n results.poke()\n return results\n\n def _process_time(self, time):\n \"\"\"\n Take a UTC time string or datetime instance and generate a astropy.time\n object in TAI frame. 
Alternatively convert a astropy time object to TAI\n\n Parameters\n ----------\n time: basestring or datetime or astropy.time\n Input time\n\n Returns\n -------\n datetime, in TAI\n \"\"\"\n # Convert from any input (in UTC) to TAI\n if isinstance(time, basestring):\n time = parse_time(time)\n time = astropy.time.Time(time, scale='utc')\n time = time.tai #change the scale to TAI\n\n return time.datetime\n\n def _make_recordset(self, start_time, end_time, series, wavelength='',\n segment='', **kwargs):\n # Build the dataset string\n # Extract and format Wavelength\n if wavelength:\n if not series.startswith('aia'):\n raise TypeError(\"This series does not support the wavelength attribute.\")\n else:\n if isinstance(wavelength, list):\n wavelength = [int(np.ceil(wave.to(u.AA).value)) for wave in wavelength]\n wavelength = str(wavelength)\n else:\n wavelength = '[{0}]'.format(int(np.ceil(wavelength.to(u.AA).value)))\n\n # Extract and format segment\n if segment != '':\n segment = '{{{segment}}}'.format(segment=segment)\n\n dataset = '{series}[{start}-{end}]{wavelength}{segment}'.format(\n series=series, start=start_time.strftime(\"%Y.%m.%d_%H:%M:%S_TAI\"),\n end=end_time.strftime(\"%Y.%m.%d_%H:%M:%S_TAI\"),\n wavelength=wavelength, segment=segment)\n\n return dataset\n\n def _make_query_payload(self, start_time, end_time, series, notify=None,\n protocol='FITS', compression='rice', **kwargs):\n \"\"\"\n Build the POST payload for the query parameters\n \"\"\"\n\n if protocol.upper() == 'FITS' and compression and compression.lower() == 'rice':\n jprotocol = 'FITS,compress Rice'\n elif protocol.upper() == 'FITS':\n jprotocol = 'FITS, **NONE**'\n else:\n jprotocol = protocol\n\n if not notify:\n raise ValueError(\"JSOC queries now require a valid email address \"\n \"before they will be accepted by the server\")\n\n dataset = self._make_recordset(start_time, end_time, series, **kwargs)\n kwargs.pop('wavelength', None)\n\n # Build full POST payload\n payload = {'ds': dataset,\n 'format': 'json',\n 'method': 'url',\n 'notify': notify,\n 'op': 'exp_request',\n 'process': 'n=0|no_op',\n 'protocol': jprotocol,\n 'requestor': 'none',\n 'filenamefmt': '{0}.{{T_REC:A}}.{{CAMERA}}.{{segment}}'.format(series)}\n\n payload.update(kwargs)\n return payload\n\n def _send_jsoc_request(self, start_time, end_time, series, notify=None,\n protocol='FITS', compression='rice', **kwargs):\n \"\"\"\n Request that JSOC stages data for download\n\n This routine puts in a POST request to JSOC\n \"\"\"\n\n payload = self._make_query_payload(start_time, end_time, series,\n notify=notify, protocol=protocol,\n compression=compression, **kwargs)\n\n r = requests.post(JSOC_EXPORT_URL, data=payload)\n\n if r.status_code != 200:\n raise Exception(\"JSOC POST Request returned code {0}\".format(r.status_code))\n\n return r, r.json()\n\n def _lookup_records(self, iargs):\n \"\"\"\n Do a LookData request to JSOC to workout what results the query returns\n \"\"\"\n keywords = ['DATE', 'TELESCOP', 'INSTRUME', 'T_OBS', 'WAVELNTH',\n 'WAVEUNIT']\n\n if not all([k in iargs for k in ('start_time', 'end_time', 'series')]):\n raise ValueError(\"Both Time and Series must be specified for a JSOC Query\")\n\n postthis = {'ds': self._make_recordset(**iargs),\n 'op': 'rs_list',\n 'key': str(keywords)[1:-1].replace(' ', '').replace(\"'\", ''),\n 'seg': '**NONE**',\n 'link': '**NONE**'}\n\n r = requests.get(JSOC_INFO_URL, params=postthis)\n\n result = r.json()\n\n out_table = {}\n if 'keywords' in result:\n for col in result['keywords']:\n 
out_table.update({col['name']:col['values']})\n\n # sort the table before returning\n return astropy.table.Table(out_table)[keywords]\n\n else:\n return astropy.table.Table()\n\n def _multi_request(self, **kwargs):\n \"\"\"\n Make a series of requests to avoid the 100GB limit\n \"\"\"\n start_time = kwargs.pop('start_time', None)\n end_time = kwargs.pop('end_time', None)\n series = kwargs.pop('series', None)\n if any(x is None for x in (start_time, end_time, series)):\n return []\n start_time = self._process_time(start_time)\n end_time = self._process_time(end_time)\n tr = TimeRange(start_time, end_time)\n returns = []\n response, json_response = self._send_jsoc_request(start_time, end_time, series, **kwargs)\n\n # We skip these lines because a massive request is not a pratical test.\n if (json_response['status'] == 3 and\n json_response['error'] == 'Request exceeds max byte limit of 100000MB'): #pragma: no cover\n returns.append(self._multi_request(tr.start(), tr.center(), series, **kwargs)[0]) #pragma: no cover\n returns.append(self._multi_request(tr.center(), tr.end(), series, **kwargs)[0]) #pragma: no cover\n else:\n returns.append(response)\n\n return returns\n\n def _request_status(self, request_id):\n \"\"\"\n GET the status of a request ID\n \"\"\"\n payload = {'op':'exp_status', 'requestid':request_id}\n u = requests.get(JSOC_EXPORT_URL, params=payload)\n\n return u\n\n", "path": "sunpy/net/jsoc/jsoc.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, absolute_import\n\nimport os\nimport time\nimport urlparse\nimport warnings\n\nimport requests\nimport numpy as np\nimport astropy.units as u\nimport astropy.time\nimport astropy.table\n\nfrom sunpy import config\nfrom sunpy.time import parse_time, TimeRange\nfrom sunpy.net.download import Downloader\nfrom sunpy.net.vso.vso import Results\nfrom sunpy.net.attr import and_\nfrom sunpy.net.jsoc.attrs import walker\n\n__all__ = ['JSOCClient', 'JSOCResponse']\n\nJSOC_INFO_URL = 'http://jsoc.stanford.edu/cgi-bin/ajax/jsoc_info'\nJSOC_EXPORT_URL = 'http://jsoc.stanford.edu/cgi-bin/ajax/jsoc_fetch'\nBASE_DL_URL = 'http://jsoc.stanford.edu'\n\n\nclass JSOCResponse(object):\n def __init__(self, table=None):\n \"\"\"\n table : astropy.table.Table\n \"\"\"\n\n self.table = table\n self.query_args = None\n self.requestIDs = None\n\n def __str__(self):\n return str(self.table)\n\n def __repr__(self):\n return repr(self.table)\n\n def __len__(self):\n if self.table is None:\n return 0\n else:\n return len(self.table)\n\n def append(self, table):\n if self.table is None:\n self.table = table\n else:\n self.table = astropy.table.vstack([self.table, table])\n\n\nclass JSOCClient(object):\n \"\"\"\n This is a Client to the JSOC Data Export service.\n\n It exposes a similar API to the VSO client, although the underlying model\n is more complex. The JSOC stages data before you can download it, so a JSOC\n query is a three stage process, first you query the JSOC for records,\n a table of these records is returned. Then you can request these records to\n be staged for download and then you can download them.\n The last two stages of this process are bundled together into the `get()`\n method, but they can be seperated if you are performing a large or complex\n query.\n\n .. warning::\n JSOC now requires you to register your email address before requesting\n data. See this site: http://jsoc.stanford.edu/ajax/register_email.html\n\n Notes\n -----\n This Client mocks input to this site: http://jsoc.stanford.edu/ajax/exportdata.html\n Therefore that is a good resource if things are mis-behaving.\n The full list of 'series' is availible through this site: http://jsoc.stanford.edu/\n\n You can build more complex queries by specifiying parameters to POST to JSOC via keyword\n arguments. You can generate these kwargs using the Export Data page at JSOC.\n\n\n Examples\n --------\n\n *Example 1*\n\n Query JSOC for some HMI data at 45 second cadence:\n\n >>> from sunpy.net import jsoc\n >>> client = jsoc.JSOCClient()\n >>> response = client.query(jsoc.Time('2014-01-01T00:00:00', '2014-01-01T01:00:00'),\n ... jsoc.Series('hmi.m_45s'))\n\n the response object holds the records that your query will return:\n\n >>> print response\n DATE TELESCOP INSTRUME ... WAVELNTH WAVEUNIT\n -------------------- -------- ---------- ... -------- ---------------\n 2012-09-05T08:27:19Z SDO/HMI HMI_FRONT2 ... 6173.0 Invalid KeyLink\n 2012-09-05T08:27:20Z SDO/HMI HMI_FRONT2 ... 6173.0 Invalid KeyLink\n\n You can then make the request and download the data:\n\n >>> res = client.get(response)\n Request JSOC_20140724_947 was submitted 6 seconds ago, it is not ready to download.\n Request JSOC_20140724_947 was exported at 2014.07.24_22:08:09_UT and is ready to download.\n 2 URLs found for Download. 
Totalling 30MB\n\n This returns a Results instance which can be used to watch the progress\n of the download.\n\n >>> res.wait(progress=True)\n\n *Example 2*\n\n Query the JSOC for some AIA 171 data, and seperate out the staging and the\n download steps:\n\n >>> import astropy.units as u\n >>> from sunpy.net import jsoc\n >>> client = jsoc.JSOCClient()\n >>> response = client.query(jsoc.Time('2014/1/1T00:00:00', '2014/1/1T00:00:36'),\n jsoc.Series('aia.lev1_euv_12s'), jsoc.Segment('image'),\n jsoc.Wavelength(171*u.AA))\n\n the response object holds the records that your query will return:\n\n >>> print response\n DATE TELESCOP INSTRUME T_OBS WAVELNTH WAVEUNIT\n -------------------- -------- -------- ----------------------- -------- --------\n 2014-01-06T15:07:12Z SDO/AIA AIA_3 2013-12-31T23:59:36.34Z 171 angstrom\n 2014-01-06T15:07:12Z SDO/AIA AIA_3 2013-12-31T23:59:48.34Z 171 angstrom\n 2014-01-07T15:05:10Z SDO/AIA AIA_3 2014-01-01T00:00:00.34Z 171 angstrom\n 2014-01-07T15:05:10Z SDO/AIA AIA_3 2014-01-01T00:00:12.34Z 171 angstrom\n\n You can then make the request:\n\n >>> requestIDs = client.request_data(response)\n [u'JSOC_20140724_952']\n\n This returns a list of all the request identifiers for your query.\n\n You can then check the status of the request, which will print out a status\n message and return you the status code, a code of 1 means it is not ready\n to download and a code of 0 means the request is staged and ready. A code\n of 6 means an error, which is commonly that the request has not had time to\n get into the que.\n\n >>> status = client.check_request(requestIDs)\n Request JSOC_20140724_955 was submitted 10 seconds ago, it is not ready to download.\n\n Once the status code is 0 you can download the data using the `get_request`\n method:\n\n >>> res = client.get_request(requestIDs)\n\n This returns a Results instance which can be used to watch the progress\n of the download.\n\n >>> res.wait(progress=True)\n \"\"\"\n\n def query(self, *query, **kwargs):\n \"\"\"\n Build a JSOC query and submit it to JSOC for processing.\n\n Takes a variable number of :mod:`sunpy.net.jsoc.attrs` as parameters,\n which are chained together using the AND (`&`) operator.\n\n Complex queries to be easily formed using logical operators such as\n `&` and `|`, in the same way as the VSO client.\n\n Examples\n --------\n Request all AIA 304 image data between 2010-01-01T00:00 and\n 2010-01-01T01:00 in rice compressed form.\n\n >>> client.query(jsoc.Time('2010-01-01T00:00', '2010-01-01T01:00'),\n ... jsoc.Series('aia.lev1_euv_12s'), jsoc.Wavelength(304),\n ... 
jsoc.Compression('rice'), jsoc.Segment('image'))\n\n Returns\n -------\n results: JSOCResults object\n A collection of records that the query returns.\n \"\"\"\n\n return_results = JSOCResponse()\n query = and_(*query)\n for block in walker.create(query):\n iargs = kwargs.copy()\n iargs.update(block)\n\n return_results.append(self._lookup_records(iargs))\n\n return_results.query_args = iargs\n\n return return_results\n\n def request_data(self, jsoc_response, **kwargs):\n \"\"\"\n Request that JSOC stages the data for download.\n\n Parameters\n ----------\n jsoc_response : JSOCResponse object\n The results of a query\n\n Returns\n -------\n requestIDs : list of strings\n List of the JSOC request identifiers\n\n \"\"\"\n # A little (hidden) debug feature\n return_responses = kwargs.pop('return_resp', False)\n if len(kwargs):\n raise TypeError(\"request_data got unexpected keyword arguments {0}\".format(kwargs.keys()))\n\n # Do a multi-request for each query block\n responses = self._multi_request(**jsoc_response.query_args)\n for i, response in enumerate(responses):\n if response.status_code != 200:\n warnings.warn(\n Warning(\"Query {0} retuned code {1}\".format(i, response.status_code)))\n responses.pop(i)\n elif response.json()['status'] != 2:\n warnings.warn(\n Warning(\"Query {0} retuned status {1} with error {2}\".format(i,\n response.json()['status'],\n response.json()['error'])))\n responses.pop(i)\n\n # Extract the IDs from the JSON\n requestIDs = [response.json()['requestid'] for response in responses]\n\n if return_responses:\n return responses\n\n return requestIDs\n\n def check_request(self, requestIDs):\n \"\"\"\n Check the status of a request and print out a message about it\n\n Parameters\n ----------\n requestIDs: list or string\n A list of requestIDs to check\n\n Returns\n -------\n status: list\n A list of status' that were returned by JSOC\n \"\"\"\n # Convert IDs to a list if not already\n if not astropy.utils.misc.isiterable(requestIDs) or isinstance(requestIDs, basestring):\n requestIDs = [requestIDs]\n\n allstatus = []\n for request_id in requestIDs:\n u = self._request_status(request_id)\n status = int(u.json()['status'])\n\n if status == 0: # Data ready to download\n print(\n\"Request {0} was exported at {1} and is ready to download.\".format(\n u.json()['requestid'], u.json()['exptime']))\n elif status == 1:\n print(\n\"Request {0} was submitted {1} seconds ago, it is not ready to download.\".format(\n u.json()['requestid'], u.json()['wait']))\n else:\n print(\"Request returned status: {0} with error: {1}\".format(\n u.json()['status'], u.json()['error']))\n\n allstatus.append(status)\n\n return allstatus\n\n def get(self, jsoc_response, path=None, overwrite=False, progress=True,\n max_conn=5, sleep=10):\n \"\"\"\n Make the request for the data in jsoc_response and wait for it to be\n staged and then download the data.\n\n Parameters\n ----------\n jsoc_response: JSOCResponse object\n A response object\n\n path: string\n Path to save data to, defaults to SunPy download dir\n\n overwrite: bool\n Replace files with the same name if True\n\n progress: bool\n Print progress info to terminal\n\n max_conns: int\n Maximum number of download connections.\n\n downloader: sunpy.download.Downloder instance\n A Custom downloader to use\n\n sleep: int\n The number of seconds to wait between calls to JSOC to check the status\n of the request.\n\n Returns\n -------\n results: a :class:`sunpy.net.vso.Results instance`\n A Results object\n \"\"\"\n\n # Make staging request to 
JSOC\n requestIDs = self.request_data(jsoc_response)\n # Add them to the response for good measure\n jsoc_response.requestIDs = requestIDs\n time.sleep(sleep/2.)\n\n while requestIDs:\n for i, request_id in enumerate(requestIDs):\n u = self._request_status(request_id)\n\n if progress:\n self.check_request(request_id)\n\n if u.status_code == 200 and u.json()['status'] == '0':\n rID = requestIDs.pop(i)\n r = self.get_request(rID, path=path, overwrite=overwrite,\n progress=progress)\n\n else:\n time.sleep(sleep)\n\n return r\n\n def get_request(self, requestIDs, path=None, overwrite=False, progress=True,\n max_conn=5, downloader=None, results=None):\n \"\"\"\n Query JSOC to see if request_id is ready for download.\n\n If the request is ready for download, download it.\n\n Parameters\n ----------\n requestIDs: list or string\n One or many requestID strings\n\n path: string\n Path to save data to, defaults to SunPy download dir\n\n overwrite: bool\n Replace files with the same name if True\n\n progress: bool\n Print progress info to terminal\n\n max_conns: int\n Maximum number of download connections.\n\n downloader: sunpy.download.Downloader instance\n A Custom downloader to use\n\n results: Results instance\n A Results manager to use.\n\n Returns\n -------\n res: Results\n A Results instance or None if no URLs to download\n \"\"\"\n\n # Convert IDs to a list if not already\n if not astropy.utils.misc.isiterable(requestIDs) or isinstance(requestIDs, basestring):\n requestIDs = [requestIDs]\n\n if path is None:\n path = config.get('downloads','download_dir')\n path = os.path.expanduser(path)\n\n if downloader is None:\n downloader = Downloader(max_conn=max_conn, max_total=max_conn)\n\n # A Results object tracks the number of downloads requested and the\n # number that have been completed.\n if results is None:\n results = Results(lambda _: downloader.stop())\n\n urls = []\n for request_id in requestIDs:\n u = self._request_status(request_id)\n\n if u.status_code == 200 and u.json()['status'] == '0':\n for ar in u.json()['data']:\n if overwrite or not os.path.isfile(os.path.join(path, ar['filename'])):\n urls.append(urlparse.urljoin(BASE_DL_URL + u.json()['dir'] +\n '/', ar['filename']))\n\n else:\n print(\"Skipping download of file {} as it has already been downloaded\".format(ar['filename']))\n # Add the file on disk to the output\n results.map_.update({ar['filename']:{'path':os.path.join(path, ar['filename'])}})\n\n if progress:\n print(\"{0} URLs found for Download. Totalling {1}MB\".format(\n len(urls), u.json()['size']))\n\n else:\n if progress:\n self.check_request(request_id)\n\n if urls:\n for url in urls:\n downloader.download(url, callback=results.require([url]),\n errback=lambda x: print(x), path=path)\n\n else:\n #Make Results think it has finished.\n results.require([])\n results.poke()\n\n return results\n\n def _process_time(self, time):\n \"\"\"\n Take a UTC time string or datetime instance and generate a astropy.time\n object in TAI frame. 
Alternatively convert a astropy time object to TAI\n\n Parameters\n ----------\n time: basestring or datetime or astropy.time\n Input time\n\n Returns\n -------\n datetime, in TAI\n \"\"\"\n # Convert from any input (in UTC) to TAI\n if isinstance(time, basestring):\n time = parse_time(time)\n time = astropy.time.Time(time, scale='utc')\n time = time.tai #change the scale to TAI\n\n return time.datetime\n\n def _make_recordset(self, start_time, end_time, series, wavelength='',\n segment='', **kwargs):\n # Build the dataset string\n # Extract and format Wavelength\n if wavelength:\n if not series.startswith('aia'):\n raise TypeError(\"This series does not support the wavelength attribute.\")\n else:\n if isinstance(wavelength, list):\n wavelength = [int(np.ceil(wave.to(u.AA).value)) for wave in wavelength]\n wavelength = str(wavelength)\n else:\n wavelength = '[{0}]'.format(int(np.ceil(wavelength.to(u.AA).value)))\n\n # Extract and format segment\n if segment != '':\n segment = '{{{segment}}}'.format(segment=segment)\n\n dataset = '{series}[{start}-{end}]{wavelength}{segment}'.format(\n series=series, start=start_time.strftime(\"%Y.%m.%d_%H:%M:%S_TAI\"),\n end=end_time.strftime(\"%Y.%m.%d_%H:%M:%S_TAI\"),\n wavelength=wavelength, segment=segment)\n\n return dataset\n\n def _make_query_payload(self, start_time, end_time, series, notify=None,\n protocol='FITS', compression='rice', **kwargs):\n \"\"\"\n Build the POST payload for the query parameters\n \"\"\"\n\n if protocol.upper() == 'FITS' and compression and compression.lower() == 'rice':\n jprotocol = 'FITS,compress Rice'\n elif protocol.upper() == 'FITS':\n jprotocol = 'FITS, **NONE**'\n else:\n jprotocol = protocol\n\n if not notify:\n raise ValueError(\"JSOC queries now require a valid email address \"\n \"before they will be accepted by the server\")\n\n dataset = self._make_recordset(start_time, end_time, series, **kwargs)\n kwargs.pop('wavelength', None)\n\n # Build full POST payload\n payload = {'ds': dataset,\n 'format': 'json',\n 'method': 'url',\n 'notify': notify,\n 'op': 'exp_request',\n 'process': 'n=0|no_op',\n 'protocol': jprotocol,\n 'requestor': 'none',\n 'filenamefmt': '{0}.{{T_REC:A}}.{{CAMERA}}.{{segment}}'.format(series)}\n\n payload.update(kwargs)\n return payload\n\n def _send_jsoc_request(self, start_time, end_time, series, notify=None,\n protocol='FITS', compression='rice', **kwargs):\n \"\"\"\n Request that JSOC stages data for download\n\n This routine puts in a POST request to JSOC\n \"\"\"\n\n payload = self._make_query_payload(start_time, end_time, series,\n notify=notify, protocol=protocol,\n compression=compression, **kwargs)\n\n r = requests.post(JSOC_EXPORT_URL, data=payload)\n\n if r.status_code != 200:\n raise Exception(\"JSOC POST Request returned code {0}\".format(r.status_code))\n\n return r, r.json()\n\n def _lookup_records(self, iargs):\n \"\"\"\n Do a LookData request to JSOC to workout what results the query returns\n \"\"\"\n keywords = ['DATE', 'TELESCOP', 'INSTRUME', 'T_OBS', 'WAVELNTH',\n 'WAVEUNIT']\n\n if not all([k in iargs for k in ('start_time', 'end_time', 'series')]):\n raise ValueError(\"Both Time and Series must be specified for a JSOC Query\")\n\n postthis = {'ds': self._make_recordset(**iargs),\n 'op': 'rs_list',\n 'key': str(keywords)[1:-1].replace(' ', '').replace(\"'\", ''),\n 'seg': '**NONE**',\n 'link': '**NONE**'}\n\n r = requests.get(JSOC_INFO_URL, params=postthis)\n\n result = r.json()\n\n out_table = {}\n if 'keywords' in result:\n for col in result['keywords']:\n 
out_table.update({col['name']:col['values']})\n\n # sort the table before returning\n return astropy.table.Table(out_table)[keywords]\n\n else:\n return astropy.table.Table()\n\n def _multi_request(self, **kwargs):\n \"\"\"\n Make a series of requests to avoid the 100GB limit\n \"\"\"\n start_time = kwargs.pop('start_time', None)\n end_time = kwargs.pop('end_time', None)\n series = kwargs.pop('series', None)\n if any(x is None for x in (start_time, end_time, series)):\n return []\n start_time = self._process_time(start_time)\n end_time = self._process_time(end_time)\n tr = TimeRange(start_time, end_time)\n returns = []\n response, json_response = self._send_jsoc_request(start_time, end_time, series, **kwargs)\n\n # We skip these lines because a massive request is not a pratical test.\n if (json_response['status'] == 3 and\n json_response['error'] == 'Request exceeds max byte limit of 100000MB'): #pragma: no cover\n returns.append(self._multi_request(tr.start(), tr.center(), series, **kwargs)[0]) #pragma: no cover\n returns.append(self._multi_request(tr.center(), tr.end(), series, **kwargs)[0]) #pragma: no cover\n else:\n returns.append(response)\n\n return returns\n\n def _request_status(self, request_id):\n \"\"\"\n GET the status of a request ID\n \"\"\"\n payload = {'op':'exp_status', 'requestid':request_id}\n u = requests.get(JSOC_EXPORT_URL, params=payload)\n\n return u\n\n", "path": "sunpy/net/jsoc/jsoc.py" } ]
diff --git a/sunpy/net/jsoc/jsoc.py b/sunpy/net/jsoc/jsoc.py index 494e2f3e311..0c39fbf6414 100644 --- a/sunpy/net/jsoc/jsoc.py +++ b/sunpy/net/jsoc/jsoc.py @@ -423,8 +423,8 @@ def get_request(self, requestIDs, path=None, overwrite=False, progress=True, else: #Make Results think it has finished. results.require([]) + results.poke() - results.poke() return results def _process_time(self, time): diff --git a/sunpy/net/jsoc/tests/test_jsoc.py b/sunpy/net/jsoc/tests/test_jsoc.py index af1bcb721b0..8cdb4405ab1 100644 --- a/sunpy/net/jsoc/tests/test_jsoc.py +++ b/sunpy/net/jsoc/tests/test_jsoc.py @@ -212,7 +212,6 @@ def test_get_request(): assert isinstance(aa, Results) @pytest.mark.online [email protected] def test_results_filenames(): responses = client.query(attrs.Time('2014/1/1T1:00:36', '2014/1/1T01:01:38'), attrs.Series('hmi.M_45s'), attrs.Notify('[email protected]'))
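For the sunpy record above, the whole fix is the placement of `results.poke()`: after the patch it runs only in the branch where no URLs were queued for download, instead of unconditionally after the `if`/`else`. The annotated excerpt below restates the patched tail of `get_request` with comments. It is an excerpt for illustration only (the names `urls`, `downloader`, `results` and `path` come from the method shown above, so it is not standalone code), and the comments describe the intent suggested by the patch rather than the internals of the `Results` class.

```python
if urls:
    for url in urls:
        # each queued download reports back to `results` through the callback
        # returned by require(), so completion is driven by the downloader
        downloader.download(url, callback=results.require([url]),
                            errback=lambda x: print(x), path=path)
else:
    # nothing to download: register an empty requirement and poke the
    # Results object once so it can consider itself finished
    results.require([])
    results.poke()
```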
open-telemetry__opentelemetry-python-contrib-1515
Add readthedocs documentation for remoulade instrumentation Part of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUsage\n-----\n\n* Start broker backend\n\n::\n\n docker run -p 5672:5672 rabbitmq\n\n* Run instrumented actor\n\n.. code-block:: python\n\n from remoulade.brokers.rabbitmq import RabbitmqBroker\n import remoulade\n\n RemouladeInstrumentor().instrument()\n\n broker = RabbitmqBroker()\n remoulade.set_broker(broker)\n\n @remoulade.actor\n def multiply(x, y):\n return x * y\n\n broker.declare_actor(count_words)\n\n multiply.send(43, 51)\n\n\"\"\"\nfrom typing import Collection\n\nfrom remoulade import Middleware, broker\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.remoulade import utils\nfrom opentelemetry.instrumentation.remoulade.package import _instruments\nfrom opentelemetry.instrumentation.remoulade.version import __version__\nfrom opentelemetry.propagate import extract, inject\nfrom opentelemetry.semconv.trace import SpanAttributes\n\n_REMOULADE_MESSAGE_TAG_KEY = \"remoulade.action\"\n_REMOULADE_MESSAGE_SEND = \"send\"\n_REMOULADE_MESSAGE_RUN = \"run\"\n\n_REMOULADE_MESSAGE_NAME_KEY = \"remoulade.actor_name\"\n\n_REMOULADE_MESSAGE_RETRY_COUNT_KEY = \"remoulade.retry_count\"\n\n\nclass _InstrumentationMiddleware(Middleware):\n def __init__(self, _tracer):\n self._tracer = _tracer\n self._span_registry = {}\n\n def before_process_message(self, _broker, message):\n if \"trace_ctx\" not in message.options:\n return\n\n trace_ctx = extract(message.options[\"trace_ctx\"])\n retry_count = message.options.get(\"retries\", 0)\n operation_name = utils.get_operation_name(\n \"before_process_message\", retry_count\n )\n span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\n\n span = self._tracer.start_span(\n operation_name,\n kind=trace.SpanKind.CONSUMER,\n context=trace_ctx,\n attributes=span_attributes,\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n\n utils.attach_span(\n self._span_registry, message.message_id, (span, activation)\n )\n\n def after_process_message(\n self, _broker, message, *, result=None, exception=None\n ):\n span, activation = utils.retrieve_span(\n self._span_registry, message.message_id\n )\n\n if span is None:\n # no existing span found for message_id\n return\n\n if span.is_recording():\n span.set_attributes(\n {\n _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_RUN,\n _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\n SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\n }\n )\n\n activation.__exit__(None, None, None)\n utils.detach_span(self._span_registry, message.message_id)\n\n def before_enqueue(self, _broker, message, delay):\n retry_count = message.options.get(\"retries\", 0)\n operation_name = utils.get_operation_name(\n \"before_enqueue\", retry_count\n )\n span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\n\n span = self._tracer.start_span(\n 
operation_name,\n kind=trace.SpanKind.PRODUCER,\n attributes=span_attributes,\n )\n\n if span.is_recording():\n span.set_attributes(\n {\n _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_SEND,\n _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\n SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\n }\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n\n utils.attach_span(\n self._span_registry,\n message.message_id,\n (span, activation),\n is_publish=True,\n )\n\n if \"trace_ctx\" not in message.options:\n message.options[\"trace_ctx\"] = {}\n inject(message.options[\"trace_ctx\"])\n\n def after_enqueue(self, _broker, message, delay, exception=None):\n _, activation = utils.retrieve_span(\n self._span_registry, message.message_id, is_publish=True\n )\n\n if activation is None:\n # no existing span found for message_id\n return\n\n activation.__exit__(None, None, None)\n utils.detach_span(\n self._span_registry, message.message_id, is_publish=True\n )\n\n\nclass RemouladeInstrumentor(BaseInstrumentor):\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n\n # pylint: disable=attribute-defined-outside-init\n self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n instrumentation_middleware = _InstrumentationMiddleware(self._tracer)\n\n broker.add_extra_default_middleware(instrumentation_middleware)\n\n def _uninstrument(self, **kwargs):\n broker.remove_extra_default_middleware(_InstrumentationMiddleware)\n", "path": "instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py" } ]
[ { "content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUsage\n-----\n\nStart broker backend\n\n::\n\n docker run -p 5672:5672 rabbitmq\n\nRun instrumented actor\n\n.. code-block:: python\n\n from remoulade.brokers.rabbitmq import RabbitmqBroker\n import remoulade\n\n RemouladeInstrumentor().instrument()\n\n broker = RabbitmqBroker()\n remoulade.set_broker(broker)\n\n @remoulade.actor\n def multiply(x, y):\n return x * y\n\n broker.declare_actor(count_words)\n\n multiply.send(43, 51)\n\n\"\"\"\nfrom typing import Collection\n\nfrom remoulade import Middleware, broker\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.remoulade import utils\nfrom opentelemetry.instrumentation.remoulade.package import _instruments\nfrom opentelemetry.instrumentation.remoulade.version import __version__\nfrom opentelemetry.propagate import extract, inject\nfrom opentelemetry.semconv.trace import SpanAttributes\n\n_REMOULADE_MESSAGE_TAG_KEY = \"remoulade.action\"\n_REMOULADE_MESSAGE_SEND = \"send\"\n_REMOULADE_MESSAGE_RUN = \"run\"\n\n_REMOULADE_MESSAGE_NAME_KEY = \"remoulade.actor_name\"\n\n_REMOULADE_MESSAGE_RETRY_COUNT_KEY = \"remoulade.retry_count\"\n\n\nclass _InstrumentationMiddleware(Middleware):\n def __init__(self, _tracer):\n self._tracer = _tracer\n self._span_registry = {}\n\n def before_process_message(self, _broker, message):\n if \"trace_ctx\" not in message.options:\n return\n\n trace_ctx = extract(message.options[\"trace_ctx\"])\n retry_count = message.options.get(\"retries\", 0)\n operation_name = utils.get_operation_name(\n \"before_process_message\", retry_count\n )\n span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\n\n span = self._tracer.start_span(\n operation_name,\n kind=trace.SpanKind.CONSUMER,\n context=trace_ctx,\n attributes=span_attributes,\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n\n utils.attach_span(\n self._span_registry, message.message_id, (span, activation)\n )\n\n def after_process_message(\n self, _broker, message, *, result=None, exception=None\n ):\n span, activation = utils.retrieve_span(\n self._span_registry, message.message_id\n )\n\n if span is None:\n # no existing span found for message_id\n return\n\n if span.is_recording():\n span.set_attributes(\n {\n _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_RUN,\n _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\n SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\n }\n )\n\n activation.__exit__(None, None, None)\n utils.detach_span(self._span_registry, message.message_id)\n\n def before_enqueue(self, _broker, message, delay):\n retry_count = message.options.get(\"retries\", 0)\n operation_name = utils.get_operation_name(\n \"before_enqueue\", retry_count\n )\n span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\n\n span = self._tracer.start_span(\n 
operation_name,\n kind=trace.SpanKind.PRODUCER,\n attributes=span_attributes,\n )\n\n if span.is_recording():\n span.set_attributes(\n {\n _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_SEND,\n _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\n SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\n }\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n\n utils.attach_span(\n self._span_registry,\n message.message_id,\n (span, activation),\n is_publish=True,\n )\n\n if \"trace_ctx\" not in message.options:\n message.options[\"trace_ctx\"] = {}\n inject(message.options[\"trace_ctx\"])\n\n def after_enqueue(self, _broker, message, delay, exception=None):\n _, activation = utils.retrieve_span(\n self._span_registry, message.message_id, is_publish=True\n )\n\n if activation is None:\n # no existing span found for message_id\n return\n\n activation.__exit__(None, None, None)\n utils.detach_span(\n self._span_registry, message.message_id, is_publish=True\n )\n\n\nclass RemouladeInstrumentor(BaseInstrumentor):\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n\n # pylint: disable=attribute-defined-outside-init\n self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n instrumentation_middleware = _InstrumentationMiddleware(self._tracer)\n\n broker.add_extra_default_middleware(instrumentation_middleware)\n\n def _uninstrument(self, **kwargs):\n broker.remove_extra_default_middleware(_InstrumentationMiddleware)\n", "path": "instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py" } ]
diff --git a/docs/instrumentation/remoulade/remoulade.rst b/docs/instrumentation/remoulade/remoulade.rst new file mode 100644 index 0000000000..bc5a7da42c --- /dev/null +++ b/docs/instrumentation/remoulade/remoulade.rst @@ -0,0 +1,7 @@ +.. include:: ../../../instrumentation/opentelemetry-instrumentation-remoulade/README.rst + + +.. automodule:: opentelemetry.instrumentation.remoulade + :members: + :undoc-members: + :show-inheritance: diff --git a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py index c9e53d92df..87a26585fc 100644 --- a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py @@ -16,13 +16,13 @@ Usage ----- -* Start broker backend +Start broker backend :: docker run -p 5672:5672 rabbitmq -* Run instrumented actor +Run instrumented actor .. code-block:: python
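The remoulade patch above is documentation plumbing: a new `remoulade.rst` stub pulls the module docstring into readthedocs via `automodule`, and the docstring's bullet items are flattened into plain paragraphs, presumably so the `::` and `.. code-block::` literal blocks render cleanly. As a companion, here is a hedged sketch of exercising the documented usage with an explicit tracer provider and a console exporter. The `tracer_provider` keyword is the one read in `_instrument()` above, the broker and actor wiring follow the module docstring, and the SDK exporter setup is an assumption added for local verification, not something the patch prescribes.

```python
# Sketch (assumed wiring, not part of the patch): print spans from the
# instrumented actor locally with a console exporter.
import remoulade
from remoulade.brokers.rabbitmq import RabbitmqBroker

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
from opentelemetry.instrumentation.remoulade import RemouladeInstrumentor

provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

# Instrument before creating the broker, mirroring the order in the docstring,
# and hand over the provider via the tracer_provider keyword read by _instrument().
RemouladeInstrumentor().instrument(tracer_provider=provider)

broker = RabbitmqBroker()
remoulade.set_broker(broker)


@remoulade.actor
def multiply(x, y):
    return x * y


broker.declare_actor(multiply)
multiply.send(6, 7)  # a "send" span should be printed by the console exporter
```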
frappe__frappe-23585
Route History shouldn’t be editable Editing or adding a new Route History: ![IMG_2238](https://github.com/frappe/frappe/assets/46800703/caf5ccf6-80c0-4d66-8ed8-1716eaf27f52) ![IMG_2240](https://github.com/frappe/frappe/assets/46800703/ff1d1ae5-f657-4607-a27e-e390096b5144) … shouldn’t be possible, not even for the Administrator.
[ { "content": "# Copyright (c) 2022, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe.deferred_insert import deferred_insert as _deferred_insert\nfrom frappe.model.document import Document\n\n\nclass RouteHistory(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\troute: DF.Data | None\n\t\tuser: DF.Link | None\n\t# end: auto-generated types\n\t@staticmethod\n\tdef clear_old_logs(days=30):\n\t\tfrom frappe.query_builder import Interval\n\t\tfrom frappe.query_builder.functions import Now\n\n\t\ttable = frappe.qb.DocType(\"Route History\")\n\t\tfrappe.db.delete(table, filters=(table.modified < (Now() - Interval(days=days))))\n\n\[email protected]()\ndef deferred_insert(routes):\n\troutes = [\n\t\t{\n\t\t\t\"user\": frappe.session.user,\n\t\t\t\"route\": route.get(\"route\"),\n\t\t\t\"creation\": route.get(\"creation\"),\n\t\t}\n\t\tfor route in frappe.parse_json(routes)\n\t]\n\n\t_deferred_insert(\"Route History\", routes)\n\n\[email protected]()\ndef frequently_visited_links():\n\treturn frappe.get_all(\n\t\t\"Route History\",\n\t\tfields=[\"route\", \"count(name) as count\"],\n\t\tfilters={\"user\": frappe.session.user},\n\t\tgroup_by=\"route\",\n\t\torder_by=\"count desc\",\n\t\tlimit=5,\n\t)\n", "path": "frappe/desk/doctype/route_history/route_history.py" } ]
[ { "content": "# Copyright (c) 2022, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe.deferred_insert import deferred_insert as _deferred_insert\nfrom frappe.model.document import Document\n\n\nclass RouteHistory(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\troute: DF.Data | None\n\t\tuser: DF.Link | None\n\t# end: auto-generated types\n\n\t@staticmethod\n\tdef clear_old_logs(days=30):\n\t\tfrom frappe.query_builder import Interval\n\t\tfrom frappe.query_builder.functions import Now\n\n\t\ttable = frappe.qb.DocType(\"Route History\")\n\t\tfrappe.db.delete(table, filters=(table.modified < (Now() - Interval(days=days))))\n\n\[email protected]()\ndef deferred_insert(routes):\n\troutes = [\n\t\t{\n\t\t\t\"user\": frappe.session.user,\n\t\t\t\"route\": route.get(\"route\"),\n\t\t\t\"creation\": route.get(\"creation\"),\n\t\t}\n\t\tfor route in frappe.parse_json(routes)\n\t]\n\n\t_deferred_insert(\"Route History\", routes)\n\n\[email protected]()\ndef frequently_visited_links():\n\treturn frappe.get_all(\n\t\t\"Route History\",\n\t\tfields=[\"route\", \"count(name) as count\"],\n\t\tfilters={\"user\": frappe.session.user},\n\t\tgroup_by=\"route\",\n\t\torder_by=\"count desc\",\n\t\tlimit=5,\n\t)\n", "path": "frappe/desk/doctype/route_history/route_history.py" } ]
diff --git a/frappe/desk/doctype/route_history/route_history.json b/frappe/desk/doctype/route_history/route_history.json index a5d73fc360d2..0b96277431da 100644 --- a/frappe/desk/doctype/route_history/route_history.json +++ b/frappe/desk/doctype/route_history/route_history.json @@ -1,5 +1,6 @@ { "actions": [], + "allow_copy": 1, "creation": "2018-10-05 11:26:04.601113", "doctype": "DocType", "editable_grid": 1, @@ -13,7 +14,9 @@ "fieldname": "route", "fieldtype": "Data", "in_list_view": 1, - "label": "Route" + "label": "Route", + "no_copy": 1, + "read_only": 1 }, { "fieldname": "user", @@ -21,30 +24,29 @@ "in_list_view": 1, "in_standard_filter": 1, "label": "User", - "options": "User" + "no_copy": 1, + "options": "User", + "read_only": 1 } ], + "in_create": 1, "links": [], - "modified": "2022-06-13 05:48:56.967244", + "modified": "2023-12-04 04:41:32.448331", "modified_by": "Administrator", "module": "Desk", "name": "Route History", "owner": "Administrator", "permissions": [ { - "create": 1, - "delete": 1, "email": 1, "export": 1, "print": 1, "read": 1, "report": 1, "role": "System Manager", - "share": 1, - "write": 1 + "share": 1 } ], - "quick_entry": 1, "sort_field": "modified", "sort_order": "DESC", "states": [], diff --git a/frappe/desk/doctype/route_history/route_history.py b/frappe/desk/doctype/route_history/route_history.py index 9aba975c3a87..5c0c37d4a731 100644 --- a/frappe/desk/doctype/route_history/route_history.py +++ b/frappe/desk/doctype/route_history/route_history.py @@ -18,6 +18,7 @@ class RouteHistory(Document): route: DF.Data | None user: DF.Link | None # end: auto-generated types + @staticmethod def clear_old_logs(days=30): from frappe.query_builder import Interval
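The frappe patch above locks the Route History DocType down declaratively: `in_create` plus `read_only`/`no_copy` on the fields and the removal of the create, write and delete permissions mean records can no longer be added or edited from the desk, not even by the Administrator, while reading, reporting and exporting stay available. New entries still arrive through the whitelisted `deferred_insert` endpoint in `route_history.py`. The sketch below shows that server-side path; the payload values are invented for the example, and it assumes a normal request/site context so that `frappe.session.user` resolves.

```python
# Sketch: Route History rows are created programmatically, not via the form.
# The payload shape ("route", "creation") mirrors what deferred_insert() above
# expects; the concrete values here are made up.
import frappe
from frappe.desk.doctype.route_history.route_history import deferred_insert

routes = [{"route": "app/todo", "creation": "2023-12-04 10:00:00"}]
deferred_insert(frappe.as_json(routes))
```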
akvo__akvo-rsr-2158
Improve IATI export ## Test plan Before starting with testing, make sure to perform two one-time actions: - The `iati_export` cron job is running (done on Test and UAT) - See running cron jobs by running: `manage.py crontab show`; - The `perform_iati_checks` management command has been run (done on Test and UAT). --- GIVEN the 'My IATI' section in MyRSR WHEN connected to multiple organisations (or as superuser) THEN an organisation selection screen should be shown WHEN connected to one organisation THEN this organisation should be automatically selected GIVEN the 'My IATI' section in MyRSR WHEN an organisation has been selected THEN the overview of all IATI exports should be shown AND for each export the status, number of projects, created by, created at and IATI version should be shown AND the latest IATI export should be shown in a green row AND a pending or in progress IATI export should be shown in a yellow row AND a cancelled or export without an IATI file should be shown in a red row GIVEN the 'My IATI' section in MyRSR WHEN an organisation has been selected THEN it should be possible to select whether the latest IATI file is shown on the organisation page GIVEN that is has been set that the latest IATI file is shown on the organisation page THEN it should be shown on the organisation page as well ELSE the IATI file should not be shown GIVEN that the 'Add new IATI export' button is clicked THEN the user should be redirected to the project selection overview GIVEN the project selection overview WHEN looking at the projects overview THEN all projects where the selected organisation is reporting organisation should be shown GIVEN the project selection overview WHEN applying a filter THEN the project selection should change AND the indication of the number of projects selected should indicate the number of selected projects GIVEN the project selection overview WHEN projects are selected AND the 'create new IATI export' button is clicked THEN the user should be redirected to the IATI exports overview AND the top IATI export should be the new IATI export (with 'Pending' status) GIVEN the IATI export overview WHEN an export is pending or in progress THEN the overview should be refreshed every 10 seconds AND when an export is in progress, the number of processed projects should be shown ## Issue description Currently, an IATI export with more than 70 projects will give a DB timeout. However, we need to be able to export an IATI file with any amount of projects. Similar to the IATI import, we can use a cron job for this. - [x] Move IATI export to a cron job implementation - [x] Update the 'My IATI' tab in MyRSR
[ { "content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module. \n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport re\n\nfrom xml.sax.saxutils import XMLGenerator\n\nfrom django.contrib.syndication.views import FeedDoesNotExist, Feed\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.feedgenerator import Rss201rev2Feed\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom akvo.rsr.models import Project, ProjectUpdate, Organisation\n\n\ndef __dict_replace(s, d):\n \"\"\"Replace substrings of a string using a dictionary.\"\"\"\n for key, value in d.items():\n s = s.replace(key, value)\n return s\n\ndef __escape(data, entities):\n # must do ampersand first\n data = data.replace(\"&\", \"&amp;\")\n data = data.replace(\">\", \"&gt;\")\n data = data.replace(\"<\", \"&lt;\")\n if entities:\n data = __dict_replace(data, entities)\n return data\n\ndef escape(data, entities={}):\n \"\"\"Modification to xml.sax.saxutils.escape to that detects CDATA blocks that are not escaped\n\n Escape &, <, and > in a string of data.\n\n You can escape other strings of data by passing a dictionary as\n the optional entities parameter. The keys and values must all be\n strings; each key will be replaced with its corresponding value.\n\n \"\"\"\n # find character data, re.DOTALL includes linefeed in .\n pattern = re.compile('<!\\[CDATA\\[.*\\]\\]>', re.DOTALL)\n iterator = pattern.finditer(data)\n start = 0\n bits = []\n for match in iterator:\n #grab chunk before first match\n bit = data[start:match.span()[0]]\n bit = __escape(bit, entities)\n bits.append(bit)\n #grab match\n bit = data[match.span()[0]:match.span()[1]]\n bits.extend(bit)\n start = match.span()[1]\n # escape tail bit after last match\n bit = data[start:]\n bit = __escape(bit, entities)\n bits.extend(bit)\n data = ''.join(bits)\n return data\n\n\nclass RSRSimplerXMLGenerator(XMLGenerator):\n \"\"\"subclassed to be able to call custom escape() function, see above\n \"\"\"\n def characters(self, content):\n self._write(escape(content))\n\n def addQuickElement(self, name, contents=None, attrs=None):\n \"Convenience method for adding an element with no children\"\n if attrs is None: attrs = {}\n self.startElement(name, attrs)\n if contents is not None:\n self.characters(contents)\n self.endElement(name)\n\n\nclass RSRMediaRssFeed(Rss201rev2Feed):\n def rss_attributes(self):\n attrs = super(RSRMediaRssFeed, self).rss_attributes()\n attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'\n attrs['xmlns:atom'] = 'http://www.w3.org/2005/Atom'\n return attrs\n\n def add_item_elements(self, handler, item):\n \"\"\"Callback to add elements to each item (item/entry) element.\"\"\"\n super(RSRMediaRssFeed, self).add_item_elements(handler, item)\n\n if 'media:title' in item:\n handler.addQuickElement(u\"media:title\", item['title'])\n if 'media:description' in item:\n handler.addQuickElement(u\"media:description\", item['media:description'])\n if 'media:credit' in item:\n handler.addQuickElement(u\"media:credit\", item['media:credit'])\n\n if 'content_url' in item:\n content = dict(url=item['content_url'])\n if 'content_width' in item:\n content['width'] = str(item['content_width'])\n if 'content_height' in item:\n content['height'] = str(item['content_height'])\n 
handler.addQuickElement(u\"media:content\", '', content)\n\n if 'thumbnail_url' in item:\n thumbnail = dict(url=item['thumbnail_url'])\n if 'thumbnail_width' in item:\n thumbnail['width'] = str(item['thumbnail_width'])\n if 'thumbnail_height' in item:\n thumbnail['height'] = str(item['thumbnail_height'])\n handler.addQuickElement(u\"media:thumbnail\", '', thumbnail)\n\n if 'keywords' in item:\n handler.addQuickElement(u\"media:keywords\", item['keywords'])\n\n def write(self, outfile, encoding):\n handler = RSRSimplerXMLGenerator(outfile, encoding)\n handler.startDocument()\n handler.startElement(u\"rss\", self.rss_attributes())\n handler.startElement(u\"channel\", self.root_attributes())\n self.add_root_elements(handler)\n self.write_items(handler)\n self.endChannelElement(handler)\n handler.endElement(u\"rss\")\n\nclass UpdateFeed(Feed):\n \"\"\"base class generating Update feeds\n \"\"\"\n feed_type = RSRMediaRssFeed\n\n def link(self, obj):\n if not obj:\n raise FeedDoesNotExist\n return obj.get_absolute_url()\n\n def item_link(self, item):\n return item.get_absolute_url()\n\n def item_title(self, item):\n return item.title\n\n def item_description(self, item):\n try:\n size = item.photo.size\n return '<![CDATA[<p><a href=\"%s\"><img src=\"%s\" alt=\"\" /></a></p><p>%s</p>]]>' % (\n item.get_absolute_url(),\n item.photo.thumbnail.absolute_url,\n item.text,\n )\n except:\n return item.text\n\n def item_pubdate(self, item):\n return item.created_at\n\n def item_author_name(self, item):\n return item.user.get_full_name()\n\n def item_credit(self, item):\n return item.photo_credit\n\n def item_extra_kwargs(self, item):\n \"\"\"return a dictionary to the feedgenerator for each item to be added to the feed.\n \"\"\"\n try:\n size = item.photo.size\n photo = item.photo\n kwargs = {\n 'media:title': item.title,\n 'media:description': item.photo_caption,\n 'media:credit': item.photo_credit,\n 'content_url': photo.url,\n 'content_width': photo.width,\n 'content_height': photo.height,\n 'thumbnail_url': photo.thumbnail.absolute_url,\n 'thumbnail_width': photo.thumbnail.width(),\n 'thumbnail_height': photo.thumbnail.height(),\n }\n return kwargs\n except:\n return {}\n\n\nclass ProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates of a project.\"\"\"\n def get_object(self, request, project_id):\n return Project.objects.get(pk__exact=project_id)\n\n def title(self, obj):\n return _(u'Akvo RSR project %(id)d: %(project_title)s') % {\n 'id': obj.id,\n 'project_title': obj.title\n }\n\n def description(self, obj):\n return _(u'Project updates for project %(project_title)s') % {\n 'project_title': obj.title\n }\n\n def items(self, obj):\n # Limited to 25 items to prevent gateway timeouts.\n return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:25]\n\n\nclass OrganisationUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates of an organisation.\"\"\"\n feed_type = RSRMediaRssFeed\n\n def get_object(self, request, org_id):\n return get_object_or_404(Organisation, id=int(org_id))\n\n def title(self, obj):\n return _(u'Projects of %(org_name)s') % {'org_name':obj.name,}\n\n def description(self, obj):\n if obj.name == obj.long_name:\n return _(u\"Project updates for projects partnered by %(org_name)s\") % {\n 'org_name': obj.name\n }\n else:\n return _(\n u\"Project updates for projects partnered by %(org_name)s - %(long_name)s\"\n ) % {'org_name': obj.name, 'long_name': obj.long_name}\n\n def items(self, obj):\n # Limited to 25 items to prevent gateway timeouts.\n 
return obj.published_projects().all_updates()[:25]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 'update_title': item.title\n }\n\n\nclass AllProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates.\"\"\"\n title = _(u'Last 25 RSR project updates')\n \n def link(self):\n return reverse('update-directory')\n\n description = _(u'Project updates for all Akvo RSR projects')\n\n def items(self):\n # Limited to 25 items to prevent gateway timeouts.\n return ProjectUpdate.objects.select_related().order_by('-id')[:25]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 'update_title': item.title\n }\n", "path": "akvo/rsr/feeds.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module. \n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport re\n\nfrom xml.sax.saxutils import XMLGenerator\n\nfrom django.contrib.syndication.views import FeedDoesNotExist, Feed\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.feedgenerator import Rss201rev2Feed\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom akvo.rsr.models import Project, ProjectUpdate, Organisation\n\n\ndef __dict_replace(s, d):\n \"\"\"Replace substrings of a string using a dictionary.\"\"\"\n for key, value in d.items():\n s = s.replace(key, value)\n return s\n\ndef __escape(data, entities):\n # must do ampersand first\n data = data.replace(\"&\", \"&amp;\")\n data = data.replace(\">\", \"&gt;\")\n data = data.replace(\"<\", \"&lt;\")\n if entities:\n data = __dict_replace(data, entities)\n return data\n\ndef escape(data, entities={}):\n \"\"\"Modification to xml.sax.saxutils.escape to that detects CDATA blocks that are not escaped\n\n Escape &, <, and > in a string of data.\n\n You can escape other strings of data by passing a dictionary as\n the optional entities parameter. The keys and values must all be\n strings; each key will be replaced with its corresponding value.\n\n \"\"\"\n # find character data, re.DOTALL includes linefeed in .\n pattern = re.compile('<!\\[CDATA\\[.*\\]\\]>', re.DOTALL)\n iterator = pattern.finditer(data)\n start = 0\n bits = []\n for match in iterator:\n #grab chunk before first match\n bit = data[start:match.span()[0]]\n bit = __escape(bit, entities)\n bits.append(bit)\n #grab match\n bit = data[match.span()[0]:match.span()[1]]\n bits.extend(bit)\n start = match.span()[1]\n # escape tail bit after last match\n bit = data[start:]\n bit = __escape(bit, entities)\n bits.extend(bit)\n data = ''.join(bits)\n return data\n\n\nclass RSRSimplerXMLGenerator(XMLGenerator):\n \"\"\"subclassed to be able to call custom escape() function, see above\n \"\"\"\n def characters(self, content):\n self._write(escape(content))\n\n def addQuickElement(self, name, contents=None, attrs=None):\n \"Convenience method for adding an element with no children\"\n if attrs is None: attrs = {}\n self.startElement(name, attrs)\n if contents is not None:\n self.characters(contents)\n self.endElement(name)\n\n\nclass RSRMediaRssFeed(Rss201rev2Feed):\n def rss_attributes(self):\n attrs = super(RSRMediaRssFeed, self).rss_attributes()\n attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'\n attrs['xmlns:atom'] = 'http://www.w3.org/2005/Atom'\n return attrs\n\n def add_item_elements(self, handler, item):\n \"\"\"Callback to add elements to each item (item/entry) element.\"\"\"\n super(RSRMediaRssFeed, self).add_item_elements(handler, item)\n\n if 'media:title' in item:\n handler.addQuickElement(u\"media:title\", item['title'])\n if 'media:description' in item:\n handler.addQuickElement(u\"media:description\", item['media:description'])\n if 'media:credit' in item:\n handler.addQuickElement(u\"media:credit\", item['media:credit'])\n\n if 'content_url' in item:\n content = dict(url=item['content_url'])\n if 'content_width' in item:\n content['width'] = str(item['content_width'])\n if 'content_height' in item:\n content['height'] = str(item['content_height'])\n 
handler.addQuickElement(u\"media:content\", '', content)\n\n if 'thumbnail_url' in item:\n thumbnail = dict(url=item['thumbnail_url'])\n if 'thumbnail_width' in item:\n thumbnail['width'] = str(item['thumbnail_width'])\n if 'thumbnail_height' in item:\n thumbnail['height'] = str(item['thumbnail_height'])\n handler.addQuickElement(u\"media:thumbnail\", '', thumbnail)\n\n if 'keywords' in item:\n handler.addQuickElement(u\"media:keywords\", item['keywords'])\n\n def write(self, outfile, encoding):\n handler = RSRSimplerXMLGenerator(outfile, encoding)\n handler.startDocument()\n handler.startElement(u\"rss\", self.rss_attributes())\n handler.startElement(u\"channel\", self.root_attributes())\n self.add_root_elements(handler)\n self.write_items(handler)\n self.endChannelElement(handler)\n handler.endElement(u\"rss\")\n\nclass UpdateFeed(Feed):\n \"\"\"base class generating Update feeds\n \"\"\"\n feed_type = RSRMediaRssFeed\n\n def link(self, obj):\n if not obj:\n raise FeedDoesNotExist\n return obj.get_absolute_url()\n\n def item_link(self, item):\n return item.get_absolute_url()\n\n def item_title(self, item):\n return item.title\n\n def item_description(self, item):\n try:\n size = item.photo.size\n return '<![CDATA[<p><a href=\"%s\"><img src=\"%s\" alt=\"\" /></a></p><p>%s</p>]]>' % (\n item.get_absolute_url(),\n item.photo.thumbnail.absolute_url,\n item.text,\n )\n except:\n return item.text\n\n def item_pubdate(self, item):\n return item.created_at\n\n def item_author_name(self, item):\n return item.user.get_full_name()\n\n def item_credit(self, item):\n return item.photo_credit\n\n def item_extra_kwargs(self, item):\n \"\"\"return a dictionary to the feedgenerator for each item to be added to the feed.\n \"\"\"\n try:\n size = item.photo.size\n photo = item.photo\n kwargs = {\n 'media:title': item.title,\n 'media:description': item.photo_caption,\n 'media:credit': item.photo_credit,\n 'content_url': photo.url,\n 'content_width': photo.width,\n 'content_height': photo.height,\n 'thumbnail_url': photo.thumbnail.absolute_url,\n 'thumbnail_width': photo.thumbnail.width(),\n 'thumbnail_height': photo.thumbnail.height(),\n }\n return kwargs\n except:\n return {}\n\n\nclass ProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates of a project.\"\"\"\n def get_object(self, request, project_id):\n return Project.objects.get(pk__exact=project_id)\n\n def title(self, obj):\n return _(u'Akvo RSR project %(id)d: %(project_title)s') % {\n 'id': obj.id,\n 'project_title': obj.title\n }\n\n def description(self, obj):\n return _(u'Project updates for project %(project_title)s') % {\n 'project_title': obj.title\n }\n\n def items(self, obj):\n # Limited to 25 items to prevent gateway timeouts.\n return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:25]\n\n\nclass OrganisationUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates of an organisation.\"\"\"\n feed_type = RSRMediaRssFeed\n\n def get_object(self, request, org_id):\n return get_object_or_404(Organisation, id=int(org_id))\n\n def title(self, obj):\n return _(u'Projects of %(org_name)s') % {'org_name':obj.name,}\n\n def description(self, obj):\n if obj.name == obj.long_name:\n return _(u\"Project updates for projects partnered by %(org_name)s\") % {\n 'org_name': obj.name\n }\n else:\n return _(\n u\"Project updates for projects partnered by %(org_name)s - %(long_name)s\"\n ) % {'org_name': obj.name, 'long_name': obj.long_name}\n\n def items(self, obj):\n # Limited to 25 items to prevent gateway timeouts.\n 
return obj.all_updates()[:25]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 'update_title': item.title\n }\n\n\nclass AllProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates.\"\"\"\n title = _(u'Last 25 RSR project updates')\n \n def link(self):\n return reverse('update-directory')\n\n description = _(u'Project updates for all Akvo RSR projects')\n\n def items(self):\n # Limited to 25 items to prevent gateway timeouts.\n return ProjectUpdate.objects.select_related().order_by('-id')[:25]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 'update_title': item.title\n }\n", "path": "akvo/rsr/feeds.py" } ]
diff --git a/akvo/rsr/feeds.py b/akvo/rsr/feeds.py index bc7e5802c2..4d95fde643 100644 --- a/akvo/rsr/feeds.py +++ b/akvo/rsr/feeds.py @@ -227,7 +227,7 @@ def description(self, obj): def items(self, obj): # Limited to 25 items to prevent gateway timeouts. - return obj.published_projects().all_updates()[:25] + return obj.all_updates()[:25] def item_title(self, item): return _( diff --git a/akvo/rsr/static/scripts-src/my-iati.js b/akvo/rsr/static/scripts-src/my-iati.js index b2ddeecb0b..c70e835948 100644 --- a/akvo/rsr/static/scripts-src/my-iati.js +++ b/akvo/rsr/static/scripts-src/my-iati.js @@ -327,7 +327,7 @@ function loadComponents() { onclickAll; if (this.props.projects.length > 0) { - // In case there are projets, show a table overview of the projects. + // In case there are projects, show a table overview of the projects. checked = this.props.projects.length === this.props.selectedProjects.length; onclickAll = checked ? this.props.deselectAll : this.props.selectAll; projects = this.sortedProjects().map(function(project) { @@ -384,7 +384,10 @@ function loadComponents() { selectedProjects: [], lastExport: null, publishedFilter: false, - exporting: false + exporting: false, + noErrorsChecked: false, + previousChecked: false, + publishedChecked: false }; }, @@ -427,6 +430,7 @@ function loadComponents() { if (projectIndex > -1) { newSelection.splice(projectIndex, 1); + this.checkUnchecked(); } else { newSelection.push(projectId); } @@ -451,6 +455,27 @@ function loadComponents() { apiCall('POST', url, data, true, exportAdded); }, + checkUnchecked: function() { + // A check whether the filters should be unchecked (after deselecting a project, + // for instance). + var selection = this.state.selectedProjects, + allProjects = this.state.allProjects.results, + lastProjects = this.state.lastExport[0].projects; + + for (var i = 0; i < allProjects.length; i++) { + var project = allProjects[i]; + if (project.checks_errors.length === 0 && selection.indexOf(project.id) < 0) { + this.setState({noErrorsChecked: false}); + } + if (lastProjects.indexOf(project.id) > -1 && selection.indexOf(project.id) < 0) { + this.setState({previousChecked: false}); + } + if (project.publishing_status === 'published' && selection.indexOf(project.id) < 0) { + this.setState({publishedChecked: false}); + } + } + }, + selectNoErrors: function(select) { var newSelection = this.state.selectedProjects; @@ -467,14 +492,6 @@ function loadComponents() { this.setState({selectedProjects: newSelection}); }, - selectNoErrorsProjects: function() { - this.selectNoErrors(true); - }, - - deselectNoErrorsProjects: function() { - this.selectNoErrors(false); - }, - checkNoErrors: function() { var noErrorsCount = 0; @@ -488,15 +505,14 @@ function loadComponents() { return noErrorsCount; }, - allNoErrorsSelected: function() { - for (var i = 0; i < this.state.allProjects.results.length; i++) { - var project = this.state.allProjects.results[i]; - - if (project.checks_errors.length === 0 && this.state.selectedProjects.indexOf(project.id) < 0) { - return false; - } + clickNoErrorsProjects: function() { + var previousState = this.state.noErrorsChecked; + this.setState({noErrorsChecked: !previousState}); + if (previousState) { + this.selectNoErrors(false); + } else { + this.selectNoErrors(true); } - return true; }, renderNoErrorsButton: function() { @@ -504,34 +520,77 @@ function loadComponents() { return ( React.DOM.span(null ) ); - } else if (this.allNoErrorsSelected()) { - if (!this.state.exporting) { - return ( - React.DOM.button( 
{className:"btn btn-default btn-sm"}, - React.DOM.input( {type:"checkbox", checked:true, onClick:this.deselectNoErrorsProjects} ), " ", cap(i18n.without_errors) - ) - ); - } else { - return ( - React.DOM.button( {className:"btn btn-default btn-sm disabled"}, - React.DOM.input( {type:"checkbox", checked:true} ), " ", cap(i18n.without_errors) - ) - ); + } else { + var buttonClass = "btn btn-default btn-sm"; + + if (this.state.exporting) { + buttonClass += " disabled"; + } + + return ( + React.DOM.button( {className:buttonClass, onClick:this.clickNoErrorsProjects} , + React.DOM.input( {type:"checkbox", checked:this.state.noErrorsChecked} ), " ", cap(i18n.without_errors) + ) + ); + } + }, + + selectPublished: function(select) { + var newSelection = this.state.selectedProjects; + + for (var i = 0; i < this.state.allProjects.results.length; i++) { + var project = this.state.allProjects.results[i], + newSelectionIndex = newSelection.indexOf(project.id); + if (select && newSelectionIndex < 0 && project.publishing_status === 'published') { + newSelection.push(project.id); + } else if (!select && newSelectionIndex > -1 && project.publishing_status === 'published') { + newSelection.splice(newSelectionIndex, 1); + } + } + + this.setState({selectedProjects: newSelection}); + }, + + checkPublished: function() { + var noErrorsCount = 0; + + for (var i = 0; i < this.state.allProjects.results.length; i++) { + var project = this.state.allProjects.results[i]; + if (project.publishing_status === 'published') { + return true; } + } + + return false; + }, + + clickPublishedProjects: function() { + var previousState = this.state.publishedChecked; + this.setState({publishedChecked: !previousState}); + if (previousState) { + this.selectPublished(false); } else { - if (!this.state.exporting) { - return ( - React.DOM.button( {className:"btn btn-default btn-sm"}, - React.DOM.input( {type:"checkbox", checked:false, onClick:this.selectNoErrorsProjects} ), " ", cap(i18n.without_errors) - ) - ); - } else { - return ( - React.DOM.button( {className:"btn btn-default btn-sm disabled"}, - React.DOM.input( {type:"checkbox", checked:false} ), " ", cap(i18n.without_errors) - ) - ); + this.selectPublished(true); + } + }, + + renderPublishedButton: function() { + if (this.state.allProjects === null || this.state.allProjects.results.length === 0 || !this.checkPublished()) { + return ( + React.DOM.span(null ) + ); + } else { + var buttonClass = "btn btn-default btn-sm"; + + if (this.state.exporting) { + buttonClass += " disabled"; } + + return ( + React.DOM.button( {className:buttonClass, onClick:this.clickPublishedProjects}, + React.DOM.input( {type:"checkbox", checked:this.state.publishedChecked} ), " ", cap(i18n.published) + ) + ); } }, @@ -552,14 +611,6 @@ function loadComponents() { this.setState({selectedProjects: newSelection}); }, - selectPreviousProjects: function() { - this.selectPrevious(true); - }, - - deselectPreviousProjects: function() { - this.selectPrevious(false); - }, - checkPrevious: function() { var lastProjects = this.state.lastExport[0].projects, countLastProjects = 0; @@ -574,40 +625,34 @@ function loadComponents() { return countLastProjects === lastProjects.length; }, + clickPreviousProjects: function() { + var previousState = this.state.previousChecked; + this.setState({previousChecked: !previousState}); + if (previousState) { + this.selectPrevious(false); + } else { + this.selectPrevious(true); + } + }, + renderSelectPreviousButton: function() { if (this.state.initializing || 
this.state.allProjects.results.length === 0 || this.state.lastExport.length === 0 || this.state.lastExport[0].projects.length === 0) { return ( React.DOM.span(null ) ); - } else if (this.checkPrevious()) { - if (!this.state.exporting) { - return ( - React.DOM.button( {className:"btn btn-default btn-sm", onClick:this.deselectPreviousProjects}, - React.DOM.input( {type:"checkbox", checked:true, onClick:this.deselectPreviousProjects} ), " ", cap(i18n.included_export) - ) - ); - } else { - return ( - React.DOM.button( {className:"btn btn-default btn-sm disabled"}, - React.DOM.input( {type:"checkbox", checked:true} ), " ", cap(i18n.included_export) - ) - ); - } } else { - if (!this.state.exporting) { - return ( - React.DOM.button( {className:"btn btn-default btn-sm", onClick:this.selectPreviousProjects}, - React.DOM.input( {type:"checkbox", checked:false, onClick:this.selectPreviousProjects} ), " ", cap(i18n.included_export) - ) - ); - } else { - return ( - React.DOM.button( {className:"btn btn-default btn-sm disabled"}, - React.DOM.input( {type:"checkbox", checked:false} ), " ", cap(i18n.included_export) - ) - ); + var buttonClass = "btn btn-default btn-sm"; + + if (this.state.exporting) { + buttonClass += " disabled"; } + + return ( + React.DOM.button( {className:buttonClass, onClick:this.clickPreviousProjects}, + React.DOM.input( {type:"checkbox", checked:this.state.previousChecked} ), " ", cap(i18n.included_export) + ) + ); } }, @@ -639,42 +684,6 @@ function loadComponents() { this.selectAll(false); }, - renderSelectAllButton: function() { - if (this.state.allProjects === null || this.state.allProjects.results.length === 0) { - return ( - React.DOM.span(null ) - ); - } else if (this.state.allProjects.results.length === this.state.selectedProjects.length) { - if (!this.state.exporting) { - return ( - React.DOM.button( {className:"btn btn-default btn-sm", onClick:this.deselectAllProjects}, - React.DOM.input( {type:"checkbox", checked:true, onClick:this.deselectAllProjects} ), " ", cap(i18n.all) - ) - ); - } else { - return ( - React.DOM.button( {className:"btn btn-default btn-sm disabled"}, - React.DOM.input( {type:"checkbox", checked:true} ), " ", cap(i18n.all) - ) - ); - } - } else { - if (!this.state.exporting) { - return ( - React.DOM.button( {className:"btn btn-default btn-sm", onClick:this.selectAllProjects}, - React.DOM.input( {type:"checkbox", checked:false, onClick:this.selectAllProjects} ), " ", cap(i18n.all) - ) - ); - } else { - return ( - React.DOM.button( {className:"btn btn-default btn-sm disabled"}, - React.DOM.input( {type:"checkbox", checked:false} ), " ", cap(i18n.all) - ) - ); - } - } - }, - selectProjects: function(select, key, value) { var newSelection = this.state.selectedProjects; @@ -711,89 +720,14 @@ function loadComponents() { return any ? 
false : count === countSelected; }, - renderFilter: function(projectKey, projectValue, name) { - var allProjects = this.checkProjects(projectKey, projectValue, false), - thisFilter = this; - - var selectProjects = function() { - thisFilter.selectProjects(true, projectKey, projectValue); - }; - - var deselectProjects = function() { - thisFilter.selectProjects(false, projectKey, projectValue); - }; - - if (allProjects) { - if (!this.state.exporting) { - return ( - React.DOM.button( {className:"btn btn-default btn-sm", onClick:deselectProjects}, - React.DOM.input( {type:"checkbox", checked:true, onClick:deselectProjects} ), " ", cap(name) - ) - ); - } else { - return ( - React.DOM.button( {className:"btn btn-default btn-sm disabled"}, - React.DOM.input( {type:"checkbox", checked:true} ), " ", cap(name) - ) - ); - } - } else { - if (!this.state.exporting) { - return ( - React.DOM.button( {className:"btn btn-default btn-sm", onClick:selectProjects}, - React.DOM.input( {type:"checkbox", checked:false, onClick:selectProjects} ), " ", cap(name) - ) - ); - } else { - return ( - React.DOM.button( {className:"btn btn-default btn-sm disabled"}, - React.DOM.input( {type:"checkbox", checked:false} ), " ", cap(name) - ) - ); - } - } - }, - renderFilters: function() { - var renderFilter = function(filter) { - var anyProjects = thisApp.checkProjects(filter[0], filter[1], true); - - if (anyProjects) { - return thisApp.renderFilter(filter[0], filter[1], filter[2]); - } else { - return ( - React.DOM.span(null ) - ); - } - }; - - var thisApp = this, - statusFilters = [ - ['status', 'H', i18n.needs_funding], - ['status', 'A', i18n.active], - ['status', 'C', i18n.completed], - ['status', 'L', i18n.cancelled], - ['status', 'R', i18n.archived] - ], - globalFilters = [ - ['publishing_status', 'published', i18n.published], - ['is_public', true, i18n.public] - ]; - - var renderedStatusFilters = statusFilters.map(renderFilter); - var renderedGlobalFilters = globalFilters.map(renderFilter); - return ( React.DOM.div( {className:"row iatiFilters"}, React.DOM.div( {className:"col-sm-8 filterGroup"}, - React.DOM.h3(null, cap(i18n.project_selection)), - React.DOM.p(null, cap(i18n.global_selection)), - this.renderSelectAllButton(), + React.DOM.h5(null, cap(i18n.project_selection)), this.renderNoErrorsButton(), this.renderSelectPreviousButton(), - renderedGlobalFilters, - React.DOM.p(null, cap(i18n.project_status)), - renderedStatusFilters + this.renderPublishedButton() ), React.DOM.div( {className:"col-sm-4 newIatiExport text-center"}, React.DOM.p(null, this.state.selectedProjects.length, " ", i18n.projects_selected), diff --git a/akvo/rsr/static/scripts-src/my-iati.jsx b/akvo/rsr/static/scripts-src/my-iati.jsx index 7d7c3daa05..c7d0a522bb 100644 --- a/akvo/rsr/static/scripts-src/my-iati.jsx +++ b/akvo/rsr/static/scripts-src/my-iati.jsx @@ -327,7 +327,7 @@ function loadComponents() { onclickAll; if (this.props.projects.length > 0) { - // In case there are projets, show a table overview of the projects. + // In case there are projects, show a table overview of the projects. checked = this.props.projects.length === this.props.selectedProjects.length; onclickAll = checked ? 
this.props.deselectAll : this.props.selectAll; projects = this.sortedProjects().map(function(project) { @@ -384,7 +384,10 @@ function loadComponents() { selectedProjects: [], lastExport: null, publishedFilter: false, - exporting: false + exporting: false, + noErrorsChecked: false, + previousChecked: false, + publishedChecked: false }; }, @@ -427,6 +430,7 @@ function loadComponents() { if (projectIndex > -1) { newSelection.splice(projectIndex, 1); + this.checkUnchecked(); } else { newSelection.push(projectId); } @@ -451,6 +455,27 @@ function loadComponents() { apiCall('POST', url, data, true, exportAdded); }, + checkUnchecked: function() { + // A check whether the filters should be unchecked (after deselecting a project, + // for instance). + var selection = this.state.selectedProjects, + allProjects = this.state.allProjects.results, + lastProjects = this.state.lastExport[0].projects; + + for (var i = 0; i < allProjects.length; i++) { + var project = allProjects[i]; + if (project.checks_errors.length === 0 && selection.indexOf(project.id) < 0) { + this.setState({noErrorsChecked: false}); + } + if (lastProjects.indexOf(project.id) > -1 && selection.indexOf(project.id) < 0) { + this.setState({previousChecked: false}); + } + if (project.publishing_status === 'published' && selection.indexOf(project.id) < 0) { + this.setState({publishedChecked: false}); + } + } + }, + selectNoErrors: function(select) { var newSelection = this.state.selectedProjects; @@ -467,14 +492,6 @@ function loadComponents() { this.setState({selectedProjects: newSelection}); }, - selectNoErrorsProjects: function() { - this.selectNoErrors(true); - }, - - deselectNoErrorsProjects: function() { - this.selectNoErrors(false); - }, - checkNoErrors: function() { var noErrorsCount = 0; @@ -488,15 +505,14 @@ function loadComponents() { return noErrorsCount; }, - allNoErrorsSelected: function() { - for (var i = 0; i < this.state.allProjects.results.length; i++) { - var project = this.state.allProjects.results[i]; - - if (project.checks_errors.length === 0 && this.state.selectedProjects.indexOf(project.id) < 0) { - return false; - } + clickNoErrorsProjects: function() { + var previousState = this.state.noErrorsChecked; + this.setState({noErrorsChecked: !previousState}); + if (previousState) { + this.selectNoErrors(false); + } else { + this.selectNoErrors(true); } - return true; }, renderNoErrorsButton: function() { @@ -504,34 +520,77 @@ function loadComponents() { return ( <span /> ); - } else if (this.allNoErrorsSelected()) { - if (!this.state.exporting) { - return ( - <button className="btn btn-default btn-sm"> - <input type="checkbox" checked={true} onClick={this.deselectNoErrorsProjects} /> {cap(i18n.without_errors)} - </button> - ); - } else { - return ( - <button className="btn btn-default btn-sm disabled"> - <input type="checkbox" checked={true} /> {cap(i18n.without_errors)} - </button> - ); + } else { + var buttonClass = "btn btn-default btn-sm"; + + if (this.state.exporting) { + buttonClass += " disabled"; + } + + return ( + <button className={buttonClass} onClick={this.clickNoErrorsProjects}> + <input type="checkbox" checked={this.state.noErrorsChecked} /> {cap(i18n.without_errors)} + </button> + ); + } + }, + + selectPublished: function(select) { + var newSelection = this.state.selectedProjects; + + for (var i = 0; i < this.state.allProjects.results.length; i++) { + var project = this.state.allProjects.results[i], + newSelectionIndex = newSelection.indexOf(project.id); + if (select && newSelectionIndex < 0 && 
project.publishing_status === 'published') { + newSelection.push(project.id); + } else if (!select && newSelectionIndex > -1 && project.publishing_status === 'published') { + newSelection.splice(newSelectionIndex, 1); + } + } + + this.setState({selectedProjects: newSelection}); + }, + + checkPublished: function() { + var noErrorsCount = 0; + + for (var i = 0; i < this.state.allProjects.results.length; i++) { + var project = this.state.allProjects.results[i]; + if (project.publishing_status === 'published') { + return true; } + } + + return false; + }, + + clickPublishedProjects: function() { + var previousState = this.state.publishedChecked; + this.setState({publishedChecked: !previousState}); + if (previousState) { + this.selectPublished(false); } else { - if (!this.state.exporting) { - return ( - <button className="btn btn-default btn-sm"> - <input type="checkbox" checked={false} onClick={this.selectNoErrorsProjects} /> {cap(i18n.without_errors)} - </button> - ); - } else { - return ( - <button className="btn btn-default btn-sm disabled"> - <input type="checkbox" checked={false} /> {cap(i18n.without_errors)} - </button> - ); + this.selectPublished(true); + } + }, + + renderPublishedButton: function() { + if (this.state.allProjects === null || this.state.allProjects.results.length === 0 || !this.checkPublished()) { + return ( + <span /> + ); + } else { + var buttonClass = "btn btn-default btn-sm"; + + if (this.state.exporting) { + buttonClass += " disabled"; } + + return ( + <button className={buttonClass} onClick={this.clickPublishedProjects}> + <input type="checkbox" checked={this.state.publishedChecked} /> {cap(i18n.published)} + </button> + ); } }, @@ -552,14 +611,6 @@ function loadComponents() { this.setState({selectedProjects: newSelection}); }, - selectPreviousProjects: function() { - this.selectPrevious(true); - }, - - deselectPreviousProjects: function() { - this.selectPrevious(false); - }, - checkPrevious: function() { var lastProjects = this.state.lastExport[0].projects, countLastProjects = 0; @@ -574,40 +625,34 @@ function loadComponents() { return countLastProjects === lastProjects.length; }, + clickPreviousProjects: function() { + var previousState = this.state.previousChecked; + this.setState({previousChecked: !previousState}); + if (previousState) { + this.selectPrevious(false); + } else { + this.selectPrevious(true); + } + }, + renderSelectPreviousButton: function() { if (this.state.initializing || this.state.allProjects.results.length === 0 || this.state.lastExport.length === 0 || this.state.lastExport[0].projects.length === 0) { return ( <span /> ); - } else if (this.checkPrevious()) { - if (!this.state.exporting) { - return ( - <button className="btn btn-default btn-sm" onClick={this.deselectPreviousProjects}> - <input type="checkbox" checked={true} onClick={this.deselectPreviousProjects} /> {cap(i18n.included_export)} - </button> - ); - } else { - return ( - <button className="btn btn-default btn-sm disabled"> - <input type="checkbox" checked={true} /> {cap(i18n.included_export)} - </button> - ); - } } else { - if (!this.state.exporting) { - return ( - <button className="btn btn-default btn-sm" onClick={this.selectPreviousProjects}> - <input type="checkbox" checked={false} onClick={this.selectPreviousProjects} /> {cap(i18n.included_export)} - </button> - ); - } else { - return ( - <button className="btn btn-default btn-sm disabled"> - <input type="checkbox" checked={false} /> {cap(i18n.included_export)} - </button> - ); + var buttonClass = "btn btn-default btn-sm"; + 
+ if (this.state.exporting) { + buttonClass += " disabled"; } + + return ( + <button className={buttonClass} onClick={this.clickPreviousProjects}> + <input type="checkbox" checked={this.state.previousChecked} /> {cap(i18n.included_export)} + </button> + ); } }, @@ -639,42 +684,6 @@ function loadComponents() { this.selectAll(false); }, - renderSelectAllButton: function() { - if (this.state.allProjects === null || this.state.allProjects.results.length === 0) { - return ( - <span /> - ); - } else if (this.state.allProjects.results.length === this.state.selectedProjects.length) { - if (!this.state.exporting) { - return ( - <button className="btn btn-default btn-sm" onClick={this.deselectAllProjects}> - <input type="checkbox" checked={true} onClick={this.deselectAllProjects} /> {cap(i18n.all)} - </button> - ); - } else { - return ( - <button className="btn btn-default btn-sm disabled"> - <input type="checkbox" checked={true} /> {cap(i18n.all)} - </button> - ); - } - } else { - if (!this.state.exporting) { - return ( - <button className="btn btn-default btn-sm" onClick={this.selectAllProjects}> - <input type="checkbox" checked={false} onClick={this.selectAllProjects} /> {cap(i18n.all)} - </button> - ); - } else { - return ( - <button className="btn btn-default btn-sm disabled"> - <input type="checkbox" checked={false} /> {cap(i18n.all)} - </button> - ); - } - } - }, - selectProjects: function(select, key, value) { var newSelection = this.state.selectedProjects; @@ -711,89 +720,14 @@ function loadComponents() { return any ? false : count === countSelected; }, - renderFilter: function(projectKey, projectValue, name) { - var allProjects = this.checkProjects(projectKey, projectValue, false), - thisFilter = this; - - var selectProjects = function() { - thisFilter.selectProjects(true, projectKey, projectValue); - }; - - var deselectProjects = function() { - thisFilter.selectProjects(false, projectKey, projectValue); - }; - - if (allProjects) { - if (!this.state.exporting) { - return ( - <button className="btn btn-default btn-sm" onClick={deselectProjects}> - <input type="checkbox" checked={true} onClick={deselectProjects} /> {cap(name)} - </button> - ); - } else { - return ( - <button className="btn btn-default btn-sm disabled"> - <input type="checkbox" checked={true} /> {cap(name)} - </button> - ); - } - } else { - if (!this.state.exporting) { - return ( - <button className="btn btn-default btn-sm" onClick={selectProjects}> - <input type="checkbox" checked={false} onClick={selectProjects} /> {cap(name)} - </button> - ); - } else { - return ( - <button className="btn btn-default btn-sm disabled"> - <input type="checkbox" checked={false} /> {cap(name)} - </button> - ); - } - } - }, - renderFilters: function() { - var renderFilter = function(filter) { - var anyProjects = thisApp.checkProjects(filter[0], filter[1], true); - - if (anyProjects) { - return thisApp.renderFilter(filter[0], filter[1], filter[2]); - } else { - return ( - <span /> - ); - } - }; - - var thisApp = this, - statusFilters = [ - ['status', 'H', i18n.needs_funding], - ['status', 'A', i18n.active], - ['status', 'C', i18n.completed], - ['status', 'L', i18n.cancelled], - ['status', 'R', i18n.archived] - ], - globalFilters = [ - ['publishing_status', 'published', i18n.published], - ['is_public', true, i18n.public] - ]; - - var renderedStatusFilters = statusFilters.map(renderFilter); - var renderedGlobalFilters = globalFilters.map(renderFilter); - return ( <div className="row iatiFilters"> <div className="col-sm-8 filterGroup"> - 
<h3>{cap(i18n.project_selection)}</h3> - <p>{cap(i18n.global_selection)}</p> - {this.renderSelectAllButton()} + <h5>{cap(i18n.project_selection)}</h5> {this.renderNoErrorsButton()} {this.renderSelectPreviousButton()} - {renderedGlobalFilters} - <p>{cap(i18n.project_status)}</p> - {renderedStatusFilters} + {this.renderPublishedButton()} </div> <div className="col-sm-4 newIatiExport text-center"> <p>{this.state.selectedProjects.length} {i18n.projects_selected}</p> diff --git a/akvo/rsr/static/styles-src/main.css b/akvo/rsr/static/styles-src/main.css index dfe212d80c..1c20bb0574 100755 --- a/akvo/rsr/static/styles-src/main.css +++ b/akvo/rsr/static/styles-src/main.css @@ -3992,8 +3992,6 @@ div.iatiFilters { div.iatiFilters h3 { margin-top: 0; display: block; } - div.iatiFilters .newIatiExport p { - margin-top: 10%; } div.iatiFilters .newIatiExport .btn { display: inline-block; margin-left: auto; diff --git a/akvo/rsr/static/styles-src/main.scss b/akvo/rsr/static/styles-src/main.scss index a2a95f3e0b..81e1115e33 100755 --- a/akvo/rsr/static/styles-src/main.scss +++ b/akvo/rsr/static/styles-src/main.scss @@ -4690,9 +4690,6 @@ div.iatiFilters { display: block; } .newIatiExport { - p { - margin-top: 10%; - } .btn { display: inline-block; margin-left: auto;
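For context on how these `Feed` subclasses are typically consumed, below is a minimal URLconf sketch. The route patterns and URL names are illustrative assumptions only (and the exact URLconf style depends on the Django version in use); only the feed classes come from `akvo/rsr/feeds.py`.

```python
# Hypothetical URLconf sketch: a Django syndication Feed subclass is exposed by
# instantiating it directly in a URL pattern; captured kwargs such as
# project_id/org_id are passed through to get_object().
from django.conf.urls import url

from akvo.rsr.feeds import AllProjectUpdates, OrganisationUpdates, ProjectUpdates

urlpatterns = [
    url(r'^rss/project-updates/(?P<project_id>\d+)/$', ProjectUpdates(), name='project_updates_feed'),
    url(r'^rss/org-updates/(?P<org_id>\d+)/$', OrganisationUpdates(), name='org_updates_feed'),
    url(r'^rss/all-updates/$', AllProjectUpdates(), name='all_updates_feed'),
]
```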
kubeflow__pipelines-5054
TypeError occurs in gcp/automl/create_dataset_for_tables component ### What steps did you take: [gcp/automl/create_dataset_for_tables component](https://github.com/kubeflow/pipelines/tree/master/components/gcp/automl/create_dataset_for_tables)'s `create_time` output is declared as a string: https://github.com/kubeflow/pipelines/blob/ecb14f40bb819c0678589b6458892ece5369fa71/components/gcp/automl/create_dataset_for_tables/component.yaml#L15 However, a `google.protobuf.timestamp_pb2.Timestamp` is actually returned: https://github.com/kubeflow/pipelines/blob/ecb14f40bb819c0678589b6458892ece5369fa71/components/gcp/automl/create_dataset_for_tables/component.py#L54 FYI: the `dataset` object is an instance of the `google.cloud.automl_v1beta1.types.Dataset` class, and its [documentation](https://googleapis.dev/python/automl/0.4.0/gapic/v1beta1/types.html#google.cloud.automl_v1beta1.types.Dataset.create_time) says: > **create_time** > Output only. Timestamp when this dataset was created. ### What happened: `TypeError` occurs ![image](https://user-images.githubusercontent.com/96157/106237273-cf955a00-6241-11eb-91e2-2c53e4e82623.png) ### What did you expect to happen: Work. ### Environment: How did you deploy Kubeflow Pipelines (KFP)? AI Platform Pipelines KFP version: 1.0.4 KFP SDK version: 1.3.0 ### Anything else you would like to add: /kind bug
[ { "content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import NamedTuple\n\n\ndef automl_create_dataset_for_tables(\n gcp_project_id: str,\n gcp_region: str,\n display_name: str,\n description: str = None,\n tables_dataset_metadata: dict = {},\n retry=None, #=google.api_core.gapic_v1.method.DEFAULT,\n timeout: float = None, #=google.api_core.gapic_v1.method.DEFAULT,\n metadata: dict = None,\n) -> NamedTuple('Outputs', [('dataset_path', str), ('create_time', str), ('dataset_id', str), ('dataset_url', 'URI')]):\n '''automl_create_dataset_for_tables creates an empty Dataset for AutoML tables\n '''\n import google\n from google.cloud import automl\n client = automl.AutoMlClient()\n\n location_path = client.location_path(gcp_project_id, gcp_region)\n dataset_dict = {\n 'display_name': display_name,\n 'description': description,\n 'tables_dataset_metadata': tables_dataset_metadata,\n }\n dataset = client.create_dataset(\n location_path,\n dataset_dict,\n retry or google.api_core.gapic_v1.method.DEFAULT,\n timeout or google.api_core.gapic_v1.method.DEFAULT,\n metadata,\n )\n print(dataset)\n dataset_id = dataset.name.rsplit('/', 1)[-1]\n dataset_url = 'https://console.cloud.google.com/automl-tables/locations/{region}/datasets/{dataset_id}/schemav2?project={project_id}'.format(\n project_id=gcp_project_id,\n region=gcp_region,\n dataset_id=dataset_id,\n )\n return (dataset.name, dataset.create_time, dataset_id, dataset_url)\n\n\nif __name__ == '__main__':\n import kfp\n kfp.components.func_to_container_op(\n automl_create_dataset_for_tables,\n output_component_file='component.yaml',\n base_image='python:3.7',\n packages_to_install=['google-cloud-automl==0.4.0']\n )\n", "path": "components/gcp/automl/create_dataset_for_tables/component.py" } ]
[ { "content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import NamedTuple\n\n\ndef automl_create_dataset_for_tables(\n gcp_project_id: str,\n gcp_region: str,\n display_name: str,\n description: str = None,\n tables_dataset_metadata: dict = {},\n retry=None, #=google.api_core.gapic_v1.method.DEFAULT,\n timeout: float = None, #=google.api_core.gapic_v1.method.DEFAULT,\n metadata: dict = None,\n) -> NamedTuple('Outputs', [('dataset_path', str), ('create_time', str), ('dataset_id', str), ('dataset_url', 'URI')]):\n '''automl_create_dataset_for_tables creates an empty Dataset for AutoML tables\n '''\n import google\n from google.cloud import automl\n client = automl.AutoMlClient()\n\n location_path = client.location_path(gcp_project_id, gcp_region)\n dataset_dict = {\n 'display_name': display_name,\n 'description': description,\n 'tables_dataset_metadata': tables_dataset_metadata,\n }\n dataset = client.create_dataset(\n location_path,\n dataset_dict,\n retry or google.api_core.gapic_v1.method.DEFAULT,\n timeout or google.api_core.gapic_v1.method.DEFAULT,\n metadata,\n )\n print(dataset)\n dataset_id = dataset.name.rsplit('/', 1)[-1]\n dataset_url = 'https://console.cloud.google.com/automl-tables/locations/{region}/datasets/{dataset_id}/schemav2?project={project_id}'.format(\n project_id=gcp_project_id,\n region=gcp_region,\n dataset_id=dataset_id,\n )\n return (dataset.name, str(dataset.create_time), dataset_id, dataset_url)\n\n\nif __name__ == '__main__':\n import kfp\n kfp.components.func_to_container_op(\n automl_create_dataset_for_tables,\n output_component_file='component.yaml',\n base_image='python:3.7',\n packages_to_install=['google-cloud-automl==0.4.0']\n )\n", "path": "components/gcp/automl/create_dataset_for_tables/component.py" } ]
diff --git a/components/gcp/automl/create_dataset_for_tables/component.py b/components/gcp/automl/create_dataset_for_tables/component.py index 9239e780b15..968a8ad55d0 100644 --- a/components/gcp/automl/create_dataset_for_tables/component.py +++ b/components/gcp/automl/create_dataset_for_tables/component.py @@ -51,7 +51,7 @@ def automl_create_dataset_for_tables( region=gcp_region, dataset_id=dataset_id, ) - return (dataset.name, dataset.create_time, dataset_id, dataset_url) + return (dataset.name, str(dataset.create_time), dataset_id, dataset_url) if __name__ == '__main__': diff --git a/components/gcp/automl/create_dataset_for_tables/component.yaml b/components/gcp/automl/create_dataset_for_tables/component.yaml index 74257db9fdd..1a79fc40bdd 100644 --- a/components/gcp/automl/create_dataset_for_tables/component.yaml +++ b/components/gcp/automl/create_dataset_for_tables/component.yaml @@ -65,7 +65,7 @@ implementation: region=gcp_region, dataset_id=dataset_id, ) - return (dataset.name, dataset.create_time, dataset_id, dataset_url) + return (dataset.name, str(dataset.create_time), dataset_id, dataset_url) import json def _serialize_str(str_value: str) -> str:
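A note on the fix above: wrapping `create_time` in `str()` produces the protobuf text-format representation, which satisfies the component's `str`-typed output. A minimal standalone sketch of the two common ways to stringify a `Timestamp` (assuming only the `protobuf` package is installed; nothing here is taken from the component itself):

```python
# Standalone illustration: converting a google.protobuf Timestamp to a plain
# string before returning it from an output declared as `str`.
from google.protobuf.timestamp_pb2 import Timestamp

ts = Timestamp()
ts.GetCurrentTime()  # populate with "now" just for demonstration

text_form = str(ts)            # protobuf text format, e.g. "seconds: 1611810000\nnanos: 123000000\n"
json_form = ts.ToJsonString()  # RFC 3339, e.g. "2021-01-28T06:20:00.123Z"
print(text_form)
print(json_form)
```

The merged change uses the first form (`str(dataset.create_time)`); `ToJsonString()` would be an alternative if an RFC 3339 timestamp were preferred.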
streamlink__streamlink-4628
plugins.twitcasting: Writes JSON into video files when it shouldn't ### Checklist - [X] This is a plugin issue and not a different kind of issue - [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink) - [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22) - [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master) ### Streamlink version Latest build from the master branch ### Description https://github.com/streamlink/streamlink/pull/4608 introduced a bug of JSON being written to the output file. - When running streamlink on a channel that is live but only for members, using `-o out.mp4` flag to output to a file, it creates a video file containing just a single JSON line in it: ``` $ cat out.mp4 {"type":"status","code":403,"text":"Access Forbidden"} ``` The expected behavior is that it doesn't create the file in such situation, like it used to behave before https://github.com/streamlink/streamlink/pull/4608 fixes were made. - It also adds `{"type":"status","code":504,"text":"End of Live"}` at the end of video files when the stream ends: ``` $ xxd -s -128 -c 16 out.ts 24b5bee9: 5c75 7cc6 7e38 e099 55d9 6257 59d8 eb6e \u|.~8..U.bWY..n 24b5bef9: b7aa 49bb ef3a dd18 7767 8c77 7dc6 6ade ..I..:..wg.w}.j. 24b5bf09: 6d54 2175 2acf 0926 400f 0449 2bc6 a816 mT!u*..&@..I+... 24b5bf19: 3523 72e9 db4d 6c5a 5aba ec75 3c0a ad72 5#r..MlZZ..u<..r 24b5bf29: 2258 0b2f ebc2 b50a 7ed3 bbbd 8d30 c77b "X./....~....0.{ 24b5bf39: 2274 7970 6522 3a22 7374 6174 7573 222c "type":"status", 24b5bf49: 2263 6f64 6522 3a35 3034 2c22 7465 7874 "code":504,"text 24b5bf59: 223a 2245 6e64 206f 6620 4c69 7665 227d ":"End of Live"} ``` ![streamlink_json](https://user-images.githubusercontent.com/1855294/175794392-7bbaa204-60ac-4170-962b-3d6dac0be9ae.png) - Perhaps it shouldn't be writing any `response['type'] == 'status'` to the file? - While at it, maybe there is something else that it's writing to a video file that it shouldn't? As mentioned in https://github.com/streamlink/streamlink/issues/4604#issuecomment-1166177130, Twitcasting also sends `{"type":"event","code":100,"text":""}` sometimes. Would that get written into the video file too? Is that something that should be written into it? 
### Debug log ```text [cli][debug] OS: Linux-5.10.0-14-amd64-x86_64-with-glibc2.31 [cli][debug] Python: 3.9.2 [cli][debug] Streamlink: 4.1.0+37.g2c564dbe [cli][debug] Dependencies: [cli][debug] isodate: 0.6.0 [cli][debug] lxml: 4.7.1 [cli][debug] pycountry: 20.7.3 [cli][debug] pycryptodome: 3.10.1 [cli][debug] PySocks: 1.7.1 [cli][debug] requests: 2.28.0 [cli][debug] websocket-client: 1.2.3 [cli][debug] Arguments: [cli][debug] url=https://twitcasting.tv/[REDACTED] [cli][debug] stream=['best'] [cli][debug] --config=['../config'] [cli][debug] --loglevel=debug [cli][debug] --output=[REDACTED] [cli][debug] --retry-streams=1.0 [cli][debug] --retry-max=300 [cli][info] Found matching plugin twitcasting for URL https://twitcasting.tv/[REDACTED] [plugins.twitcasting][debug] Live stream info: {'movie': {'id': [REDACTED], 'live': True}, 'fmp4': {'host': '202-218-171-197.twitcasting.tv', 'proto': 'wss', 'source': False, 'mobilesource': False}} [plugins.twitcasting][debug] Real stream url: wss://202-218-171-197.twitcasting.tv/ws.app/stream/[REDACTED]/fmp4/bd/1/1500?mode=base [cli][info] Available streams: base (worst, best) [cli][info] Opening stream: base (stream) [cli][info] Writing output to [REDACTED] [cli][debug] Checking file output [plugin.api.websocket][debug] Connecting to: wss://202-218-171-197.twitcasting.tv/ws.app/stream/[REDACTED]/fmp4/bd/1/1500?mode=base [cli][debug] Pre-buffering 8192 bytes [plugin.api.websocket][debug] Connected: wss://202-218-171-197.twitcasting.tv/ws.app/stream/[REDACTED]/fmp4/bd/1/1500?mode=base [cli][debug] Writing stream to output [plugin.api.websocket][error] Connection to remote host was lost. [plugin.api.websocket][debug] Closed: wss://202-218-171-197.twitcasting.tv/ws.app/stream/[REDACTED]/fmp4/bd/1/1500?mode=base [cli][info] Stream ended [cli][info] Closing currently open stream... ```
[ { "content": "\"\"\"\n$description Global live broadcasting and live broadcast archiving social platform.\n$url twitcasting.tv\n$type live\n\"\"\"\n\nimport hashlib\nimport logging\nimport re\n\nfrom streamlink.buffers import RingBuffer\nfrom streamlink.plugin import Plugin, PluginArgument, PluginArguments, PluginError, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.plugin.api.websocket import WebsocketClient\nfrom streamlink.stream.stream import Stream, StreamIO\nfrom streamlink.utils.url import update_qsd\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://twitcasting\\.tv/(?P<channel>[^/]+)\"\n))\nclass TwitCasting(Plugin):\n arguments = PluginArguments(\n PluginArgument(\n \"password\",\n sensitive=True,\n metavar=\"PASSWORD\",\n help=\"Password for private Twitcasting streams.\"\n )\n )\n _STREAM_INFO_URL = \"https://twitcasting.tv/streamserver.php?target={channel}&mode=client\"\n _STREAM_REAL_URL = \"{proto}://{host}/ws.app/stream/{movie_id}/fmp4/bd/1/1500?mode={mode}\"\n\n _STREAM_INFO_SCHEMA = validate.Schema({\n validate.optional(\"movie\"): {\n \"id\": int,\n \"live\": bool\n },\n validate.optional(\"fmp4\"): {\n \"host\": str,\n \"proto\": str,\n \"source\": bool,\n \"mobilesource\": bool\n }\n })\n\n def __init__(self, url):\n super().__init__(url)\n self.channel = self.match.group(\"channel\")\n\n def _get_streams(self):\n stream_info = self._get_stream_info()\n log.debug(f\"Live stream info: {stream_info}\")\n\n if not stream_info.get(\"movie\") or not stream_info[\"movie\"][\"live\"]:\n raise PluginError(\"The live stream is offline\")\n\n if not stream_info.get(\"fmp4\"):\n raise PluginError(\"Login required\")\n\n # Keys are already validated by schema above\n proto = stream_info[\"fmp4\"][\"proto\"]\n host = stream_info[\"fmp4\"][\"host\"]\n movie_id = stream_info[\"movie\"][\"id\"]\n\n if stream_info[\"fmp4\"][\"source\"]:\n mode = \"main\" # High quality\n elif stream_info[\"fmp4\"][\"mobilesource\"]:\n mode = \"mobilesource\" # Medium quality\n else:\n mode = \"base\" # Low quality\n\n if (proto == '') or (host == '') or (not movie_id):\n raise PluginError(f\"No stream available for user {self.channel}\")\n\n real_stream_url = self._STREAM_REAL_URL.format(proto=proto, host=host, movie_id=movie_id, mode=mode)\n\n password = self.options.get(\"password\")\n if password is not None:\n password_hash = hashlib.md5(password.encode()).hexdigest()\n real_stream_url = update_qsd(real_stream_url, {\"word\": password_hash})\n\n log.debug(f\"Real stream url: {real_stream_url}\")\n\n return {mode: TwitCastingStream(session=self.session, url=real_stream_url)}\n\n def _get_stream_info(self):\n url = self._STREAM_INFO_URL.format(channel=self.channel)\n res = self.session.http.get(url)\n return self.session.http.json(res, schema=self._STREAM_INFO_SCHEMA)\n\n\nclass TwitCastingWsClient(WebsocketClient):\n def __init__(self, buffer: RingBuffer, *args, **kwargs):\n self.buffer = buffer\n super().__init__(*args, **kwargs)\n\n def on_close(self, *args, **kwargs):\n super().on_close(*args, **kwargs)\n self.buffer.close()\n\n def on_data(self, wsapp, data, data_type, cont):\n if data_type == self.OPCODE_TEXT:\n data = bytes(data, \"utf-8\")\n\n try:\n self.buffer.write(data)\n except Exception as err:\n log.error(err)\n self.close()\n\n\nclass TwitCastingReader(StreamIO):\n def __init__(self, stream: \"TwitCastingStream\", timeout=None):\n super().__init__()\n self.session = stream.session\n self.stream = stream\n self.timeout = 
timeout or self.session.options.get(\"stream-timeout\")\n\n buffer_size = self.session.get_option(\"ringbuffer-size\")\n self.buffer = RingBuffer(buffer_size)\n\n self.wsclient = TwitCastingWsClient(\n self.buffer,\n stream.session,\n stream.url,\n origin=\"https://twitcasting.tv/\"\n )\n\n def open(self):\n self.wsclient.start()\n\n def close(self):\n self.wsclient.close()\n self.buffer.close()\n\n def read(self, size):\n return self.buffer.read(\n size,\n block=self.wsclient.is_alive(),\n timeout=self.timeout\n )\n\n\nclass TwitCastingStream(Stream):\n def __init__(self, session, url):\n super().__init__(session)\n self.url = url\n\n def to_url(self):\n return self.url\n\n def open(self):\n reader = TwitCastingReader(self)\n reader.open()\n return reader\n\n\n__plugin__ = TwitCasting\n", "path": "src/streamlink/plugins/twitcasting.py" } ]
[ { "content": "\"\"\"\n$description Global live broadcasting and live broadcast archiving social platform.\n$url twitcasting.tv\n$type live\n\"\"\"\n\nimport hashlib\nimport logging\nimport re\n\nfrom streamlink.buffers import RingBuffer\nfrom streamlink.plugin import Plugin, PluginArgument, PluginArguments, PluginError, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.plugin.api.websocket import WebsocketClient\nfrom streamlink.stream.stream import Stream, StreamIO\nfrom streamlink.utils.url import update_qsd\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://twitcasting\\.tv/(?P<channel>[^/]+)\"\n))\nclass TwitCasting(Plugin):\n arguments = PluginArguments(\n PluginArgument(\n \"password\",\n sensitive=True,\n metavar=\"PASSWORD\",\n help=\"Password for private Twitcasting streams.\"\n )\n )\n _STREAM_INFO_URL = \"https://twitcasting.tv/streamserver.php?target={channel}&mode=client\"\n _STREAM_REAL_URL = \"{proto}://{host}/ws.app/stream/{movie_id}/fmp4/bd/1/1500?mode={mode}\"\n\n _STREAM_INFO_SCHEMA = validate.Schema({\n validate.optional(\"movie\"): {\n \"id\": int,\n \"live\": bool\n },\n validate.optional(\"fmp4\"): {\n \"host\": str,\n \"proto\": str,\n \"source\": bool,\n \"mobilesource\": bool\n }\n })\n\n def __init__(self, url):\n super().__init__(url)\n self.channel = self.match.group(\"channel\")\n\n def _get_streams(self):\n stream_info = self._get_stream_info()\n log.debug(f\"Live stream info: {stream_info}\")\n\n if not stream_info.get(\"movie\") or not stream_info[\"movie\"][\"live\"]:\n raise PluginError(\"The live stream is offline\")\n\n if not stream_info.get(\"fmp4\"):\n raise PluginError(\"Login required\")\n\n # Keys are already validated by schema above\n proto = stream_info[\"fmp4\"][\"proto\"]\n host = stream_info[\"fmp4\"][\"host\"]\n movie_id = stream_info[\"movie\"][\"id\"]\n\n if stream_info[\"fmp4\"][\"source\"]:\n mode = \"main\" # High quality\n elif stream_info[\"fmp4\"][\"mobilesource\"]:\n mode = \"mobilesource\" # Medium quality\n else:\n mode = \"base\" # Low quality\n\n if (proto == '') or (host == '') or (not movie_id):\n raise PluginError(f\"No stream available for user {self.channel}\")\n\n real_stream_url = self._STREAM_REAL_URL.format(proto=proto, host=host, movie_id=movie_id, mode=mode)\n\n password = self.options.get(\"password\")\n if password is not None:\n password_hash = hashlib.md5(password.encode()).hexdigest()\n real_stream_url = update_qsd(real_stream_url, {\"word\": password_hash})\n\n log.debug(f\"Real stream url: {real_stream_url}\")\n\n return {mode: TwitCastingStream(session=self.session, url=real_stream_url)}\n\n def _get_stream_info(self):\n url = self._STREAM_INFO_URL.format(channel=self.channel)\n res = self.session.http.get(url)\n return self.session.http.json(res, schema=self._STREAM_INFO_SCHEMA)\n\n\nclass TwitCastingWsClient(WebsocketClient):\n def __init__(self, buffer: RingBuffer, *args, **kwargs):\n self.buffer = buffer\n super().__init__(*args, **kwargs)\n\n def on_close(self, *args, **kwargs):\n super().on_close(*args, **kwargs)\n self.buffer.close()\n\n def on_data(self, wsapp, data, data_type, cont):\n if data_type == self.OPCODE_TEXT:\n return\n\n try:\n self.buffer.write(data)\n except Exception as err:\n log.error(err)\n self.close()\n\n\nclass TwitCastingReader(StreamIO):\n def __init__(self, stream: \"TwitCastingStream\", timeout=None):\n super().__init__()\n self.session = stream.session\n self.stream = stream\n self.timeout = timeout or 
self.session.options.get(\"stream-timeout\")\n\n buffer_size = self.session.get_option(\"ringbuffer-size\")\n self.buffer = RingBuffer(buffer_size)\n\n self.wsclient = TwitCastingWsClient(\n self.buffer,\n stream.session,\n stream.url,\n origin=\"https://twitcasting.tv/\"\n )\n\n def open(self):\n self.wsclient.start()\n\n def close(self):\n self.wsclient.close()\n self.buffer.close()\n\n def read(self, size):\n return self.buffer.read(\n size,\n block=self.wsclient.is_alive(),\n timeout=self.timeout\n )\n\n\nclass TwitCastingStream(Stream):\n def __init__(self, session, url):\n super().__init__(session)\n self.url = url\n\n def to_url(self):\n return self.url\n\n def open(self):\n reader = TwitCastingReader(self)\n reader.open()\n return reader\n\n\n__plugin__ = TwitCasting\n", "path": "src/streamlink/plugins/twitcasting.py" } ]
diff --git a/src/streamlink/plugins/twitcasting.py b/src/streamlink/plugins/twitcasting.py index e2f8fb097e9..206845d4062 100644 --- a/src/streamlink/plugins/twitcasting.py +++ b/src/streamlink/plugins/twitcasting.py @@ -104,7 +104,7 @@ def on_close(self, *args, **kwargs): def on_data(self, wsapp, data, data_type, cont): if data_type == self.OPCODE_TEXT: - data = bytes(data, "utf-8") + return try: self.buffer.write(data)
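The change above drops WebSocket text frames instead of encoding them and writing them into the stream buffer. A standalone sketch of the same filtering idea using `websocket-client` directly (the URL and output filename are placeholders, not values from the plugin):

```python
# Illustrative only: keep binary fMP4 frames, skip JSON status/event frames
# such as {"type":"status","code":504,"text":"End of Live"}.
import websocket


def on_data(wsapp, data, data_type, cont):
    if data_type == websocket.ABNF.OPCODE_TEXT:
        return  # status/event metadata, not video data
    with open("out.ts", "ab") as f:
        f.write(data)


wsapp = websocket.WebSocketApp("wss://example.invalid/stream", on_data=on_data)
# wsapp.run_forever()  # not called here because the URL above is a placeholder
```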
huggingface__diffusers-6012
logging.remove_handler() has a faulty assertion, doesn't allow registered handlers to be removed ### Describe the bug in `utils/logging.py` there the function remove_handler seems to have a faulty assertion in it. ```py def add_handler(handler: logging.Handler) -> None: """adds a handler to the HuggingFace Diffusers' root logger.""" _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(handler) def remove_handler(handler: logging.Handler) -> None: """removes given handler from the HuggingFace Diffusers' root logger.""" _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers # <<< THIS ONE _get_library_root_logger().removeHandler(handler) ``` That line seems to have a `not` that shouldn't be there. (the `not in` to be precise) Normally I'd just do a PR to remove it, but as I'm not that familiar with the codebase, nor Python in general, and don't yet have a solid grasp of what these things actually do (I'm just playing around trying to familiarize myself), I decided to make an issue instead so people who actually know the codebase can do it. ### Reproduction ```py from diffusers import logging from logging import Handler class TestHandler(Handler): def __init__(self): super().__init__() def emit(self): pass handler = TestHandler() logging.add_handler(handler) logging.remove_handler(handler) ``` ### Logs ```shell Traceback (most recent call last): File ".\test.py", line 14, in <module> logging.remove_handler(handler) File "C:\Users\XXX\XXX\venv\lib\site-packages\diffusers\utils\logging.py", line 221, in remove_handler assert handler is not None and handler not in _get_library_root_logger().handlers AssertionError ``` ### System Info - `diffusers` version: 0.3.0 - Platform: Windows-10-10.0.19041-SP0 - Python version: 3.8.9 - PyTorch version (GPU?): 1.12.1+cpu (False) - Huggingface_hub version: 0.9.1 - Transformers version: 4.21.3 - Using GPU in script?: no - Using distributed or parallel set-up in script?: Don't understand the question, but seems irrelevant.
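A minimal standalone reproduction of the intended precondition, using the plain `logging` module rather than the diffusers helpers (so this illustrates the logic only, not the library's actual code):

```python
# The shipped assertion reads `handler not in ...`, which only holds when the
# handler is NOT registered - the opposite of what removal requires.
import logging

root = logging.getLogger("diffusers")
handler = logging.StreamHandler()
root.addHandler(handler)

# Intended check: the handler must already be attached before it is removed.
assert handler is not None and handler in root.handlers
root.removeHandler(handler)
assert handler not in root.handlers
```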
[ { "content": "# coding=utf-8\n# Copyright 2023 Optuna, Hugging Face\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Logging utilities.\"\"\"\n\nimport logging\nimport os\nimport sys\nimport threading\nfrom logging import (\n CRITICAL, # NOQA\n DEBUG, # NOQA\n ERROR, # NOQA\n FATAL, # NOQA\n INFO, # NOQA\n NOTSET, # NOQA\n WARN, # NOQA\n WARNING, # NOQA\n)\nfrom typing import Dict, Optional\n\nfrom tqdm import auto as tqdm_lib\n\n\n_lock = threading.Lock()\n_default_handler: Optional[logging.Handler] = None\n\nlog_levels = {\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n}\n\n_default_log_level = logging.WARNING\n\n_tqdm_active = True\n\n\ndef _get_default_logging_level() -> int:\n \"\"\"\n If DIFFUSERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is\n not - fall back to `_default_log_level`\n \"\"\"\n env_level_str = os.getenv(\"DIFFUSERS_VERBOSITY\", None)\n if env_level_str:\n if env_level_str in log_levels:\n return log_levels[env_level_str]\n else:\n logging.getLogger().warning(\n f\"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, \"\n f\"has to be one of: { ', '.join(log_levels.keys()) }\"\n )\n return _default_log_level\n\n\ndef _get_library_name() -> str:\n return __name__.split(\".\")[0]\n\n\ndef _get_library_root_logger() -> logging.Logger:\n return logging.getLogger(_get_library_name())\n\n\ndef _configure_library_root_logger() -> None:\n global _default_handler\n\n with _lock:\n if _default_handler:\n # This library has already configured the library root logger.\n return\n _default_handler = logging.StreamHandler() # Set sys.stderr as stream.\n _default_handler.flush = sys.stderr.flush\n\n # Apply our default configuration to the library root logger.\n library_root_logger = _get_library_root_logger()\n library_root_logger.addHandler(_default_handler)\n library_root_logger.setLevel(_get_default_logging_level())\n library_root_logger.propagate = False\n\n\ndef _reset_library_root_logger() -> None:\n global _default_handler\n\n with _lock:\n if not _default_handler:\n return\n\n library_root_logger = _get_library_root_logger()\n library_root_logger.removeHandler(_default_handler)\n library_root_logger.setLevel(logging.NOTSET)\n _default_handler = None\n\n\ndef get_log_levels_dict() -> Dict[str, int]:\n return log_levels\n\n\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\n \"\"\"\n Return a logger with the specified name.\n\n This function is not supposed to be directly accessed unless you are writing a custom diffusers module.\n \"\"\"\n\n if name is None:\n name = _get_library_name()\n\n _configure_library_root_logger()\n return logging.getLogger(name)\n\n\ndef get_verbosity() -> int:\n \"\"\"\n Return the current level for the 🤗 Diffusers' root logger as an `int`.\n\n Returns:\n `int`:\n Logging level integers which can be one of:\n\n - `50`: `diffusers.logging.CRITICAL` or 
`diffusers.logging.FATAL`\n - `40`: `diffusers.logging.ERROR`\n - `30`: `diffusers.logging.WARNING` or `diffusers.logging.WARN`\n - `20`: `diffusers.logging.INFO`\n - `10`: `diffusers.logging.DEBUG`\n\n \"\"\"\n\n _configure_library_root_logger()\n return _get_library_root_logger().getEffectiveLevel()\n\n\ndef set_verbosity(verbosity: int) -> None:\n \"\"\"\n Set the verbosity level for the 🤗 Diffusers' root logger.\n\n Args:\n verbosity (`int`):\n Logging level which can be one of:\n\n - `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL`\n - `diffusers.logging.ERROR`\n - `diffusers.logging.WARNING` or `diffusers.logging.WARN`\n - `diffusers.logging.INFO`\n - `diffusers.logging.DEBUG`\n \"\"\"\n\n _configure_library_root_logger()\n _get_library_root_logger().setLevel(verbosity)\n\n\ndef set_verbosity_info() -> None:\n \"\"\"Set the verbosity to the `INFO` level.\"\"\"\n return set_verbosity(INFO)\n\n\ndef set_verbosity_warning() -> None:\n \"\"\"Set the verbosity to the `WARNING` level.\"\"\"\n return set_verbosity(WARNING)\n\n\ndef set_verbosity_debug() -> None:\n \"\"\"Set the verbosity to the `DEBUG` level.\"\"\"\n return set_verbosity(DEBUG)\n\n\ndef set_verbosity_error() -> None:\n \"\"\"Set the verbosity to the `ERROR` level.\"\"\"\n return set_verbosity(ERROR)\n\n\ndef disable_default_handler() -> None:\n \"\"\"Disable the default handler of the 🤗 Diffusers' root logger.\"\"\"\n\n _configure_library_root_logger()\n\n assert _default_handler is not None\n _get_library_root_logger().removeHandler(_default_handler)\n\n\ndef enable_default_handler() -> None:\n \"\"\"Enable the default handler of the 🤗 Diffusers' root logger.\"\"\"\n\n _configure_library_root_logger()\n\n assert _default_handler is not None\n _get_library_root_logger().addHandler(_default_handler)\n\n\ndef add_handler(handler: logging.Handler) -> None:\n \"\"\"adds a handler to the HuggingFace Diffusers' root logger.\"\"\"\n\n _configure_library_root_logger()\n\n assert handler is not None\n _get_library_root_logger().addHandler(handler)\n\n\ndef remove_handler(handler: logging.Handler) -> None:\n \"\"\"removes given handler from the HuggingFace Diffusers' root logger.\"\"\"\n\n _configure_library_root_logger()\n\n assert handler is not None and handler not in _get_library_root_logger().handlers\n _get_library_root_logger().removeHandler(handler)\n\n\ndef disable_propagation() -> None:\n \"\"\"\n Disable propagation of the library log outputs. Note that log propagation is disabled by default.\n \"\"\"\n\n _configure_library_root_logger()\n _get_library_root_logger().propagate = False\n\n\ndef enable_propagation() -> None:\n \"\"\"\n Enable propagation of the library log outputs. Please disable the HuggingFace Diffusers' default handler to prevent\n double logging if the root logger has been configured.\n \"\"\"\n\n _configure_library_root_logger()\n _get_library_root_logger().propagate = True\n\n\ndef enable_explicit_format() -> None:\n \"\"\"\n Enable explicit formatting for every 🤗 Diffusers' logger. 
The explicit formatter is as follows:\n ```\n [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE\n ```\n All handlers currently bound to the root logger are affected by this method.\n \"\"\"\n handlers = _get_library_root_logger().handlers\n\n for handler in handlers:\n formatter = logging.Formatter(\"[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s\")\n handler.setFormatter(formatter)\n\n\ndef reset_format() -> None:\n \"\"\"\n Resets the formatting for 🤗 Diffusers' loggers.\n\n All handlers currently bound to the root logger are affected by this method.\n \"\"\"\n handlers = _get_library_root_logger().handlers\n\n for handler in handlers:\n handler.setFormatter(None)\n\n\ndef warning_advice(self, *args, **kwargs) -> None:\n \"\"\"\n This method is identical to `logger.warning()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this\n warning will not be printed\n \"\"\"\n no_advisory_warnings = os.getenv(\"DIFFUSERS_NO_ADVISORY_WARNINGS\", False)\n if no_advisory_warnings:\n return\n self.warning(*args, **kwargs)\n\n\nlogging.Logger.warning_advice = warning_advice\n\n\nclass EmptyTqdm:\n \"\"\"Dummy tqdm which doesn't do anything.\"\"\"\n\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\n self._iterator = args[0] if args else None\n\n def __iter__(self):\n return iter(self._iterator)\n\n def __getattr__(self, _):\n \"\"\"Return empty function.\"\"\"\n\n def empty_fn(*args, **kwargs): # pylint: disable=unused-argument\n return\n\n return empty_fn\n\n def __enter__(self):\n return self\n\n def __exit__(self, type_, value, traceback):\n return\n\n\nclass _tqdm_cls:\n def __call__(self, *args, **kwargs):\n if _tqdm_active:\n return tqdm_lib.tqdm(*args, **kwargs)\n else:\n return EmptyTqdm(*args, **kwargs)\n\n def set_lock(self, *args, **kwargs):\n self._lock = None\n if _tqdm_active:\n return tqdm_lib.tqdm.set_lock(*args, **kwargs)\n\n def get_lock(self):\n if _tqdm_active:\n return tqdm_lib.tqdm.get_lock()\n\n\ntqdm = _tqdm_cls()\n\n\ndef is_progress_bar_enabled() -> bool:\n \"\"\"Return a boolean indicating whether tqdm progress bars are enabled.\"\"\"\n global _tqdm_active\n return bool(_tqdm_active)\n\n\ndef enable_progress_bar() -> None:\n \"\"\"Enable tqdm progress bar.\"\"\"\n global _tqdm_active\n _tqdm_active = True\n\n\ndef disable_progress_bar() -> None:\n \"\"\"Disable tqdm progress bar.\"\"\"\n global _tqdm_active\n _tqdm_active = False\n", "path": "src/diffusers/utils/logging.py" } ]
[ { "content": "# coding=utf-8\n# Copyright 2023 Optuna, Hugging Face\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Logging utilities.\"\"\"\n\nimport logging\nimport os\nimport sys\nimport threading\nfrom logging import (\n CRITICAL, # NOQA\n DEBUG, # NOQA\n ERROR, # NOQA\n FATAL, # NOQA\n INFO, # NOQA\n NOTSET, # NOQA\n WARN, # NOQA\n WARNING, # NOQA\n)\nfrom typing import Dict, Optional\n\nfrom tqdm import auto as tqdm_lib\n\n\n_lock = threading.Lock()\n_default_handler: Optional[logging.Handler] = None\n\nlog_levels = {\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n}\n\n_default_log_level = logging.WARNING\n\n_tqdm_active = True\n\n\ndef _get_default_logging_level() -> int:\n \"\"\"\n If DIFFUSERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is\n not - fall back to `_default_log_level`\n \"\"\"\n env_level_str = os.getenv(\"DIFFUSERS_VERBOSITY\", None)\n if env_level_str:\n if env_level_str in log_levels:\n return log_levels[env_level_str]\n else:\n logging.getLogger().warning(\n f\"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, \"\n f\"has to be one of: { ', '.join(log_levels.keys()) }\"\n )\n return _default_log_level\n\n\ndef _get_library_name() -> str:\n return __name__.split(\".\")[0]\n\n\ndef _get_library_root_logger() -> logging.Logger:\n return logging.getLogger(_get_library_name())\n\n\ndef _configure_library_root_logger() -> None:\n global _default_handler\n\n with _lock:\n if _default_handler:\n # This library has already configured the library root logger.\n return\n _default_handler = logging.StreamHandler() # Set sys.stderr as stream.\n _default_handler.flush = sys.stderr.flush\n\n # Apply our default configuration to the library root logger.\n library_root_logger = _get_library_root_logger()\n library_root_logger.addHandler(_default_handler)\n library_root_logger.setLevel(_get_default_logging_level())\n library_root_logger.propagate = False\n\n\ndef _reset_library_root_logger() -> None:\n global _default_handler\n\n with _lock:\n if not _default_handler:\n return\n\n library_root_logger = _get_library_root_logger()\n library_root_logger.removeHandler(_default_handler)\n library_root_logger.setLevel(logging.NOTSET)\n _default_handler = None\n\n\ndef get_log_levels_dict() -> Dict[str, int]:\n return log_levels\n\n\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\n \"\"\"\n Return a logger with the specified name.\n\n This function is not supposed to be directly accessed unless you are writing a custom diffusers module.\n \"\"\"\n\n if name is None:\n name = _get_library_name()\n\n _configure_library_root_logger()\n return logging.getLogger(name)\n\n\ndef get_verbosity() -> int:\n \"\"\"\n Return the current level for the 🤗 Diffusers' root logger as an `int`.\n\n Returns:\n `int`:\n Logging level integers which can be one of:\n\n - `50`: `diffusers.logging.CRITICAL` or 
`diffusers.logging.FATAL`\n - `40`: `diffusers.logging.ERROR`\n - `30`: `diffusers.logging.WARNING` or `diffusers.logging.WARN`\n - `20`: `diffusers.logging.INFO`\n - `10`: `diffusers.logging.DEBUG`\n\n \"\"\"\n\n _configure_library_root_logger()\n return _get_library_root_logger().getEffectiveLevel()\n\n\ndef set_verbosity(verbosity: int) -> None:\n \"\"\"\n Set the verbosity level for the 🤗 Diffusers' root logger.\n\n Args:\n verbosity (`int`):\n Logging level which can be one of:\n\n - `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL`\n - `diffusers.logging.ERROR`\n - `diffusers.logging.WARNING` or `diffusers.logging.WARN`\n - `diffusers.logging.INFO`\n - `diffusers.logging.DEBUG`\n \"\"\"\n\n _configure_library_root_logger()\n _get_library_root_logger().setLevel(verbosity)\n\n\ndef set_verbosity_info() -> None:\n \"\"\"Set the verbosity to the `INFO` level.\"\"\"\n return set_verbosity(INFO)\n\n\ndef set_verbosity_warning() -> None:\n \"\"\"Set the verbosity to the `WARNING` level.\"\"\"\n return set_verbosity(WARNING)\n\n\ndef set_verbosity_debug() -> None:\n \"\"\"Set the verbosity to the `DEBUG` level.\"\"\"\n return set_verbosity(DEBUG)\n\n\ndef set_verbosity_error() -> None:\n \"\"\"Set the verbosity to the `ERROR` level.\"\"\"\n return set_verbosity(ERROR)\n\n\ndef disable_default_handler() -> None:\n \"\"\"Disable the default handler of the 🤗 Diffusers' root logger.\"\"\"\n\n _configure_library_root_logger()\n\n assert _default_handler is not None\n _get_library_root_logger().removeHandler(_default_handler)\n\n\ndef enable_default_handler() -> None:\n \"\"\"Enable the default handler of the 🤗 Diffusers' root logger.\"\"\"\n\n _configure_library_root_logger()\n\n assert _default_handler is not None\n _get_library_root_logger().addHandler(_default_handler)\n\n\ndef add_handler(handler: logging.Handler) -> None:\n \"\"\"adds a handler to the HuggingFace Diffusers' root logger.\"\"\"\n\n _configure_library_root_logger()\n\n assert handler is not None\n _get_library_root_logger().addHandler(handler)\n\n\ndef remove_handler(handler: logging.Handler) -> None:\n \"\"\"removes given handler from the HuggingFace Diffusers' root logger.\"\"\"\n\n _configure_library_root_logger()\n\n assert handler is not None and handler in _get_library_root_logger().handlers\n _get_library_root_logger().removeHandler(handler)\n\n\ndef disable_propagation() -> None:\n \"\"\"\n Disable propagation of the library log outputs. Note that log propagation is disabled by default.\n \"\"\"\n\n _configure_library_root_logger()\n _get_library_root_logger().propagate = False\n\n\ndef enable_propagation() -> None:\n \"\"\"\n Enable propagation of the library log outputs. Please disable the HuggingFace Diffusers' default handler to prevent\n double logging if the root logger has been configured.\n \"\"\"\n\n _configure_library_root_logger()\n _get_library_root_logger().propagate = True\n\n\ndef enable_explicit_format() -> None:\n \"\"\"\n Enable explicit formatting for every 🤗 Diffusers' logger. 
The explicit formatter is as follows:\n ```\n [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE\n ```\n All handlers currently bound to the root logger are affected by this method.\n \"\"\"\n handlers = _get_library_root_logger().handlers\n\n for handler in handlers:\n formatter = logging.Formatter(\"[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s\")\n handler.setFormatter(formatter)\n\n\ndef reset_format() -> None:\n \"\"\"\n Resets the formatting for 🤗 Diffusers' loggers.\n\n All handlers currently bound to the root logger are affected by this method.\n \"\"\"\n handlers = _get_library_root_logger().handlers\n\n for handler in handlers:\n handler.setFormatter(None)\n\n\ndef warning_advice(self, *args, **kwargs) -> None:\n \"\"\"\n This method is identical to `logger.warning()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this\n warning will not be printed\n \"\"\"\n no_advisory_warnings = os.getenv(\"DIFFUSERS_NO_ADVISORY_WARNINGS\", False)\n if no_advisory_warnings:\n return\n self.warning(*args, **kwargs)\n\n\nlogging.Logger.warning_advice = warning_advice\n\n\nclass EmptyTqdm:\n \"\"\"Dummy tqdm which doesn't do anything.\"\"\"\n\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\n self._iterator = args[0] if args else None\n\n def __iter__(self):\n return iter(self._iterator)\n\n def __getattr__(self, _):\n \"\"\"Return empty function.\"\"\"\n\n def empty_fn(*args, **kwargs): # pylint: disable=unused-argument\n return\n\n return empty_fn\n\n def __enter__(self):\n return self\n\n def __exit__(self, type_, value, traceback):\n return\n\n\nclass _tqdm_cls:\n def __call__(self, *args, **kwargs):\n if _tqdm_active:\n return tqdm_lib.tqdm(*args, **kwargs)\n else:\n return EmptyTqdm(*args, **kwargs)\n\n def set_lock(self, *args, **kwargs):\n self._lock = None\n if _tqdm_active:\n return tqdm_lib.tqdm.set_lock(*args, **kwargs)\n\n def get_lock(self):\n if _tqdm_active:\n return tqdm_lib.tqdm.get_lock()\n\n\ntqdm = _tqdm_cls()\n\n\ndef is_progress_bar_enabled() -> bool:\n \"\"\"Return a boolean indicating whether tqdm progress bars are enabled.\"\"\"\n global _tqdm_active\n return bool(_tqdm_active)\n\n\ndef enable_progress_bar() -> None:\n \"\"\"Enable tqdm progress bar.\"\"\"\n global _tqdm_active\n _tqdm_active = True\n\n\ndef disable_progress_bar() -> None:\n \"\"\"Disable tqdm progress bar.\"\"\"\n global _tqdm_active\n _tqdm_active = False\n", "path": "src/diffusers/utils/logging.py" } ]
diff --git a/src/diffusers/utils/logging.py b/src/diffusers/utils/logging.py index 6050f314c008..7945db333cab 100644 --- a/src/diffusers/utils/logging.py +++ b/src/diffusers/utils/logging.py @@ -213,7 +213,7 @@ def remove_handler(handler: logging.Handler) -> None: _configure_library_root_logger() - assert handler is not None and handler not in _get_library_root_logger().handlers + assert handler is not None and handler in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(handler)
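The one-token change above (`not in` becomes `in`) flips the assertion so that removing a handler that was previously added passes the check. A small stdlib-only sketch of the corrected invariant, using Python's `logging` module directly so it runs without diffusers installed:

```python
import logging

# The invariant enforced by the fixed assertion: a handler may only be
# removed if it is currently registered on the library's root logger.
logger = logging.getLogger("diffusers")
handler = logging.StreamHandler()

logger.addHandler(handler)
assert handler is not None and handler in logger.handlers  # fixed check passes
logger.removeHandler(handler)
assert handler not in logger.handlers  # the handler is really gone
```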
e2nIEE__pandapower-2106
[bug] Pandapower interferes with matplotlib savefig ### Bug report checklist - [X] Searched the [issues page](https://github.com/e2nIEE/pandapower/issues) for similar reports - [X] Read the relevant sections of the [documentation](https://pandapower.readthedocs.io/en/latest/about.html) - [X] Browsed the [tutorials](https://github.com/e2nIEE/pandapower/tree/develop/tutorials) and [tests](https://github.com/e2nIEE/pandapower/tree/develop/pandapower/test) for useful code snippets and examples of use - [X] Reproduced the issue after updating with `pip install --upgrade pandapower` (or `git pull`) - [X] Tried basic troubleshooting (if a bug/error) like restarting the interpreter and checking the pythonpath ### Reproducible Example ```python import matplotlib.pyplot as plt import pandapower fig, ax = plt.subplots() ax.scatter(range(5), range(5)) fig.savefig('test.svg') ``` ### Issue Description and Traceback When pandapower is imported, matplotlib `savefig()` may run into a bug where the `GraphicsContextBase._capstyle` is set to a `str` instead of a `CapStyle` instance. Calling the proper `set_capstyle()` method solves this issue. Also, somehow, this issue does not arise when calling `fig.savefig('test.png')`. It only arises when the figure save type is SVG. The following code works fine. Notice that I have commented out `import pandapower`: ```python import matplotlib.pyplot as plt # import pandapower fig, ax = plt.subplots() ax.scatter(range(5), range(5)) fig.savefig('test.svg') ``` However, if I uncomment the `import pandapower` line, then I will get an error: ```python import matplotlib.pyplot as plt import pandapower fig, ax = plt.subplots() ax.scatter(range(5), range(5)) fig.savefig('test.svg') ``` Error: ``` Traceback (most recent call last): File "/home/user/testenv/test.py", line 6, in <module> fig.savefig('test.svg') File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/figure.py", line 3378, in savefig self.canvas.print_figure(fname, **kwargs) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py", line 2366, in print_figure result = print_method( File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py", line 2232, in <lambda> print_method = functools.wraps(meth)(lambda *args, **kwargs: meth( File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py", line 1369, in print_svg self.figure.draw(renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 95, in draw_wrapper result = draw(artist, renderer, *args, **kwargs) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 72, in draw_wrapper return draw(artist, renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/figure.py", line 3175, in draw mimage._draw_list_compositing_images( File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/image.py", line 131, in _draw_list_compositing_images a.draw(renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 72, in draw_wrapper return draw(artist, renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/axes/_base.py", line 3064, in draw mimage._draw_list_compositing_images( File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/image.py", line 131, in
_draw_list_compositing_images a.draw(renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 72, in draw_wrapper return draw(artist, renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/collections.py", line 972, in draw super().draw(renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 72, in draw_wrapper return draw(artist, renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/collections.py", line 405, in draw renderer.draw_markers( File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py", line 717, in draw_markers style = self._get_style_dict(gc, rgbFace) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py", line 609, in _get_style_dict if gc.get_capstyle() != 'butt': File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py", line 820, in get_capstyle return self._capstyle.name AttributeError: 'str' object has no attribute 'name' ``` ### Expected Behavior I would expect the following 2 code blocks to produce identical (or at least similar) results: ```python import matplotlib.pyplot as plt # import pandapower fig, ax = plt.subplots() ax.scatter(range(5), range(5)) fig.savefig('test.svg') ``` and ```python import matplotlib.pyplot as plt import pandapower fig, ax = plt.subplots() ax.scatter(range(5), range(5)) fig.savefig('test.svg') ``` The 1st code block works fine, whereas the 2nd code block throws an `AttributeError`. ### Installed Versions OS: Ubuntu 22.04 LTS Python 3.10 Matplotlib 3.7.2 Pandapower 2.13.1 ### Label - [X] Relevant labels are selected
[ { "content": "from pandapower.plotting.collections import *\nfrom pandapower.plotting.colormaps import *\nfrom pandapower.plotting.generic_geodata import *\nfrom pandapower.plotting.powerflow_results import *\nfrom pandapower.plotting.simple_plot import *\nfrom pandapower.plotting.plotly import *\nfrom pandapower.plotting.geo import *\nfrom pandapower.plotting.plotting_toolbox import set_line_geodata_from_bus_geodata\nfrom pandapower.plotting.to_html import to_html\n\nimport types\n\ntry:\n from matplotlib.backend_bases import GraphicsContextBase, RendererBase\n\n class GC(GraphicsContextBase):\n def __init__(self):\n super().__init__()\n self._capstyle = 'round'\n\n def custom_new_gc(self):\n return GC()\n\n RendererBase.new_gc = types.MethodType(custom_new_gc, RendererBase)\n\nexcept ImportError:\n pass\n", "path": "pandapower/plotting/__init__.py" } ]
[ { "content": "from pandapower.plotting.collections import *\nfrom pandapower.plotting.colormaps import *\nfrom pandapower.plotting.generic_geodata import *\nfrom pandapower.plotting.powerflow_results import *\nfrom pandapower.plotting.simple_plot import *\nfrom pandapower.plotting.plotly import *\nfrom pandapower.plotting.geo import *\nfrom pandapower.plotting.plotting_toolbox import set_line_geodata_from_bus_geodata\nfrom pandapower.plotting.to_html import to_html\n\nimport types\n\ntry:\n from matplotlib.backend_bases import GraphicsContextBase, RendererBase\n\n class GC(GraphicsContextBase):\n def __init__(self):\n super().__init__()\n self.set_capstyle('round')\n\n def custom_new_gc(self):\n return GC()\n\n RendererBase.new_gc = types.MethodType(custom_new_gc, RendererBase)\n\nexcept ImportError:\n pass\n", "path": "pandapower/plotting/__init__.py" } ]
diff --git a/pandapower/plotting/__init__.py b/pandapower/plotting/__init__.py index 8b71f2bea..11a5090a4 100644 --- a/pandapower/plotting/__init__.py +++ b/pandapower/plotting/__init__.py @@ -16,7 +16,7 @@ class GC(GraphicsContextBase): def __init__(self): super().__init__() - self._capstyle = 'round' + self.set_capstyle('round') def custom_new_gc(self): return GC()
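The fix swaps the direct write to the private `_capstyle` attribute for the public `set_capstyle()` setter, which normalizes the string into matplotlib's `CapStyle` enum, so `get_capstyle()` (which returns `self._capstyle.name`, per the traceback) keeps working. A minimal check of that difference, assuming a recent matplotlib install such as the 3.7 release named in the report:

```python
# Why set_capstyle() works where assigning _capstyle directly breaks
# (assumes a matplotlib version, e.g. 3.7, whose get_capstyle() returns
# self._capstyle.name, as in the traceback above).
from matplotlib.backend_bases import GraphicsContextBase

gc = GraphicsContextBase()
gc.set_capstyle("round")      # setter validates and stores a CapStyle member
print(gc.get_capstyle())      # -> 'round'

gc_raw = GraphicsContextBase()
gc_raw._capstyle = "round"    # pre-patch behaviour: a bare str slips through
# gc_raw.get_capstyle() would raise AttributeError: 'str' object has no attribute 'name'
```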
gammapy__gammapy-3099
Error in executing `FluxPoints.to_sed_type()` **Gammapy version** Present dev **Bug description** Calling `FluxPoints.to_sed_type()` returns an error ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-225-40a5362800d4> in <module> ----> 1 flux_points_fermi = FluxPoints(fgl_src.flux_points.table).to_sed_type("dnde", model=fgl_src.sky_model().spectral_model) 2 fpds_fermi = FluxPointsDataset( 3 data=flux_points_fermi, models=fgl_src.sky_model() 4 ) ~/Gammapy-dev/gammapy/gammapy/estimators/flux_point.py in to_sed_type(self, sed_type, method, model, pwl_approx) 380 else: 381 raise ValueError(f"Invalid method: {method}") --> 382 table = self._flux_to_dnde(e_ref, table, model, pwl_approx) 383 384 elif self.sed_type == "dnde" and sed_type == "e2dnde": ~/Gammapy-dev/gammapy/gammapy/estimators/flux_point.py in _flux_to_dnde(self, e_ref, table, model, pwl_approx) 277 278 flux = table["flux"].quantity --> 279 dnde = self._dnde_from_flux(flux, model, e_ref, e_min, e_max, pwl_approx) 280 281 # Add to result table ~/Gammapy-dev/gammapy/gammapy/estimators/flux_point.py in _dnde_from_flux(flux, model, e_ref, e_min, e_max, pwl_approx) 436 ) 437 else: --> 438 flux_model = model.integral(e_min, e_max, intervals=True) 439 440 return dnde_model * (flux / flux_model) ~/Gammapy-dev/gammapy/gammapy/modeling/models/spectral.py in integral(self, emin, emax, **kwargs) 167 return self.evaluate_integral(emin, emax, **kwargs) 168 else: --> 169 return integrate_spectrum(self, emin, emax, **kwargs) 170 171 def integral_error(self, emin, emax): TypeError: integrate_spectrum() got an unexpected keyword argument 'intervals' ``` **To Reproduce** ``` from gammapy.catalog import CATALOG_REGISTRY catalog_4fgl = CATALOG_REGISTRY.get_cls("4fgl")() fgl_src = catalog_4fgl["FGES J1553.8-5325"] flux_points_fermi = FluxPoints(fgl_src.flux_points.table).to_sed_type("dnde", model=fgl_src.sky_model().spectral_model) ```
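The traceback boils down to a keyword mismatch: `integral()` forwards its `**kwargs` to a numerical helper that has no `intervals` parameter. A stripped-down, self-contained sketch of that failure shape; the function names mirror the traceback, but the bodies are placeholders, not gammapy's real implementations:

```python
def integrate_spectrum(func, emin, emax):
    # placeholder quadrature: no `intervals` parameter, like the helper in the traceback
    return (emax - emin) * func(0.5 * (emin + emax))

def integral(func, emin, emax, **kwargs):
    # blindly forwards kwargs, so unexpected keywords surface here
    return integrate_spectrum(func, emin, emax, **kwargs)

print(integral(lambda e: e ** -2.0, 1.0, 10.0))  # fine
try:
    integral(lambda e: e ** -2.0, 1.0, 10.0, intervals=True)
except TypeError as err:
    print(err)  # integrate_spectrum() got an unexpected keyword argument 'intervals'
```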
[ { "content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.io.registry import IORegistryError\nfrom astropy.table import Table, vstack\nfrom gammapy.datasets import Datasets\nfrom gammapy.modeling.models import PowerLawSpectralModel\nfrom gammapy.utils.interpolation import interpolate_profile\nfrom gammapy.utils.scripts import make_path\nfrom gammapy.utils.table import table_from_row_data, table_standardise_units_copy\nfrom .core import Estimator\nfrom .flux import FluxEstimator\n\n\n__all__ = [\"FluxPoints\", \"FluxPointsEstimator\"]\n\nlog = logging.getLogger(__name__)\n\nREQUIRED_COLUMNS = {\n \"dnde\": [\"e_ref\", \"dnde\"],\n \"e2dnde\": [\"e_ref\", \"e2dnde\"],\n \"flux\": [\"e_min\", \"e_max\", \"flux\"],\n \"eflux\": [\"e_min\", \"e_max\", \"eflux\"],\n # TODO: extend required columns\n \"likelihood\": [\n \"e_min\",\n \"e_max\",\n \"e_ref\",\n \"ref_dnde\",\n \"norm\",\n ],\n}\n\nOPTIONAL_COLUMNS = {\n \"dnde\": [\"dnde_err\", \"dnde_errp\", \"dnde_errn\", \"dnde_ul\", \"is_ul\"],\n \"e2dnde\": [\"e2dnde_err\", \"e2dnde_errp\", \"e2dnde_errn\", \"e2dnde_ul\", \"is_ul\"],\n \"flux\": [\"flux_err\", \"flux_errp\", \"flux_errn\", \"flux_ul\", \"is_ul\"],\n \"eflux\": [\"eflux_err\", \"eflux_errp\", \"eflux_errn\", \"eflux_ul\", \"is_ul\"],\n \"likelihood\": [\"norm_scan\", \"stat_scan\"],\n}\n\nDEFAULT_UNIT = {\n \"dnde\": u.Unit(\"cm-2 s-1 TeV-1\"),\n \"e2dnde\": u.Unit(\"erg cm-2 s-1\"),\n \"flux\": u.Unit(\"cm-2 s-1\"),\n \"eflux\": u.Unit(\"erg cm-2 s-1\"),\n}\n\n\nclass FluxPoints:\n \"\"\"Flux points container.\n\n The supported formats are described here: :ref:`gadf:flux-points`\n\n In summary, the following formats and minimum required columns are:\n\n * Format ``dnde``: columns ``e_ref`` and ``dnde``\n * Format ``e2dnde``: columns ``e_ref``, ``e2dnde``\n * Format ``flux``: columns ``e_min``, ``e_max``, ``flux``\n * Format ``eflux``: columns ``e_min``, ``e_max``, ``eflux``\n\n Parameters\n ----------\n table : `~astropy.table.Table`\n Table with flux point data\n\n Attributes\n ----------\n table : `~astropy.table.Table`\n Table with flux point data\n\n Examples\n --------\n The `FluxPoints` object is most easily created by reading a file with\n flux points given in one of the formats documented above::\n\n from gammapy.estimators import FluxPoints\n filename = '$GAMMAPY_DATA/hawc_crab/HAWC19_flux_points.fits'\n flux_points = FluxPoints.read(filename)\n flux_points.plot()\n\n An instance of `FluxPoints` can also be created by passing an instance of\n `astropy.table.Table`, which contains the required columns, such as `'e_ref'`\n and `'dnde'`. 
The corresponding `sed_type` has to be defined in the meta data\n of the table::\n\n from astropy import units as u\n from astropy.table import Table\n from gammapy.estimators import FluxPoints\n from gammapy.modeling.models import PowerLawSpectralModel\n\n table = Table()\n pwl = PowerLawSpectralModel()\n e_ref = np.logspace(0, 2, 7) * u.TeV\n table['e_ref'] = e_ref\n table['dnde'] = pwl(e_ref)\n table.meta['SED_TYPE'] = 'dnde'\n\n flux_points = FluxPoints(table)\n flux_points.plot()\n\n If you have flux points in a different data format, the format can be changed\n by renaming the table columns and adding meta data::\n\n\n from astropy import units as u\n from astropy.table import Table\n from gammapy.estimators import FluxPoints\n\n table = Table.read('$GAMMAPY_DATA/tests/spectrum/flux_points/flux_points_ctb_37b.txt',\n format='ascii.csv', delimiter=' ', comment='#')\n table.meta['SED_TYPE'] = 'dnde'\n table.rename_column('Differential_Flux', 'dnde')\n table['dnde'].unit = 'cm-2 s-1 TeV-1'\n\n table.rename_column('lower_error', 'dnde_errn')\n table['dnde_errn'].unit = 'cm-2 s-1 TeV-1'\n\n table.rename_column('upper_error', 'dnde_errp')\n table['dnde_errp'].unit = 'cm-2 s-1 TeV-1'\n\n table.rename_column('E', 'e_ref')\n table['e_ref'].unit = 'TeV'\n\n flux_points = FluxPoints(table)\n flux_points.plot()\n\n Note: In order to reproduce the example you need the tests datasets folder.\n You may download it with the command\n ``gammapy download datasets --tests --out $GAMMAPY_DATA``\n \"\"\"\n\n def __init__(self, table):\n self.table = table_standardise_units_copy(table)\n # validate that the table is a valid representation\n # of the given flux point sed type\n self._validate_table(self.table, table.meta[\"SED_TYPE\"])\n\n def __repr__(self):\n return f\"{self.__class__.__name__}(sed_type={self.sed_type!r}, n_points={len(self.table)})\"\n\n @property\n def table_formatted(self):\n \"\"\"Return formatted version of the flux points table. 
Used for pretty printing\"\"\"\n table = self.table.copy()\n\n for column in table.colnames:\n if column.startswith((\"dnde\", \"eflux\", \"flux\", \"e2dnde\", \"ref\")):\n table[column].format = \".3e\"\n elif column.startswith(\n (\"e_min\", \"e_max\", \"e_ref\", \"sqrt_ts\", \"norm\", \"ts\", \"stat\")\n ):\n table[column].format = \".3f\"\n\n return table\n\n @classmethod\n def read(cls, filename, **kwargs):\n \"\"\"Read flux points.\n\n Parameters\n ----------\n filename : str\n Filename\n kwargs : dict\n Keyword arguments passed to `astropy.table.Table.read`.\n \"\"\"\n filename = make_path(filename)\n try:\n table = Table.read(filename, **kwargs)\n except IORegistryError:\n kwargs.setdefault(\"format\", \"ascii.ecsv\")\n table = Table.read(filename, **kwargs)\n\n if \"SED_TYPE\" not in table.meta.keys():\n sed_type = cls._guess_sed_type(table)\n table.meta[\"SED_TYPE\"] = sed_type\n\n # TODO: check sign and factor 2 here\n # https://github.com/gammapy/gammapy/pull/2546#issuecomment-554274318\n # The idea below is to support the format here:\n # https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/flux_points/index.html#likelihood-columns\n # but internally to go to the uniform \"stat\"\n\n if \"loglike\" in table.colnames and \"stat\" not in table.colnames:\n table[\"stat\"] = 2 * table[\"loglike\"]\n\n if \"loglike_null\" in table.colnames and \"stat_null\" not in table.colnames:\n table[\"stat_null\"] = 2 * table[\"loglike_null\"]\n\n if \"dloglike_scan\" in table.colnames and \"stat_scan\" not in table.colnames:\n table[\"stat_scan\"] = 2 * table[\"dloglike_scan\"]\n\n return cls(table=table)\n\n def write(self, filename, **kwargs):\n \"\"\"Write flux points.\n\n Parameters\n ----------\n filename : str\n Filename\n kwargs : dict\n Keyword arguments passed to `astropy.table.Table.write`.\n \"\"\"\n filename = make_path(filename)\n try:\n self.table.write(filename, **kwargs)\n except IORegistryError:\n kwargs.setdefault(\"format\", \"ascii.ecsv\")\n self.table.write(filename, **kwargs)\n\n @classmethod\n def stack(cls, flux_points):\n \"\"\"Create flux points by stacking list of flux points.\n\n The first `FluxPoints` object in the list is taken as a reference to infer\n column names and units for the stacked object.\n\n Parameters\n ----------\n flux_points : list of `FluxPoints`\n List of flux points to stack.\n\n Returns\n -------\n flux_points : `FluxPoints`\n Flux points without upper limit points.\n \"\"\"\n reference = flux_points[0].table\n\n tables = []\n for _ in flux_points:\n table = _.table\n for colname in reference.colnames:\n column = reference[colname]\n if column.unit:\n table[colname] = table[colname].quantity.to(column.unit)\n tables.append(table[reference.colnames])\n\n table_stacked = vstack(tables)\n table_stacked.meta[\"SED_TYPE\"] = reference.meta[\"SED_TYPE\"]\n\n return cls(table_stacked)\n\n def drop_ul(self):\n \"\"\"Drop upper limit flux points.\n\n Returns\n -------\n flux_points : `FluxPoints`\n Flux points with upper limit points removed.\n\n Examples\n --------\n >>> from gammapy.estimators import FluxPoints\n >>> filename = '$GAMMAPY_DATA/tests/spectrum/flux_points/flux_points.fits'\n >>> flux_points = FluxPoints.read(filename)\n >>> print(flux_points)\n FluxPoints(sed_type=\"flux\", n_points=24)\n >>> print(flux_points.drop_ul())\n FluxPoints(sed_type=\"flux\", n_points=19)\n\n Note: In order to reproduce the example you need the tests datasets folder.\n You may download it with the command\n ``gammapy download datasets --tests 
--out $GAMMAPY_DATA``\n \"\"\"\n table_drop_ul = self.table[~self.is_ul]\n return self.__class__(table_drop_ul)\n\n def _flux_to_dnde(self, e_ref, table, model, pwl_approx):\n if model is None:\n model = PowerLawSpectralModel()\n\n e_min, e_max = self.e_min, self.e_max\n\n flux = table[\"flux\"].quantity\n dnde = self._dnde_from_flux(flux, model, e_ref, e_min, e_max, pwl_approx)\n\n # Add to result table\n table[\"e_ref\"] = e_ref\n table[\"dnde\"] = dnde\n\n if \"flux_err\" in table.colnames:\n table[\"dnde_err\"] = dnde * table[\"flux_err\"].quantity / flux\n\n if \"flux_errn\" in table.colnames:\n table[\"dnde_errn\"] = dnde * table[\"flux_errn\"].quantity / flux\n table[\"dnde_errp\"] = dnde * table[\"flux_errp\"].quantity / flux\n\n if \"flux_ul\" in table.colnames:\n flux_ul = table[\"flux_ul\"].quantity\n dnde_ul = self._dnde_from_flux(\n flux_ul, model, e_ref, e_min, e_max, pwl_approx\n )\n table[\"dnde_ul\"] = dnde_ul\n\n return table\n\n @staticmethod\n def _dnde_to_e2dnde(e_ref, table):\n for suffix in [\"\", \"_ul\", \"_err\", \"_errp\", \"_errn\"]:\n try:\n data = table[\"dnde\" + suffix].quantity\n table[\"e2dnde\" + suffix] = (e_ref ** 2 * data).to(\n DEFAULT_UNIT[\"e2dnde\"]\n )\n except KeyError:\n continue\n\n return table\n\n @staticmethod\n def _e2dnde_to_dnde(e_ref, table):\n for suffix in [\"\", \"_ul\", \"_err\", \"_errp\", \"_errn\"]:\n try:\n data = table[\"e2dnde\" + suffix].quantity\n table[\"dnde\" + suffix] = (data / e_ref ** 2).to(DEFAULT_UNIT[\"dnde\"])\n except KeyError:\n continue\n\n return table\n\n def to_sed_type(self, sed_type, method=\"log_center\", model=None, pwl_approx=False):\n \"\"\"Convert to a different SED type (return new `FluxPoints`).\n\n See: https://ui.adsabs.harvard.edu/abs/1995NIMPA.355..541L for details\n on the `'lafferty'` method.\n\n Parameters\n ----------\n sed_type : {'dnde'}\n SED type to convert to.\n model : `~gammapy.modeling.models.SpectralModel`\n Spectral model assumption. Note that the value of the amplitude parameter\n does not matter. Still it is recommended to use something with the right\n scale and units. E.g. `amplitude = 1e-12 * u.Unit('cm-2 s-1 TeV-1')`\n method : {'lafferty', 'log_center', 'table'}\n Flux points `e_ref` estimation method:\n\n * `'laferty'` Lafferty & Wyatt model-based e_ref\n * `'log_center'` log bin center e_ref\n * `'table'` using column 'e_ref' from input flux_points\n pwl_approx : bool\n Use local power law appoximation at e_ref to compute differential flux\n from the integral flux. 
This method is used by the Fermi-LAT catalogs.\n\n Returns\n -------\n flux_points : `FluxPoints`\n Flux points including differential quantity columns `dnde`\n and `dnde_err` (optional), `dnde_ul` (optional).\n\n Examples\n --------\n >>> from gammapy.estimators import FluxPoints\n >>> from gammapy.modeling.models import PowerLawSpectralModel\n >>> filename = '$GAMMAPY_DATA/tests/spectrum/flux_points/flux_points.fits'\n >>> flux_points = FluxPoints.read(filename)\n >>> model = PowerLawSpectralModel(index=2.2)\n >>> flux_points_dnde = flux_points.to_sed_type('dnde', model=model)\n\n Note: In order to reproduce the example you need the tests datasets folder.\n You may download it with the command\n ``gammapy download datasets --tests --out $GAMMAPY_DATA``\n \"\"\"\n # TODO: implement other directions.\n table = self.table.copy()\n\n if self.sed_type == \"flux\" and sed_type == \"dnde\":\n # Compute e_ref\n if method == \"table\":\n e_ref = table[\"e_ref\"].quantity\n elif method == \"log_center\":\n e_ref = np.sqrt(self.e_min * self.e_max)\n elif method == \"lafferty\":\n # set e_ref that it represents the mean dnde in the given energy bin\n e_ref = self._e_ref_lafferty(model, self.e_min, self.e_max)\n else:\n raise ValueError(f\"Invalid method: {method}\")\n table = self._flux_to_dnde(e_ref, table, model, pwl_approx)\n\n elif self.sed_type == \"dnde\" and sed_type == \"e2dnde\":\n table = self._dnde_to_e2dnde(self.e_ref, table)\n\n elif self.sed_type == \"e2dnde\" and sed_type == \"dnde\":\n table = self._e2dnde_to_dnde(self.e_ref, table)\n\n elif self.sed_type == \"likelihood\" and sed_type in [\"dnde\", \"flux\", \"eflux\"]:\n for suffix in [\"\", \"_ul\", \"_err\", \"_errp\", \"_errn\"]:\n try:\n table[sed_type + suffix] = (\n table[\"ref_\" + sed_type] * table[\"norm\" + suffix]\n )\n except KeyError:\n continue\n elif self.sed_type == sed_type:\n # do nothing if the sed type is the same\n pass\n\n else:\n raise NotImplementedError\n\n table.meta[\"SED_TYPE\"] = sed_type\n return FluxPoints(table)\n\n @staticmethod\n def _e_ref_lafferty(model, e_min, e_max):\n \"\"\"Helper for `to_sed_type`.\n\n Compute e_ref that the value at e_ref corresponds\n to the mean value between e_min and e_max.\n \"\"\"\n flux = model.integral(e_min, e_max)\n dnde_mean = flux / (e_max - e_min)\n return model.inverse(dnde_mean)\n\n @staticmethod\n def _dnde_from_flux(flux, model, e_ref, e_min, e_max, pwl_approx):\n \"\"\"Helper for `to_sed_type`.\n\n Compute dnde under the assumption that flux equals expected\n flux from model.\n \"\"\"\n dnde_model = model(e_ref)\n\n if pwl_approx:\n index = model.spectral_index(e_ref)\n flux_model = PowerLawSpectralModel.evaluate_integral(\n emin=e_min,\n emax=e_max,\n index=index,\n reference=e_ref,\n amplitude=dnde_model,\n )\n else:\n flux_model = model.integral(e_min, e_max, intervals=True)\n\n return dnde_model * (flux / flux_model)\n\n @property\n def sed_type(self):\n \"\"\"SED type (str).\n\n One of: {'dnde', 'e2dnde', 'flux', 'eflux'}\n \"\"\"\n return self.table.meta[\"SED_TYPE\"]\n\n @staticmethod\n def _guess_sed_type(table):\n \"\"\"Guess SED type from table content.\"\"\"\n valid_sed_types = list(REQUIRED_COLUMNS.keys())\n for sed_type in valid_sed_types:\n required = set(REQUIRED_COLUMNS[sed_type])\n if required.issubset(table.colnames):\n return sed_type\n\n @staticmethod\n def _guess_sed_type_from_unit(unit):\n \"\"\"Guess SED type from unit.\"\"\"\n for sed_type, default_unit in DEFAULT_UNIT.items():\n if unit.is_equivalent(default_unit):\n return 
sed_type\n\n @staticmethod\n def _validate_table(table, sed_type):\n \"\"\"Validate input table.\"\"\"\n required = set(REQUIRED_COLUMNS[sed_type])\n\n if not required.issubset(table.colnames):\n missing = required.difference(table.colnames)\n raise ValueError(\n \"Missing columns for sed type '{}':\" \" {}\".format(sed_type, missing)\n )\n\n @staticmethod\n def _get_y_energy_unit(y_unit):\n \"\"\"Get energy part of the given y unit.\"\"\"\n try:\n return [_ for _ in y_unit.bases if _.physical_type == \"energy\"][0]\n except IndexError:\n return u.Unit(\"TeV\")\n\n def _plot_get_energy_err(self):\n \"\"\"Compute energy error for given sed type\"\"\"\n try:\n e_min = self.table[\"e_min\"].quantity\n e_max = self.table[\"e_max\"].quantity\n e_ref = self.e_ref\n x_err = ((e_ref - e_min), (e_max - e_ref))\n except KeyError:\n x_err = None\n return x_err\n\n def _plot_get_flux_err(self, sed_type=None):\n \"\"\"Compute flux error for given sed type\"\"\"\n try:\n # asymmetric error\n y_errn = self.table[sed_type + \"_errn\"].quantity\n y_errp = self.table[sed_type + \"_errp\"].quantity\n y_err = (y_errn, y_errp)\n except KeyError:\n try:\n # symmetric error\n y_err = self.table[sed_type + \"_err\"].quantity\n y_err = (y_err, y_err)\n except KeyError:\n # no error at all\n y_err = None\n return y_err\n\n @property\n def is_ul(self):\n try:\n return self.table[\"is_ul\"].data.astype(\"bool\")\n except KeyError:\n return np.isnan(self.table[self.sed_type])\n\n @property\n def e_ref(self):\n \"\"\"Reference energy.\n\n Defined by `e_ref` column in `FluxPoints.table` or computed as log\n center, if `e_min` and `e_max` columns are present in `FluxPoints.table`.\n\n Returns\n -------\n e_ref : `~astropy.units.Quantity`\n Reference energy.\n \"\"\"\n try:\n return self.table[\"e_ref\"].quantity\n except KeyError:\n return np.sqrt(self.e_min * self.e_max)\n\n @property\n def e_edges(self):\n \"\"\"Edges of the energy bin.\n\n Returns\n -------\n e_edges : `~astropy.units.Quantity`\n Energy edges.\n \"\"\"\n e_edges = list(self.e_min)\n e_edges += [self.e_max[-1]]\n return u.Quantity(e_edges, self.e_min.unit, copy=False)\n\n @property\n def e_min(self):\n \"\"\"Lower bound of energy bin.\n\n Defined by `e_min` column in `FluxPoints.table`.\n\n Returns\n -------\n e_min : `~astropy.units.Quantity`\n Lower bound of energy bin.\n \"\"\"\n return self.table[\"e_min\"].quantity\n\n @property\n def e_max(self):\n \"\"\"Upper bound of energy bin.\n\n Defined by ``e_max`` column in ``table``.\n\n Returns\n -------\n e_max : `~astropy.units.Quantity`\n Upper bound of energy bin.\n \"\"\"\n return self.table[\"e_max\"].quantity\n\n def plot(\n self, ax=None, energy_unit=\"TeV\", flux_unit=None, energy_power=0, **kwargs\n ):\n \"\"\"Plot flux points.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`\n Axis object to plot on.\n energy_unit : str, `~astropy.units.Unit`, optional\n Unit of the energy axis\n flux_unit : str, `~astropy.units.Unit`, optional\n Unit of the flux axis\n energy_power : int\n Power of energy to multiply y axis with\n kwargs : dict\n Keyword arguments passed to :func:`matplotlib.pyplot.errorbar`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axis object\n \"\"\"\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n sed_type = self.sed_type\n y_unit = u.Unit(flux_unit or DEFAULT_UNIT[sed_type])\n\n y = self.table[sed_type].quantity.to(y_unit)\n x = self.e_ref.to(energy_unit)\n\n # get errors and ul\n is_ul = self.is_ul\n x_err_all = 
self._plot_get_energy_err()\n y_err_all = self._plot_get_flux_err(sed_type)\n\n # handle energy power\n e_unit = self._get_y_energy_unit(y_unit)\n y_unit = y.unit * e_unit ** energy_power\n y = (y * np.power(x, energy_power)).to(y_unit)\n\n y_err, x_err = None, None\n\n if y_err_all:\n y_errn = (y_err_all[0] * np.power(x, energy_power)).to(y_unit)\n y_errp = (y_err_all[1] * np.power(x, energy_power)).to(y_unit)\n y_err = (y_errn[~is_ul].to_value(y_unit), y_errp[~is_ul].to_value(y_unit))\n\n if x_err_all:\n x_errn, x_errp = x_err_all\n x_err = (\n x_errn[~is_ul].to_value(energy_unit),\n x_errp[~is_ul].to_value(energy_unit),\n )\n\n # set flux points plotting defaults\n kwargs.setdefault(\"marker\", \"+\")\n kwargs.setdefault(\"ls\", \"None\")\n\n ebar = ax.errorbar(\n x[~is_ul].value, y[~is_ul].value, yerr=y_err, xerr=x_err, **kwargs\n )\n\n if is_ul.any():\n if x_err_all:\n x_errn, x_errp = x_err_all\n x_err = (\n x_errn[is_ul].to_value(energy_unit),\n x_errp[is_ul].to_value(energy_unit),\n )\n\n y_ul = self.table[sed_type + \"_ul\"].quantity\n y_ul = (y_ul * np.power(x, energy_power)).to(y_unit)\n\n y_err = (0.5 * y_ul[is_ul].value, np.zeros_like(y_ul[is_ul].value))\n\n kwargs.setdefault(\"color\", ebar[0].get_color())\n\n # pop label keyword to avoid that it appears twice in the legend\n kwargs.pop(\"label\", None)\n ax.errorbar(\n x[is_ul].value,\n y_ul[is_ul].value,\n xerr=x_err,\n yerr=y_err,\n uplims=True,\n **kwargs,\n )\n\n ax.set_xscale(\"log\", nonposx=\"clip\")\n ax.set_yscale(\"log\", nonposy=\"clip\")\n ax.set_xlabel(f\"Energy ({energy_unit})\")\n ax.set_ylabel(f\"{self.sed_type} ({y_unit})\")\n return ax\n\n def plot_ts_profiles(\n self,\n ax=None,\n energy_unit=\"TeV\",\n add_cbar=True,\n y_values=None,\n y_unit=None,\n **kwargs,\n ):\n \"\"\"Plot fit statistic SED profiles as a density plot.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`\n Axis object to plot on.\n energy_unit : str, `~astropy.units.Unit`, optional\n Unit of the energy axis\n y_values : `astropy.units.Quantity`\n Array of y-values to use for the fit statistic profile evaluation.\n y_unit : str or `astropy.units.Unit`\n Unit to use for the y-axis.\n add_cbar : bool\n Whether to add a colorbar to the plot.\n kwargs : dict\n Keyword arguments passed to :func:`matplotlib.pyplot.pcolormesh`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axis object\n \"\"\"\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n self._validate_table(self.table, \"likelihood\")\n y_unit = u.Unit(y_unit or DEFAULT_UNIT[self.sed_type])\n\n if y_values is None:\n ref_values = self.table[\"ref_\" + self.sed_type].quantity\n y_values = np.logspace(\n np.log10(0.2 * ref_values.value.min()),\n np.log10(5 * ref_values.value.max()),\n 500,\n )\n y_values = u.Quantity(y_values, y_unit, copy=False)\n\n x = self.e_edges.to(energy_unit)\n\n # Compute fit statistic \"image\" one energy bin at a time\n # by interpolating e2dnde at the log bin centers\n z = np.empty((len(self.table), len(y_values)))\n for idx, row in enumerate(self.table):\n y_ref = self.table[\"ref_\" + self.sed_type].quantity[idx]\n norm = (y_values / y_ref).to_value(\"\")\n norm_scan = row[\"norm_scan\"]\n ts_scan = row[\"stat_scan\"] - row[\"stat\"]\n interp = interpolate_profile(norm_scan, ts_scan)\n z[idx] = interp((norm,))\n\n kwargs.setdefault(\"vmax\", 0)\n kwargs.setdefault(\"vmin\", -4)\n kwargs.setdefault(\"zorder\", 0)\n kwargs.setdefault(\"cmap\", \"Blues\")\n kwargs.setdefault(\"linewidths\", 0)\n\n # clipped values are set to 
NaN so that they appear white on the plot\n z[-z < kwargs[\"vmin\"]] = np.nan\n caxes = ax.pcolormesh(x.value, y_values.value, -z.T, **kwargs)\n ax.set_xscale(\"log\", nonposx=\"clip\")\n ax.set_yscale(\"log\", nonposy=\"clip\")\n ax.set_xlabel(f\"Energy ({energy_unit})\")\n ax.set_ylabel(f\"{self.sed_type} ({y_values.unit})\")\n\n if add_cbar:\n label = \"fit statistic difference\"\n ax.figure.colorbar(caxes, ax=ax, label=label)\n\n return ax\n\n\nclass FluxPointsEstimator(Estimator):\n \"\"\"Flux points estimator.\n\n Estimates flux points for a given list of datasets, energies and spectral model.\n\n To estimate the flux point the amplitude of the reference spectral model is\n fitted within the energy range defined by the energy group. This is done for\n each group independently. The amplitude is re-normalized using the \"norm\" parameter,\n which specifies the deviation of the flux from the reference model in this\n energy group. See https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/binned_likelihoods/index.html\n for details.\n\n The method is also described in the Fermi-LAT catalog paper\n https://ui.adsabs.harvard.edu/#abs/2015ApJS..218...23A\n or the HESS Galactic Plane Survey paper\n https://ui.adsabs.harvard.edu/#abs/2018A%26A...612A...1H\n\n Parameters\n ----------\n e_edges : `~astropy.units.Quantity`\n Energy edges of the flux point bins.\n source : str or int\n For which source in the model to compute the flux points.\n norm_min : float\n Minimum value for the norm used for the fit statistic profile evaluation.\n norm_max : float\n Maximum value for the norm used for the fit statistic profile evaluation.\n norm_n_values : int\n Number of norm values used for the fit statistic profile.\n norm_values : `numpy.ndarray`\n Array of norm values to be used for the fit statistic profile.\n n_sigma : int\n Number of sigma to use for asymmetric error computation. Default is 1.\n n_sigma_ul : int\n Number of sigma to use for upper limit computation. Default is 2.\n reoptimize : bool\n Re-optimize other free model parameters.\n selection_optional : list of str\n Which additional quantities to estimate. 
Available options are:\n\n * \"errn-errp\": estimate asymmetric errors on flux.\n * \"ul\": estimate upper limits.\n * \"norm-scan\": estimate fit statistic profiles.\n\n By default all steps are executed.\n \"\"\"\n\n tag = \"FluxPointsEstimator\"\n _available_selection_optional = [\"errn-errp\", \"ul\", \"scan\"]\n\n def __init__(\n self,\n e_edges=[1, 10] * u.TeV,\n source=0,\n norm_min=0.2,\n norm_max=5,\n norm_n_values=11,\n norm_values=None,\n n_sigma=1,\n n_sigma_ul=2,\n reoptimize=False,\n selection_optional=\"all\",\n ):\n self.e_edges = e_edges\n self.source = source\n self.norm_min = norm_min\n self.norm_max = norm_max\n self.norm_n_values = norm_n_values\n self.norm_values = norm_values\n self.n_sigma = n_sigma\n self.n_sigma_ul = n_sigma_ul\n self.reoptimize = reoptimize\n self.selection_optional = selection_optional\n\n def _flux_estimator(self, e_min, e_max):\n return FluxEstimator(\n source=self.source,\n e_min=e_min,\n e_max=e_max,\n norm_min=self.norm_min,\n norm_max=self.norm_max,\n norm_n_values=self.norm_n_values,\n norm_values=self.norm_values,\n n_sigma=self.n_sigma,\n n_sigma_ul=self.n_sigma_ul,\n reoptimize=self.reoptimize,\n selection_optional=self.selection_optional,\n\n )\n\n def run(self, datasets):\n \"\"\"Run the flux point estimator for all energy groups.\n\n Parameters\n ----------\n datasets : list of `~gammapy.datasets.Dataset`\n Datasets\n\n Returns\n -------\n flux_points : `FluxPoints`\n Estimated flux points.\n \"\"\"\n datasets = Datasets(datasets).copy()\n\n rows = []\n\n for e_min, e_max in zip(self.e_edges[:-1], self.e_edges[1:]):\n row = self.estimate_flux_point(datasets, e_min=e_min, e_max=e_max)\n rows.append(row)\n\n table = table_from_row_data(rows=rows, meta={\"SED_TYPE\": \"likelihood\"})\n\n #TODO: this should be changed once likelihood is fully supported\n return FluxPoints(table).to_sed_type(\"dnde\")\n\n def estimate_flux_point(self, datasets, e_min, e_max):\n \"\"\"Estimate flux point for a single energy group.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets\n e_min, e_max : `~astropy.units.Quantity`\n Energy bounds to compute the flux point for.\n\n Returns\n -------\n result : dict\n Dict with results for the flux point.\n \"\"\"\n result = self.estimate_counts(datasets, e_min=e_min, e_max=e_max)\n\n datasets = datasets.slice_by_energy(e_min=e_min, e_max=e_max)\n\n if len(datasets) > 0:\n # TODO: refactor energy handling of FluxEstimator?\n energy_axis = datasets[0].counts.geom.axes[\"energy\"]\n e_min, e_max = energy_axis.edges.min(), energy_axis.edges.max()\n\n fe = self._flux_estimator(e_min=e_min, e_max=e_max)\n\n result.update(fe.run(datasets=datasets))\n\n return result\n\n @staticmethod\n def estimate_counts(datasets, e_min, e_max):\n \"\"\"Estimate counts for the flux point.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets\n e_min, e_max : `~astropy.units.Quantity`\n Energy bounds to compute the flux point for.\n\n Returns\n -------\n result : dict\n Dict with an array with one entry per dataset with counts for the flux point.\n \"\"\"\n counts = []\n\n for dataset in datasets:\n energy_mask = dataset.counts.geom.energy_mask(\n emin=e_min, emax=e_max, round_to_edge=True\n )\n mask = dataset.mask & energy_mask\n counts.append(dataset.counts.data[mask].sum())\n\n return {\"counts\": np.array(counts, dtype=int)}\n", "path": "gammapy/estimators/flux_point.py" } ]
[ { "content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.io.registry import IORegistryError\nfrom astropy.table import Table, vstack\nfrom gammapy.datasets import Datasets\nfrom gammapy.modeling.models import PowerLawSpectralModel\nfrom gammapy.utils.interpolation import interpolate_profile\nfrom gammapy.utils.scripts import make_path\nfrom gammapy.utils.table import table_from_row_data, table_standardise_units_copy\nfrom .core import Estimator\nfrom .flux import FluxEstimator\n\n\n__all__ = [\"FluxPoints\", \"FluxPointsEstimator\"]\n\nlog = logging.getLogger(__name__)\n\nREQUIRED_COLUMNS = {\n \"dnde\": [\"e_ref\", \"dnde\"],\n \"e2dnde\": [\"e_ref\", \"e2dnde\"],\n \"flux\": [\"e_min\", \"e_max\", \"flux\"],\n \"eflux\": [\"e_min\", \"e_max\", \"eflux\"],\n # TODO: extend required columns\n \"likelihood\": [\n \"e_min\",\n \"e_max\",\n \"e_ref\",\n \"ref_dnde\",\n \"norm\",\n ],\n}\n\nOPTIONAL_COLUMNS = {\n \"dnde\": [\"dnde_err\", \"dnde_errp\", \"dnde_errn\", \"dnde_ul\", \"is_ul\"],\n \"e2dnde\": [\"e2dnde_err\", \"e2dnde_errp\", \"e2dnde_errn\", \"e2dnde_ul\", \"is_ul\"],\n \"flux\": [\"flux_err\", \"flux_errp\", \"flux_errn\", \"flux_ul\", \"is_ul\"],\n \"eflux\": [\"eflux_err\", \"eflux_errp\", \"eflux_errn\", \"eflux_ul\", \"is_ul\"],\n \"likelihood\": [\"norm_scan\", \"stat_scan\"],\n}\n\nDEFAULT_UNIT = {\n \"dnde\": u.Unit(\"cm-2 s-1 TeV-1\"),\n \"e2dnde\": u.Unit(\"erg cm-2 s-1\"),\n \"flux\": u.Unit(\"cm-2 s-1\"),\n \"eflux\": u.Unit(\"erg cm-2 s-1\"),\n}\n\n\nclass FluxPoints:\n \"\"\"Flux points container.\n\n The supported formats are described here: :ref:`gadf:flux-points`\n\n In summary, the following formats and minimum required columns are:\n\n * Format ``dnde``: columns ``e_ref`` and ``dnde``\n * Format ``e2dnde``: columns ``e_ref``, ``e2dnde``\n * Format ``flux``: columns ``e_min``, ``e_max``, ``flux``\n * Format ``eflux``: columns ``e_min``, ``e_max``, ``eflux``\n\n Parameters\n ----------\n table : `~astropy.table.Table`\n Table with flux point data\n\n Attributes\n ----------\n table : `~astropy.table.Table`\n Table with flux point data\n\n Examples\n --------\n The `FluxPoints` object is most easily created by reading a file with\n flux points given in one of the formats documented above::\n\n from gammapy.estimators import FluxPoints\n filename = '$GAMMAPY_DATA/hawc_crab/HAWC19_flux_points.fits'\n flux_points = FluxPoints.read(filename)\n flux_points.plot()\n\n An instance of `FluxPoints` can also be created by passing an instance of\n `astropy.table.Table`, which contains the required columns, such as `'e_ref'`\n and `'dnde'`. 
The corresponding `sed_type` has to be defined in the meta data\n of the table::\n\n from astropy import units as u\n from astropy.table import Table\n from gammapy.estimators import FluxPoints\n from gammapy.modeling.models import PowerLawSpectralModel\n\n table = Table()\n pwl = PowerLawSpectralModel()\n e_ref = np.logspace(0, 2, 7) * u.TeV\n table['e_ref'] = e_ref\n table['dnde'] = pwl(e_ref)\n table.meta['SED_TYPE'] = 'dnde'\n\n flux_points = FluxPoints(table)\n flux_points.plot()\n\n If you have flux points in a different data format, the format can be changed\n by renaming the table columns and adding meta data::\n\n\n from astropy import units as u\n from astropy.table import Table\n from gammapy.estimators import FluxPoints\n\n table = Table.read('$GAMMAPY_DATA/tests/spectrum/flux_points/flux_points_ctb_37b.txt',\n format='ascii.csv', delimiter=' ', comment='#')\n table.meta['SED_TYPE'] = 'dnde'\n table.rename_column('Differential_Flux', 'dnde')\n table['dnde'].unit = 'cm-2 s-1 TeV-1'\n\n table.rename_column('lower_error', 'dnde_errn')\n table['dnde_errn'].unit = 'cm-2 s-1 TeV-1'\n\n table.rename_column('upper_error', 'dnde_errp')\n table['dnde_errp'].unit = 'cm-2 s-1 TeV-1'\n\n table.rename_column('E', 'e_ref')\n table['e_ref'].unit = 'TeV'\n\n flux_points = FluxPoints(table)\n flux_points.plot()\n\n Note: In order to reproduce the example you need the tests datasets folder.\n You may download it with the command\n ``gammapy download datasets --tests --out $GAMMAPY_DATA``\n \"\"\"\n\n def __init__(self, table):\n self.table = table_standardise_units_copy(table)\n # validate that the table is a valid representation\n # of the given flux point sed type\n self._validate_table(self.table, table.meta[\"SED_TYPE\"])\n\n def __repr__(self):\n return f\"{self.__class__.__name__}(sed_type={self.sed_type!r}, n_points={len(self.table)})\"\n\n @property\n def table_formatted(self):\n \"\"\"Return formatted version of the flux points table. 
Used for pretty printing\"\"\"\n table = self.table.copy()\n\n for column in table.colnames:\n if column.startswith((\"dnde\", \"eflux\", \"flux\", \"e2dnde\", \"ref\")):\n table[column].format = \".3e\"\n elif column.startswith(\n (\"e_min\", \"e_max\", \"e_ref\", \"sqrt_ts\", \"norm\", \"ts\", \"stat\")\n ):\n table[column].format = \".3f\"\n\n return table\n\n @classmethod\n def read(cls, filename, **kwargs):\n \"\"\"Read flux points.\n\n Parameters\n ----------\n filename : str\n Filename\n kwargs : dict\n Keyword arguments passed to `astropy.table.Table.read`.\n \"\"\"\n filename = make_path(filename)\n try:\n table = Table.read(filename, **kwargs)\n except IORegistryError:\n kwargs.setdefault(\"format\", \"ascii.ecsv\")\n table = Table.read(filename, **kwargs)\n\n if \"SED_TYPE\" not in table.meta.keys():\n sed_type = cls._guess_sed_type(table)\n table.meta[\"SED_TYPE\"] = sed_type\n\n # TODO: check sign and factor 2 here\n # https://github.com/gammapy/gammapy/pull/2546#issuecomment-554274318\n # The idea below is to support the format here:\n # https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/flux_points/index.html#likelihood-columns\n # but internally to go to the uniform \"stat\"\n\n if \"loglike\" in table.colnames and \"stat\" not in table.colnames:\n table[\"stat\"] = 2 * table[\"loglike\"]\n\n if \"loglike_null\" in table.colnames and \"stat_null\" not in table.colnames:\n table[\"stat_null\"] = 2 * table[\"loglike_null\"]\n\n if \"dloglike_scan\" in table.colnames and \"stat_scan\" not in table.colnames:\n table[\"stat_scan\"] = 2 * table[\"dloglike_scan\"]\n\n return cls(table=table)\n\n def write(self, filename, **kwargs):\n \"\"\"Write flux points.\n\n Parameters\n ----------\n filename : str\n Filename\n kwargs : dict\n Keyword arguments passed to `astropy.table.Table.write`.\n \"\"\"\n filename = make_path(filename)\n try:\n self.table.write(filename, **kwargs)\n except IORegistryError:\n kwargs.setdefault(\"format\", \"ascii.ecsv\")\n self.table.write(filename, **kwargs)\n\n @classmethod\n def stack(cls, flux_points):\n \"\"\"Create flux points by stacking list of flux points.\n\n The first `FluxPoints` object in the list is taken as a reference to infer\n column names and units for the stacked object.\n\n Parameters\n ----------\n flux_points : list of `FluxPoints`\n List of flux points to stack.\n\n Returns\n -------\n flux_points : `FluxPoints`\n Flux points without upper limit points.\n \"\"\"\n reference = flux_points[0].table\n\n tables = []\n for _ in flux_points:\n table = _.table\n for colname in reference.colnames:\n column = reference[colname]\n if column.unit:\n table[colname] = table[colname].quantity.to(column.unit)\n tables.append(table[reference.colnames])\n\n table_stacked = vstack(tables)\n table_stacked.meta[\"SED_TYPE\"] = reference.meta[\"SED_TYPE\"]\n\n return cls(table_stacked)\n\n def drop_ul(self):\n \"\"\"Drop upper limit flux points.\n\n Returns\n -------\n flux_points : `FluxPoints`\n Flux points with upper limit points removed.\n\n Examples\n --------\n >>> from gammapy.estimators import FluxPoints\n >>> filename = '$GAMMAPY_DATA/tests/spectrum/flux_points/flux_points.fits'\n >>> flux_points = FluxPoints.read(filename)\n >>> print(flux_points)\n FluxPoints(sed_type=\"flux\", n_points=24)\n >>> print(flux_points.drop_ul())\n FluxPoints(sed_type=\"flux\", n_points=19)\n\n Note: In order to reproduce the example you need the tests datasets folder.\n You may download it with the command\n ``gammapy download datasets --tests 
--out $GAMMAPY_DATA``\n \"\"\"\n table_drop_ul = self.table[~self.is_ul]\n return self.__class__(table_drop_ul)\n\n def _flux_to_dnde(self, e_ref, table, model, pwl_approx):\n if model is None:\n model = PowerLawSpectralModel()\n\n e_min, e_max = self.e_min, self.e_max\n\n flux = table[\"flux\"].quantity\n dnde = self._dnde_from_flux(flux, model, e_ref, e_min, e_max, pwl_approx)\n\n # Add to result table\n table[\"e_ref\"] = e_ref\n table[\"dnde\"] = dnde\n\n if \"flux_err\" in table.colnames:\n table[\"dnde_err\"] = dnde * table[\"flux_err\"].quantity / flux\n\n if \"flux_errn\" in table.colnames:\n table[\"dnde_errn\"] = dnde * table[\"flux_errn\"].quantity / flux\n table[\"dnde_errp\"] = dnde * table[\"flux_errp\"].quantity / flux\n\n if \"flux_ul\" in table.colnames:\n flux_ul = table[\"flux_ul\"].quantity\n dnde_ul = self._dnde_from_flux(\n flux_ul, model, e_ref, e_min, e_max, pwl_approx\n )\n table[\"dnde_ul\"] = dnde_ul\n\n return table\n\n @staticmethod\n def _dnde_to_e2dnde(e_ref, table):\n for suffix in [\"\", \"_ul\", \"_err\", \"_errp\", \"_errn\"]:\n try:\n data = table[\"dnde\" + suffix].quantity\n table[\"e2dnde\" + suffix] = (e_ref ** 2 * data).to(\n DEFAULT_UNIT[\"e2dnde\"]\n )\n except KeyError:\n continue\n\n return table\n\n @staticmethod\n def _e2dnde_to_dnde(e_ref, table):\n for suffix in [\"\", \"_ul\", \"_err\", \"_errp\", \"_errn\"]:\n try:\n data = table[\"e2dnde\" + suffix].quantity\n table[\"dnde\" + suffix] = (data / e_ref ** 2).to(DEFAULT_UNIT[\"dnde\"])\n except KeyError:\n continue\n\n return table\n\n def to_sed_type(self, sed_type, method=\"log_center\", model=None, pwl_approx=False):\n \"\"\"Convert to a different SED type (return new `FluxPoints`).\n\n See: https://ui.adsabs.harvard.edu/abs/1995NIMPA.355..541L for details\n on the `'lafferty'` method.\n\n Parameters\n ----------\n sed_type : {'dnde'}\n SED type to convert to.\n model : `~gammapy.modeling.models.SpectralModel`\n Spectral model assumption. Note that the value of the amplitude parameter\n does not matter. Still it is recommended to use something with the right\n scale and units. E.g. `amplitude = 1e-12 * u.Unit('cm-2 s-1 TeV-1')`\n method : {'lafferty', 'log_center', 'table'}\n Flux points `e_ref` estimation method:\n\n * `'laferty'` Lafferty & Wyatt model-based e_ref\n * `'log_center'` log bin center e_ref\n * `'table'` using column 'e_ref' from input flux_points\n pwl_approx : bool\n Use local power law appoximation at e_ref to compute differential flux\n from the integral flux. 
This method is used by the Fermi-LAT catalogs.\n\n Returns\n -------\n flux_points : `FluxPoints`\n Flux points including differential quantity columns `dnde`\n and `dnde_err` (optional), `dnde_ul` (optional).\n\n Examples\n --------\n >>> from gammapy.estimators import FluxPoints\n >>> from gammapy.modeling.models import PowerLawSpectralModel\n >>> filename = '$GAMMAPY_DATA/tests/spectrum/flux_points/flux_points.fits'\n >>> flux_points = FluxPoints.read(filename)\n >>> model = PowerLawSpectralModel(index=2.2)\n >>> flux_points_dnde = flux_points.to_sed_type('dnde', model=model)\n\n Note: In order to reproduce the example you need the tests datasets folder.\n You may download it with the command\n ``gammapy download datasets --tests --out $GAMMAPY_DATA``\n \"\"\"\n # TODO: implement other directions.\n table = self.table.copy()\n\n if self.sed_type == \"flux\" and sed_type == \"dnde\":\n # Compute e_ref\n if method == \"table\":\n e_ref = table[\"e_ref\"].quantity\n elif method == \"log_center\":\n e_ref = np.sqrt(self.e_min * self.e_max)\n elif method == \"lafferty\":\n # set e_ref that it represents the mean dnde in the given energy bin\n e_ref = self._e_ref_lafferty(model, self.e_min, self.e_max)\n else:\n raise ValueError(f\"Invalid method: {method}\")\n table = self._flux_to_dnde(e_ref, table, model, pwl_approx)\n\n elif self.sed_type == \"dnde\" and sed_type == \"e2dnde\":\n table = self._dnde_to_e2dnde(self.e_ref, table)\n\n elif self.sed_type == \"e2dnde\" and sed_type == \"dnde\":\n table = self._e2dnde_to_dnde(self.e_ref, table)\n\n elif self.sed_type == \"likelihood\" and sed_type in [\"dnde\", \"flux\", \"eflux\"]:\n for suffix in [\"\", \"_ul\", \"_err\", \"_errp\", \"_errn\"]:\n try:\n table[sed_type + suffix] = (\n table[\"ref_\" + sed_type] * table[\"norm\" + suffix]\n )\n except KeyError:\n continue\n elif self.sed_type == sed_type:\n # do nothing if the sed type is the same\n pass\n\n else:\n raise NotImplementedError\n\n table.meta[\"SED_TYPE\"] = sed_type\n return FluxPoints(table)\n\n @staticmethod\n def _e_ref_lafferty(model, e_min, e_max):\n \"\"\"Helper for `to_sed_type`.\n\n Compute e_ref that the value at e_ref corresponds\n to the mean value between e_min and e_max.\n \"\"\"\n flux = model.integral(e_min, e_max)\n dnde_mean = flux / (e_max - e_min)\n return model.inverse(dnde_mean)\n\n @staticmethod\n def _dnde_from_flux(flux, model, e_ref, e_min, e_max, pwl_approx):\n \"\"\"Helper for `to_sed_type`.\n\n Compute dnde under the assumption that flux equals expected\n flux from model.\n \"\"\"\n dnde_model = model(e_ref)\n\n if pwl_approx:\n index = model.spectral_index(e_ref)\n flux_model = PowerLawSpectralModel.evaluate_integral(\n emin=e_min,\n emax=e_max,\n index=index,\n reference=e_ref,\n amplitude=dnde_model,\n )\n else:\n flux_model = model.integral(e_min, e_max)\n\n return dnde_model * (flux / flux_model)\n\n @property\n def sed_type(self):\n \"\"\"SED type (str).\n\n One of: {'dnde', 'e2dnde', 'flux', 'eflux'}\n \"\"\"\n return self.table.meta[\"SED_TYPE\"]\n\n @staticmethod\n def _guess_sed_type(table):\n \"\"\"Guess SED type from table content.\"\"\"\n valid_sed_types = list(REQUIRED_COLUMNS.keys())\n for sed_type in valid_sed_types:\n required = set(REQUIRED_COLUMNS[sed_type])\n if required.issubset(table.colnames):\n return sed_type\n\n @staticmethod\n def _guess_sed_type_from_unit(unit):\n \"\"\"Guess SED type from unit.\"\"\"\n for sed_type, default_unit in DEFAULT_UNIT.items():\n if unit.is_equivalent(default_unit):\n return sed_type\n\n 
@staticmethod\n def _validate_table(table, sed_type):\n \"\"\"Validate input table.\"\"\"\n required = set(REQUIRED_COLUMNS[sed_type])\n\n if not required.issubset(table.colnames):\n missing = required.difference(table.colnames)\n raise ValueError(\n \"Missing columns for sed type '{}':\" \" {}\".format(sed_type, missing)\n )\n\n @staticmethod\n def _get_y_energy_unit(y_unit):\n \"\"\"Get energy part of the given y unit.\"\"\"\n try:\n return [_ for _ in y_unit.bases if _.physical_type == \"energy\"][0]\n except IndexError:\n return u.Unit(\"TeV\")\n\n def _plot_get_energy_err(self):\n \"\"\"Compute energy error for given sed type\"\"\"\n try:\n e_min = self.table[\"e_min\"].quantity\n e_max = self.table[\"e_max\"].quantity\n e_ref = self.e_ref\n x_err = ((e_ref - e_min), (e_max - e_ref))\n except KeyError:\n x_err = None\n return x_err\n\n def _plot_get_flux_err(self, sed_type=None):\n \"\"\"Compute flux error for given sed type\"\"\"\n try:\n # asymmetric error\n y_errn = self.table[sed_type + \"_errn\"].quantity\n y_errp = self.table[sed_type + \"_errp\"].quantity\n y_err = (y_errn, y_errp)\n except KeyError:\n try:\n # symmetric error\n y_err = self.table[sed_type + \"_err\"].quantity\n y_err = (y_err, y_err)\n except KeyError:\n # no error at all\n y_err = None\n return y_err\n\n @property\n def is_ul(self):\n try:\n return self.table[\"is_ul\"].data.astype(\"bool\")\n except KeyError:\n return np.isnan(self.table[self.sed_type])\n\n @property\n def e_ref(self):\n \"\"\"Reference energy.\n\n Defined by `e_ref` column in `FluxPoints.table` or computed as log\n center, if `e_min` and `e_max` columns are present in `FluxPoints.table`.\n\n Returns\n -------\n e_ref : `~astropy.units.Quantity`\n Reference energy.\n \"\"\"\n try:\n return self.table[\"e_ref\"].quantity\n except KeyError:\n return np.sqrt(self.e_min * self.e_max)\n\n @property\n def e_edges(self):\n \"\"\"Edges of the energy bin.\n\n Returns\n -------\n e_edges : `~astropy.units.Quantity`\n Energy edges.\n \"\"\"\n e_edges = list(self.e_min)\n e_edges += [self.e_max[-1]]\n return u.Quantity(e_edges, self.e_min.unit, copy=False)\n\n @property\n def e_min(self):\n \"\"\"Lower bound of energy bin.\n\n Defined by `e_min` column in `FluxPoints.table`.\n\n Returns\n -------\n e_min : `~astropy.units.Quantity`\n Lower bound of energy bin.\n \"\"\"\n return self.table[\"e_min\"].quantity\n\n @property\n def e_max(self):\n \"\"\"Upper bound of energy bin.\n\n Defined by ``e_max`` column in ``table``.\n\n Returns\n -------\n e_max : `~astropy.units.Quantity`\n Upper bound of energy bin.\n \"\"\"\n return self.table[\"e_max\"].quantity\n\n def plot(\n self, ax=None, energy_unit=\"TeV\", flux_unit=None, energy_power=0, **kwargs\n ):\n \"\"\"Plot flux points.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`\n Axis object to plot on.\n energy_unit : str, `~astropy.units.Unit`, optional\n Unit of the energy axis\n flux_unit : str, `~astropy.units.Unit`, optional\n Unit of the flux axis\n energy_power : int\n Power of energy to multiply y axis with\n kwargs : dict\n Keyword arguments passed to :func:`matplotlib.pyplot.errorbar`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axis object\n \"\"\"\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n sed_type = self.sed_type\n y_unit = u.Unit(flux_unit or DEFAULT_UNIT[sed_type])\n\n y = self.table[sed_type].quantity.to(y_unit)\n x = self.e_ref.to(energy_unit)\n\n # get errors and ul\n is_ul = self.is_ul\n x_err_all = self._plot_get_energy_err()\n y_err_all 
= self._plot_get_flux_err(sed_type)\n\n # handle energy power\n e_unit = self._get_y_energy_unit(y_unit)\n y_unit = y.unit * e_unit ** energy_power\n y = (y * np.power(x, energy_power)).to(y_unit)\n\n y_err, x_err = None, None\n\n if y_err_all:\n y_errn = (y_err_all[0] * np.power(x, energy_power)).to(y_unit)\n y_errp = (y_err_all[1] * np.power(x, energy_power)).to(y_unit)\n y_err = (y_errn[~is_ul].to_value(y_unit), y_errp[~is_ul].to_value(y_unit))\n\n if x_err_all:\n x_errn, x_errp = x_err_all\n x_err = (\n x_errn[~is_ul].to_value(energy_unit),\n x_errp[~is_ul].to_value(energy_unit),\n )\n\n # set flux points plotting defaults\n kwargs.setdefault(\"marker\", \"+\")\n kwargs.setdefault(\"ls\", \"None\")\n\n ebar = ax.errorbar(\n x[~is_ul].value, y[~is_ul].value, yerr=y_err, xerr=x_err, **kwargs\n )\n\n if is_ul.any():\n if x_err_all:\n x_errn, x_errp = x_err_all\n x_err = (\n x_errn[is_ul].to_value(energy_unit),\n x_errp[is_ul].to_value(energy_unit),\n )\n\n y_ul = self.table[sed_type + \"_ul\"].quantity\n y_ul = (y_ul * np.power(x, energy_power)).to(y_unit)\n\n y_err = (0.5 * y_ul[is_ul].value, np.zeros_like(y_ul[is_ul].value))\n\n kwargs.setdefault(\"color\", ebar[0].get_color())\n\n # pop label keyword to avoid that it appears twice in the legend\n kwargs.pop(\"label\", None)\n ax.errorbar(\n x[is_ul].value,\n y_ul[is_ul].value,\n xerr=x_err,\n yerr=y_err,\n uplims=True,\n **kwargs,\n )\n\n ax.set_xscale(\"log\", nonposx=\"clip\")\n ax.set_yscale(\"log\", nonposy=\"clip\")\n ax.set_xlabel(f\"Energy ({energy_unit})\")\n ax.set_ylabel(f\"{self.sed_type} ({y_unit})\")\n return ax\n\n def plot_ts_profiles(\n self,\n ax=None,\n energy_unit=\"TeV\",\n add_cbar=True,\n y_values=None,\n y_unit=None,\n **kwargs,\n ):\n \"\"\"Plot fit statistic SED profiles as a density plot.\n\n Parameters\n ----------\n ax : `~matplotlib.axes.Axes`\n Axis object to plot on.\n energy_unit : str, `~astropy.units.Unit`, optional\n Unit of the energy axis\n y_values : `astropy.units.Quantity`\n Array of y-values to use for the fit statistic profile evaluation.\n y_unit : str or `astropy.units.Unit`\n Unit to use for the y-axis.\n add_cbar : bool\n Whether to add a colorbar to the plot.\n kwargs : dict\n Keyword arguments passed to :func:`matplotlib.pyplot.pcolormesh`\n\n Returns\n -------\n ax : `~matplotlib.axes.Axes`\n Axis object\n \"\"\"\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n self._validate_table(self.table, \"likelihood\")\n y_unit = u.Unit(y_unit or DEFAULT_UNIT[self.sed_type])\n\n if y_values is None:\n ref_values = self.table[\"ref_\" + self.sed_type].quantity\n y_values = np.logspace(\n np.log10(0.2 * ref_values.value.min()),\n np.log10(5 * ref_values.value.max()),\n 500,\n )\n y_values = u.Quantity(y_values, y_unit, copy=False)\n\n x = self.e_edges.to(energy_unit)\n\n # Compute fit statistic \"image\" one energy bin at a time\n # by interpolating e2dnde at the log bin centers\n z = np.empty((len(self.table), len(y_values)))\n for idx, row in enumerate(self.table):\n y_ref = self.table[\"ref_\" + self.sed_type].quantity[idx]\n norm = (y_values / y_ref).to_value(\"\")\n norm_scan = row[\"norm_scan\"]\n ts_scan = row[\"stat_scan\"] - row[\"stat\"]\n interp = interpolate_profile(norm_scan, ts_scan)\n z[idx] = interp((norm,))\n\n kwargs.setdefault(\"vmax\", 0)\n kwargs.setdefault(\"vmin\", -4)\n kwargs.setdefault(\"zorder\", 0)\n kwargs.setdefault(\"cmap\", \"Blues\")\n kwargs.setdefault(\"linewidths\", 0)\n\n # clipped values are set to NaN so that they appear white on the 
plot\n z[-z < kwargs[\"vmin\"]] = np.nan\n caxes = ax.pcolormesh(x.value, y_values.value, -z.T, **kwargs)\n ax.set_xscale(\"log\", nonposx=\"clip\")\n ax.set_yscale(\"log\", nonposy=\"clip\")\n ax.set_xlabel(f\"Energy ({energy_unit})\")\n ax.set_ylabel(f\"{self.sed_type} ({y_values.unit})\")\n\n if add_cbar:\n label = \"fit statistic difference\"\n ax.figure.colorbar(caxes, ax=ax, label=label)\n\n return ax\n\n\nclass FluxPointsEstimator(Estimator):\n \"\"\"Flux points estimator.\n\n Estimates flux points for a given list of datasets, energies and spectral model.\n\n To estimate the flux point the amplitude of the reference spectral model is\n fitted within the energy range defined by the energy group. This is done for\n each group independently. The amplitude is re-normalized using the \"norm\" parameter,\n which specifies the deviation of the flux from the reference model in this\n energy group. See https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/binned_likelihoods/index.html\n for details.\n\n The method is also described in the Fermi-LAT catalog paper\n https://ui.adsabs.harvard.edu/#abs/2015ApJS..218...23A\n or the HESS Galactic Plane Survey paper\n https://ui.adsabs.harvard.edu/#abs/2018A%26A...612A...1H\n\n Parameters\n ----------\n e_edges : `~astropy.units.Quantity`\n Energy edges of the flux point bins.\n source : str or int\n For which source in the model to compute the flux points.\n norm_min : float\n Minimum value for the norm used for the fit statistic profile evaluation.\n norm_max : float\n Maximum value for the norm used for the fit statistic profile evaluation.\n norm_n_values : int\n Number of norm values used for the fit statistic profile.\n norm_values : `numpy.ndarray`\n Array of norm values to be used for the fit statistic profile.\n n_sigma : int\n Number of sigma to use for asymmetric error computation. Default is 1.\n n_sigma_ul : int\n Number of sigma to use for upper limit computation. Default is 2.\n reoptimize : bool\n Re-optimize other free model parameters.\n selection_optional : list of str\n Which additional quantities to estimate. 
Available options are:\n\n * \"errn-errp\": estimate asymmetric errors on flux.\n * \"ul\": estimate upper limits.\n * \"norm-scan\": estimate fit statistic profiles.\n\n By default all steps are executed.\n \"\"\"\n\n tag = \"FluxPointsEstimator\"\n _available_selection_optional = [\"errn-errp\", \"ul\", \"scan\"]\n\n def __init__(\n self,\n e_edges=[1, 10] * u.TeV,\n source=0,\n norm_min=0.2,\n norm_max=5,\n norm_n_values=11,\n norm_values=None,\n n_sigma=1,\n n_sigma_ul=2,\n reoptimize=False,\n selection_optional=\"all\",\n ):\n self.e_edges = e_edges\n self.source = source\n self.norm_min = norm_min\n self.norm_max = norm_max\n self.norm_n_values = norm_n_values\n self.norm_values = norm_values\n self.n_sigma = n_sigma\n self.n_sigma_ul = n_sigma_ul\n self.reoptimize = reoptimize\n self.selection_optional = selection_optional\n\n def _flux_estimator(self, e_min, e_max):\n return FluxEstimator(\n source=self.source,\n e_min=e_min,\n e_max=e_max,\n norm_min=self.norm_min,\n norm_max=self.norm_max,\n norm_n_values=self.norm_n_values,\n norm_values=self.norm_values,\n n_sigma=self.n_sigma,\n n_sigma_ul=self.n_sigma_ul,\n reoptimize=self.reoptimize,\n selection_optional=self.selection_optional,\n\n )\n\n def run(self, datasets):\n \"\"\"Run the flux point estimator for all energy groups.\n\n Parameters\n ----------\n datasets : list of `~gammapy.datasets.Dataset`\n Datasets\n\n Returns\n -------\n flux_points : `FluxPoints`\n Estimated flux points.\n \"\"\"\n datasets = Datasets(datasets).copy()\n\n rows = []\n\n for e_min, e_max in zip(self.e_edges[:-1], self.e_edges[1:]):\n row = self.estimate_flux_point(datasets, e_min=e_min, e_max=e_max)\n rows.append(row)\n\n table = table_from_row_data(rows=rows, meta={\"SED_TYPE\": \"likelihood\"})\n\n #TODO: this should be changed once likelihood is fully supported\n return FluxPoints(table).to_sed_type(\"dnde\")\n\n def estimate_flux_point(self, datasets, e_min, e_max):\n \"\"\"Estimate flux point for a single energy group.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets\n e_min, e_max : `~astropy.units.Quantity`\n Energy bounds to compute the flux point for.\n\n Returns\n -------\n result : dict\n Dict with results for the flux point.\n \"\"\"\n result = self.estimate_counts(datasets, e_min=e_min, e_max=e_max)\n\n datasets = datasets.slice_by_energy(e_min=e_min, e_max=e_max)\n\n if len(datasets) > 0:\n # TODO: refactor energy handling of FluxEstimator?\n energy_axis = datasets[0].counts.geom.axes[\"energy\"]\n e_min, e_max = energy_axis.edges.min(), energy_axis.edges.max()\n\n fe = self._flux_estimator(e_min=e_min, e_max=e_max)\n\n result.update(fe.run(datasets=datasets))\n\n return result\n\n @staticmethod\n def estimate_counts(datasets, e_min, e_max):\n \"\"\"Estimate counts for the flux point.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets\n e_min, e_max : `~astropy.units.Quantity`\n Energy bounds to compute the flux point for.\n\n Returns\n -------\n result : dict\n Dict with an array with one entry per dataset with counts for the flux point.\n \"\"\"\n counts = []\n\n for dataset in datasets:\n energy_mask = dataset.counts.geom.energy_mask(\n emin=e_min, emax=e_max, round_to_edge=True\n )\n mask = dataset.mask & energy_mask\n counts.append(dataset.counts.data[mask].sum())\n\n return {\"counts\": np.array(counts, dtype=int)}\n", "path": "gammapy/estimators/flux_point.py" } ]
diff --git a/gammapy/estimators/flux_point.py b/gammapy/estimators/flux_point.py index 62d1e45e36..685957d4bc 100644 --- a/gammapy/estimators/flux_point.py +++ b/gammapy/estimators/flux_point.py @@ -435,7 +435,7 @@ def _dnde_from_flux(flux, model, e_ref, e_min, e_max, pwl_approx): amplitude=dnde_model, ) else: - flux_model = model.integral(e_min, e_max, intervals=True) + flux_model = model.integral(e_min, e_max) return dnde_model * (flux / flux_model) diff --git a/gammapy/estimators/tests/test_flux_point.py b/gammapy/estimators/tests/test_flux_point.py index fed6f5d3d2..1441ee7774 100644 --- a/gammapy/estimators/tests/test_flux_point.py +++ b/gammapy/estimators/tests/test_flux_point.py @@ -139,6 +139,16 @@ def test_compute_flux_points_dnde_exp(method): assert_quantity_allclose(actual, desired, rtol=1e-8) +@requires_data() +def test_fermi_to_dnde(): + from gammapy.catalog import CATALOG_REGISTRY + catalog_4fgl = CATALOG_REGISTRY.get_cls("4fgl")() + src = catalog_4fgl["FGES J1553.8-5325"] + fp_dnde = src.flux_points.to_sed_type("dnde", model=src.spectral_model()) + + assert_allclose(fp_dnde.table["dnde"].quantity[1], 4.567393e-10 * u.Unit("cm-2 s-1 MeV-1"), rtol=1e-5) + + @pytest.fixture(params=FLUX_POINTS_FILES, scope="session") def flux_points(request): path = "$GAMMAPY_DATA/tests/spectrum/flux_points/" + request.param diff --git a/gammapy/modeling/models/tests/test_cube.py b/gammapy/modeling/models/tests/test_cube.py index a51b125e29..f7571911ae 100644 --- a/gammapy/modeling/models/tests/test_cube.py +++ b/gammapy/modeling/models/tests/test_cube.py @@ -23,7 +23,7 @@ SkyModel, create_fermi_isotropic_diffuse_model, ) -from gammapy.utils.testing import requires_data, mpl_plot_check +from gammapy.utils.testing import requires_data, mpl_plot_check, requires_dependency from gammapy.modeling import Parameter from astropy.coordinates.angle_utilities import angular_separation @@ -612,6 +612,7 @@ def test_energy_dependent_model(geom_true): assert_allclose(model.data.sum(), 1.678314e-14, rtol=1e-3) +@requires_dependency("matplotlib") def test_plot_grid(geom_true): spatial_model = MyCustomGaussianModel(frame="galactic") with mpl_plot_check(): diff --git a/gammapy/modeling/models/tests/test_spatial.py b/gammapy/modeling/models/tests/test_spatial.py index 86e4d2acba..489d3c3d08 100644 --- a/gammapy/modeling/models/tests/test_spatial.py +++ b/gammapy/modeling/models/tests/test_spatial.py @@ -235,6 +235,7 @@ def test_sky_diffuse_constant(): assert isinstance(model.to_region(), EllipseSkyRegion) +@requires_dependency("matplotlib") @requires_data() def test_sky_diffuse_map(): filename = "$GAMMAPY_DATA/catalogs/fermi/Extended_archive_v18/Templates/RXJ1713_2016_250GeV.fits" @@ -242,18 +243,23 @@ def test_sky_diffuse_map(): lon = [258.5, 0] * u.deg lat = -39.8 * u.deg val = model(lon, lat) + assert val.unit == "sr-1" desired = [3269.178107, 0] assert_allclose(val.value, desired) + res = model.evaluate_geom(model.map.geom) assert_allclose(np.sum(res.value), 32816514.42078349) radius = model.evaluation_radius + assert radius.unit == "deg" assert_allclose(radius.value, 0.64, rtol=1.0e-2) assert model.frame == "fk5" assert isinstance(model.to_region(), PolygonSkyRegion) + with pytest.raises(TypeError): model.plot_interative() + with pytest.raises(TypeError): model.plot_grid() @@ -266,13 +272,17 @@ def test_sky_diffuse_map_3d(): lat = -39.8 * u.deg energy = 1 * u.GeV val = model(lon, lat, energy) + with pytest.raises(ValueError): model(lon, lat) assert model.map.unit == "cm-2 s-1 MeV-1 sr-1" + val = model(lon, 
lat, energy) assert val.unit == "cm-2 s-1 MeV-1 sr-1" + res = model.evaluate_geom(model.map.geom) assert_allclose(np.sum(res.value), 0.11803847221522712) + with pytest.raises(TypeError): model.plot()
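The core change in the diff above drops the `intervals=True` keyword from `model.integral`. As a rough illustration only (not part of the patch), the scaling that `_dnde_from_flux` performs when `pwl_approx` is disabled can be sketched as follows; the integral flux value used here is a made-up placeholder:

```python
# Sketch of the flux -> dnde scaling used by FluxPoints._dnde_from_flux
# (non-pwl_approx branch): dnde = model(e_ref) * flux / model.integral(e_min, e_max)
import numpy as np
import astropy.units as u
from gammapy.modeling.models import PowerLawSpectralModel

model = PowerLawSpectralModel(index=2.2)
e_min, e_max = 1 * u.TeV, 10 * u.TeV
e_ref = np.sqrt(e_min * e_max)            # log bin center, as in to_sed_type("dnde")
flux = 1e-12 * u.Unit("cm-2 s-1")         # placeholder measured integral flux

dnde = model(e_ref) * flux / model.integral(e_min, e_max)
print(dnde.to("cm-2 s-1 TeV-1"))
```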
xorbitsai__inference-386
QUESTION: Upper bound for the max_tokens argument I noticed that there is an upper bound of 2048 tokens placed on the max_tokens argument in [xinference/core/restful_api.py](https://github.com/xorbitsai/inference/blob/67a06b40ef4ec36448e504fd23526043719211bc/xinference/core/restful_api.py#L39-L41) https://github.com/xorbitsai/inference/blob/67a06b40ef4ec36448e504fd23526043719211bc/xinference/core/restful_api.py#L39-L41 Why is this necessary? It seems reasonable to generate a 4000-token essay with a model whose context length supports it, especially now that there are models like [InternLM](https://github.com/xorbitsai/inference/blob/67a06b40ef4ec36448e504fd23526043719211bc/xinference/model/llm/llm_family.json#L965-L1007) with 8k context length and [ChatGLM2-32k](https://github.com/xorbitsai/inference/blob/67a06b40ef4ec36448e504fd23526043719211bc/xinference/model/llm/llm_family.json#L450-L483) with 32k context length.
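The bound in question comes from the pydantic `Field(..., le=2048)` declaration referenced above. A minimal sketch (assuming pydantic v1 semantics, which the file's `Config.schema_extra` usage suggests; `Demo` is a made-up model name) of how such a request is rejected before it ever reaches the model:

```python
from pydantic import BaseModel, Field, ValidationError

# Mirrors the declaration in xinference/core/restful_api.py
max_tokens_field = Field(
    default=128, ge=1, le=2048, description="The maximum number of tokens to generate."
)

class Demo(BaseModel):          # hypothetical stand-in for CreateCompletionRequest
    max_tokens: int = max_tokens_field

Demo(max_tokens=2048)           # accepted: within the declared bound
try:
    Demo(max_tokens=4000)       # rejected: exceeds le=2048
except ValidationError as err:
    print(err)
```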
[ { "content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport socket\nimport threading\nfrom functools import partial\nfrom typing import Any, Dict, List, Literal, Optional, Union\n\nimport anyio\nimport gradio as gr\nimport xoscar as xo\nfrom anyio.streams.memory import MemoryObjectSendStream\nfrom fastapi import APIRouter, FastAPI, HTTPException, Request\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import JSONResponse\nfrom pydantic import BaseModel, Field\nfrom sse_starlette.sse import EventSourceResponse\nfrom typing_extensions import NotRequired, TypedDict\nfrom uvicorn import Config, Server\n\nfrom ..types import ChatCompletion, Completion, Embedding\nfrom .supervisor import SupervisorActor\n\nlogger = logging.getLogger(__name__)\n\nmax_tokens_field = Field(\n default=128, ge=1, le=2048, description=\"The maximum number of tokens to generate.\"\n)\n\ntemperature_field = Field(\n default=0.8,\n ge=0.0,\n le=2.0,\n description=\"Adjust the randomness of the generated text.\\n\\n\"\n + \"Temperature is a hyperparameter that controls the randomness of the generated text. It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run.\",\n)\n\ntop_p_field = Field(\n default=0.95,\n ge=0.0,\n le=1.0,\n description=\"Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\\n\\n\"\n + \"Top-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.\",\n)\n\nstop_field = Field(\n default=None,\n description=\"A list of tokens at which to stop generation. If None, no stop tokens are used.\",\n)\n\nstream_field = Field(\n default=False,\n description=\"Whether to stream the results as they are generated. Useful for chatbots.\",\n)\n\ntop_k_field = Field(\n default=40,\n ge=0,\n description=\"Limit the next token selection to the K most probable tokens.\\n\\n\"\n + \"Top-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. 
A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text.\",\n)\n\nrepetition_penalty_field = Field(\n default=1.1,\n ge=0.0,\n description=\"A penalty applied to each token that is already generated. This helps prevent the model from repeating itself.\\n\\n\"\n + \"Repeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient.\",\n)\n\npresence_penalty_field = Field(\n default=0.0,\n ge=-2.0,\n le=2.0,\n description=\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\",\n)\n\nfrequency_penalty_field = Field(\n default=0.0,\n ge=-2.0,\n le=2.0,\n description=\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\",\n)\n\nmirostat_mode_field = Field(\n default=0,\n ge=0,\n le=2,\n description=\"Enable Mirostat constant-perplexity algorithm of the specified version (1 or 2; 0 = disabled)\",\n)\n\nmirostat_tau_field = Field(\n default=5.0,\n ge=0.0,\n le=10.0,\n description=\"Mirostat target entropy, i.e. the target perplexity - lower values produce focused and coherent text, larger values produce more diverse and less coherent text\",\n)\n\nmirostat_eta_field = Field(\n default=0.1, ge=0.001, le=1.0, description=\"Mirostat learning rate\"\n)\n\n\nclass CreateCompletionRequest(BaseModel):\n prompt: str\n suffix: Optional[str] = Field(None)\n max_tokens: int = max_tokens_field\n temperature: float = temperature_field\n top_p: float = top_p_field\n mirostat_mode: int = mirostat_mode_field\n mirostat_tau: float = mirostat_tau_field\n mirostat_eta: float = mirostat_eta_field\n echo: bool = Field(\n default=False,\n description=\"Whether to echo the prompt in the generated text. Useful for chatbots.\",\n )\n stop: Optional[Union[str, List[str]]] = stop_field\n stream: bool = stream_field\n logprobs: Optional[int] = Field(\n default=None,\n ge=0,\n description=\"The number of logprobs to generate. 
If None, no logprobs are generated.\",\n )\n presence_penalty: Optional[float] = presence_penalty_field\n frequency_penalty: Optional[float] = frequency_penalty_field\n logit_bias: Optional[Dict[str, float]] = Field(None)\n\n model: str\n n: Optional[int] = 1\n best_of: Optional[int] = 1\n user: Optional[str] = Field(None)\n\n # llama.cpp specific parameters\n top_k: int = top_k_field\n repetition_penalty: float = repetition_penalty_field\n logit_bias_type: Optional[Literal[\"input_ids\", \"tokens\"]] = Field(None)\n\n class Config:\n schema_extra = {\n \"example\": {\n \"prompt\": \"\\n\\n### Instructions:\\nWhat is the capital of France?\\n\\n### Response:\\n\",\n \"stop\": [\"\\n\", \"###\"],\n }\n }\n\n\nclass CreateEmbeddingRequest(BaseModel):\n model: str\n input: Union[str, List[str]] = Field(description=\"The input to embed.\")\n user: Optional[str] = None\n\n class Config:\n schema_extra = {\n \"example\": {\n \"input\": \"The food was delicious and the waiter...\",\n }\n }\n\n\nclass ChatCompletionRequestMessage(TypedDict):\n role: Literal[\"assistant\", \"user\", \"system\"]\n content: str\n user: NotRequired[str]\n\n\nclass CreateChatCompletionRequest(BaseModel):\n messages: List[ChatCompletionRequestMessage] = Field(\n default=[], description=\"A list of messages to generate completions for.\"\n )\n max_tokens: int = max_tokens_field\n temperature: float = temperature_field\n top_p: float = top_p_field\n mirostat_mode: int = mirostat_mode_field\n mirostat_tau: float = mirostat_tau_field\n mirostat_eta: float = mirostat_eta_field\n stop: Optional[Union[str, List[str]]] = stop_field\n stream: bool = stream_field\n presence_penalty: Optional[float] = presence_penalty_field\n frequency_penalty: Optional[float] = frequency_penalty_field\n logit_bias: Optional[Dict[str, float]] = Field(None)\n\n model: str\n n: Optional[int] = 1\n user: Optional[str] = Field(None)\n\n # llama.cpp specific parameters\n top_k: int = top_k_field\n repetition_penalty: float = repetition_penalty_field\n logit_bias_type: Optional[Literal[\"input_ids\", \"tokens\"]] = Field(None)\n\n class Config:\n schema_extra = {\n \"example\": {\n \"messages\": [\n {\"role\": \"system\", \"content\": \"you are a helpful AI assistant\"},\n {\"role\": \"user\", \"content\": \"Hello!\"},\n {\"role\": \"assistant\", \"content\": \"Hi what can I help you?\"},\n ]\n }\n }\n\n\nclass RegisterModelRequest(BaseModel):\n model: str\n persist: bool\n\n\nclass RESTfulAPIActor(xo.Actor):\n def __init__(self, sockets: List[socket.socket], gradio_block: gr.Blocks):\n super().__init__()\n self._supervisor_ref: xo.ActorRefType[\"SupervisorActor\"]\n self._sockets = sockets\n self._gradio_block = gradio_block\n self._router = None\n\n @classmethod\n def uid(cls) -> str:\n return \"RESTfulAPI\"\n\n async def __post_create__(self):\n self._supervisor_ref = await xo.actor_ref(\n address=self.address, uid=SupervisorActor.uid()\n )\n\n def serve(self):\n app = FastAPI()\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n self._router = APIRouter()\n self._router.add_api_route(\"/v1/models\", self.list_models, methods=[\"GET\"])\n self._router.add_api_route(\n \"/v1/models/{model_uid}\", self.describe_model, methods=[\"GET\"]\n )\n self._router.add_api_route(\"/v1/models\", self.launch_model, methods=[\"POST\"])\n self._router.add_api_route(\n \"/v1/models/{model_uid}\", self.terminate_model, methods=[\"DELETE\"]\n )\n 
self._router.add_api_route(\"/v1/address\", self.get_address, methods=[\"GET\"])\n self._router.add_api_route(\n \"/v1/completions\",\n self.create_completion,\n methods=[\"POST\"],\n response_model=Completion,\n )\n self._router.add_api_route(\n \"/v1/embeddings\",\n self.create_embedding,\n methods=[\"POST\"],\n response_model=Embedding,\n )\n self._router.add_api_route(\n \"/v1/chat/completions\",\n self.create_chat_completion,\n methods=[\"POST\"],\n response_model=ChatCompletion,\n )\n\n # for custom models\n self._router.add_api_route(\n \"/v1/model_registrations/{model_type}\",\n self.register_model,\n methods=[\"POST\"],\n )\n self._router.add_api_route(\n \"/v1/model_registrations/{model_type}/{model_name}\",\n self.unregister_model,\n methods=[\"DELETE\"],\n )\n self._router.add_api_route(\n \"/v1/model_registrations/{model_type}\",\n self.list_model_registrations,\n methods=[\"GET\"],\n )\n self._router.add_api_route(\n \"/v1/model_registrations/{model_type}/{model_name}\",\n self.get_model_registrations,\n methods=[\"GET\"],\n )\n\n app.include_router(self._router)\n app = gr.mount_gradio_app(app, self._gradio_block, path=\"/\")\n\n # run uvicorn in another daemon thread.\n config = Config(app=app, log_level=\"critical\")\n server = Server(config)\n\n def _serve():\n httpx_logger = logging.getLogger(\"httpx\")\n httpx_logger.setLevel(logging.CRITICAL)\n server.run(self._sockets)\n\n server_thread = threading.Thread(target=_serve, daemon=True)\n server_thread.start()\n\n async def list_models(self) -> Dict[str, Dict[str, Any]]:\n try:\n return await self._supervisor_ref.list_models()\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def describe_model(self, model_uid: str) -> Dict[str, Any]:\n try:\n return await self._supervisor_ref.describe_model(model_uid)\n\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def launch_model(self, request: Request) -> JSONResponse:\n payload = await request.json()\n model_uid = payload.get(\"model_uid\")\n model_name = payload.get(\"model_name\")\n model_size_in_billions = payload.get(\"model_size_in_billions\")\n model_format = payload.get(\"model_format\")\n quantization = payload.get(\"quantization\")\n\n exclude_keys = {\n \"model_uid\",\n \"model_name\",\n \"model_size_in_billions\",\n \"model_format\",\n \"quantization\",\n }\n\n kwargs = {\n key: value for key, value in payload.items() if key not in exclude_keys\n }\n\n if model_uid is None or model_uid is None:\n raise HTTPException(\n status_code=400,\n detail=\"Invalid input. 
Please specify the model UID and the model name\",\n )\n\n try:\n await self._supervisor_ref.launch_builtin_model(\n model_uid=model_uid,\n model_name=model_name,\n model_size_in_billions=model_size_in_billions,\n model_format=model_format,\n quantization=quantization,\n **kwargs,\n )\n\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n except RuntimeError as re:\n logger.error(str(re), exc_info=True)\n raise HTTPException(status_code=503, detail=str(re))\n except Exception as e:\n logger.error(str(e), exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n return JSONResponse(content={\"model_uid\": model_uid})\n\n async def terminate_model(self, model_uid: str):\n try:\n await self._supervisor_ref.terminate_model(model_uid)\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def get_address(self):\n return self.address\n\n async def create_completion(self, request: Request, body: CreateCompletionRequest):\n exclude = {\n \"prompt\",\n \"model\",\n \"n\",\n \"best_of\",\n \"logit_bias\",\n \"logit_bias_type\",\n \"user\",\n }\n kwargs = body.dict(exclude=exclude)\n\n if body.logit_bias is not None:\n raise HTTPException(status_code=501, detail=\"Not implemented\")\n\n model_uid = body.model\n\n try:\n model = await self._supervisor_ref.get_model(model_uid)\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n if body.stream:\n # create a pair of memory object streams\n send_chan, recv_chan = anyio.create_memory_object_stream(10)\n\n async def event_publisher(inner_send_chan: MemoryObjectSendStream):\n async with inner_send_chan:\n try:\n iterator = await model.generate(body.prompt, kwargs)\n async for chunk in iterator:\n await inner_send_chan.send(dict(data=json.dumps(chunk)))\n if await request.is_disconnected():\n raise anyio.get_cancelled_exc_class()()\n except anyio.get_cancelled_exc_class() as e:\n logger.warning(\"disconnected\")\n with anyio.move_on_after(1, shield=True):\n logger.warning(\n f\"Disconnected from client (via refresh/close) {request.client}\"\n )\n await inner_send_chan.send(dict(closing=True))\n raise e\n except Exception as e:\n raise HTTPException(status_code=500, detail=str(e))\n\n return EventSourceResponse(\n recv_chan, data_sender_callable=partial(event_publisher, send_chan)\n )\n\n else:\n try:\n return await model.generate(body.prompt, kwargs)\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def create_embedding(self, request: CreateEmbeddingRequest):\n model_uid = request.model\n\n try:\n model = await self._supervisor_ref.get_model(model_uid)\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n input = request.input\n\n try:\n embedding = await model.create_embedding(input)\n return embedding\n except RuntimeError as re:\n logger.error(re, exc_info=True)\n raise HTTPException(status_code=400, detail=str(re))\n except Exception as e:\n 
logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def create_chat_completion(\n self,\n request: Request,\n body: CreateChatCompletionRequest,\n ):\n exclude = {\n \"n\",\n \"model\",\n \"messages\",\n \"logit_bias\",\n \"logit_bias_type\",\n \"user\",\n }\n kwargs = body.dict(exclude=exclude)\n\n if body.logit_bias is not None:\n raise HTTPException(status_code=501, detail=\"Not implemented\")\n\n if (\n not body.messages\n or body.messages[-1].get(\"role\") != \"user\"\n or not body.messages[-1].get(\"content\")\n ):\n raise HTTPException(\n status_code=400, detail=\"Invalid input. Please specify the prompt.\"\n )\n\n prompt = body.messages[-1][\"content\"]\n\n system_prompt = next(\n (msg[\"content\"] for msg in body.messages if msg[\"role\"] == \"system\"), None\n )\n\n chat_history = body.messages[:-1] # exclude the prompt\n\n model_uid = body.model\n\n try:\n model = await self._supervisor_ref.get_model(model_uid)\n\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n try:\n desc = await self._supervisor_ref.describe_model(model_uid)\n\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n is_chatglm_ggml = desc.get(\n \"model_format\"\n ) == \"ggmlv3\" and \"chatglm\" in desc.get(\"model_name\", \"\")\n\n if is_chatglm_ggml and system_prompt is not None:\n raise HTTPException(\n status_code=400, detail=\"ChatGLM ggml does not have system prompt\"\n )\n\n if body.stream:\n # create a pair of memory object streams\n send_chan, recv_chan = anyio.create_memory_object_stream(10)\n\n async def event_publisher(inner_send_chan: MemoryObjectSendStream):\n async with inner_send_chan:\n try:\n if is_chatglm_ggml:\n iterator = await model.chat(prompt, chat_history, kwargs)\n else:\n iterator = await model.chat(\n prompt, system_prompt, chat_history, kwargs\n )\n async for chunk in iterator:\n await inner_send_chan.send(dict(data=json.dumps(chunk)))\n if await request.is_disconnected():\n raise anyio.get_cancelled_exc_class()()\n except anyio.get_cancelled_exc_class() as e:\n logger.warning(\"disconnected\")\n with anyio.move_on_after(1, shield=True):\n logger.warning(\n f\"Disconnected from client (via refresh/close) {request.client}\"\n )\n await inner_send_chan.send(dict(closing=True))\n raise e\n except Exception as e:\n raise HTTPException(status_code=500, detail=str(e))\n\n return EventSourceResponse(\n recv_chan, data_sender_callable=partial(event_publisher, send_chan)\n )\n\n else:\n try:\n if is_chatglm_ggml:\n return await model.chat(prompt, chat_history, kwargs)\n else:\n return await model.chat(prompt, system_prompt, chat_history, kwargs)\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def register_model(self, model_type: str, request: RegisterModelRequest):\n model = request.model\n persist = request.persist\n\n try:\n await self._supervisor_ref.register_model(model_type, model, persist)\n except ValueError as re:\n logger.error(re, exc_info=True)\n raise HTTPException(status_code=400, detail=str(re))\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, 
detail=str(e))\n\n async def unregister_model(self, model_type: str, model_name: str):\n try:\n await self._supervisor_ref.unregister_model(model_type, model_name)\n except ValueError as re:\n logger.error(re, exc_info=True)\n raise HTTPException(status_code=400, detail=str(re))\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def list_model_registrations(self, model_type: str) -> List[Dict[str, Any]]:\n try:\n return await self._supervisor_ref.list_model_registrations(model_type)\n except ValueError as re:\n logger.error(re, exc_info=True)\n raise HTTPException(status_code=400, detail=str(re))\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def get_model_registrations(\n self, model_type: str, model_name: str\n ) -> Dict[str, Any]:\n try:\n return await self._supervisor_ref.get_model_registration(\n model_type, model_name\n )\n except ValueError as re:\n logger.error(re, exc_info=True)\n raise HTTPException(status_code=400, detail=str(re))\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n", "path": "xinference/core/restful_api.py" } ]
[ { "content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nimport socket\nimport threading\nfrom functools import partial\nfrom typing import Any, Dict, List, Literal, Optional, Union\n\nimport anyio\nimport gradio as gr\nimport xoscar as xo\nfrom anyio.streams.memory import MemoryObjectSendStream\nfrom fastapi import APIRouter, FastAPI, HTTPException, Request\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import JSONResponse\nfrom pydantic import BaseModel, Field\nfrom sse_starlette.sse import EventSourceResponse\nfrom typing_extensions import NotRequired, TypedDict\nfrom uvicorn import Config, Server\n\nfrom ..types import ChatCompletion, Completion, Embedding\nfrom .supervisor import SupervisorActor\n\nlogger = logging.getLogger(__name__)\n\nmax_tokens_field = Field(\n default=128, ge=1, le=32768, description=\"The maximum number of tokens to generate.\"\n)\n\ntemperature_field = Field(\n default=0.8,\n ge=0.0,\n le=2.0,\n description=\"Adjust the randomness of the generated text.\\n\\n\"\n + \"Temperature is a hyperparameter that controls the randomness of the generated text. It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run.\",\n)\n\ntop_p_field = Field(\n default=0.95,\n ge=0.0,\n le=1.0,\n description=\"Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\\n\\n\"\n + \"Top-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.\",\n)\n\nstop_field = Field(\n default=None,\n description=\"A list of tokens at which to stop generation. If None, no stop tokens are used.\",\n)\n\nstream_field = Field(\n default=False,\n description=\"Whether to stream the results as they are generated. Useful for chatbots.\",\n)\n\ntop_k_field = Field(\n default=40,\n ge=0,\n description=\"Limit the next token selection to the K most probable tokens.\\n\\n\"\n + \"Top-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. 
A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text.\",\n)\n\nrepetition_penalty_field = Field(\n default=1.1,\n ge=0.0,\n description=\"A penalty applied to each token that is already generated. This helps prevent the model from repeating itself.\\n\\n\"\n + \"Repeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient.\",\n)\n\npresence_penalty_field = Field(\n default=0.0,\n ge=-2.0,\n le=2.0,\n description=\"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\",\n)\n\nfrequency_penalty_field = Field(\n default=0.0,\n ge=-2.0,\n le=2.0,\n description=\"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\",\n)\n\nmirostat_mode_field = Field(\n default=0,\n ge=0,\n le=2,\n description=\"Enable Mirostat constant-perplexity algorithm of the specified version (1 or 2; 0 = disabled)\",\n)\n\nmirostat_tau_field = Field(\n default=5.0,\n ge=0.0,\n le=10.0,\n description=\"Mirostat target entropy, i.e. the target perplexity - lower values produce focused and coherent text, larger values produce more diverse and less coherent text\",\n)\n\nmirostat_eta_field = Field(\n default=0.1, ge=0.001, le=1.0, description=\"Mirostat learning rate\"\n)\n\n\nclass CreateCompletionRequest(BaseModel):\n prompt: str\n suffix: Optional[str] = Field(None)\n max_tokens: int = max_tokens_field\n temperature: float = temperature_field\n top_p: float = top_p_field\n mirostat_mode: int = mirostat_mode_field\n mirostat_tau: float = mirostat_tau_field\n mirostat_eta: float = mirostat_eta_field\n echo: bool = Field(\n default=False,\n description=\"Whether to echo the prompt in the generated text. Useful for chatbots.\",\n )\n stop: Optional[Union[str, List[str]]] = stop_field\n stream: bool = stream_field\n logprobs: Optional[int] = Field(\n default=None,\n ge=0,\n description=\"The number of logprobs to generate. 
If None, no logprobs are generated.\",\n )\n presence_penalty: Optional[float] = presence_penalty_field\n frequency_penalty: Optional[float] = frequency_penalty_field\n logit_bias: Optional[Dict[str, float]] = Field(None)\n\n model: str\n n: Optional[int] = 1\n best_of: Optional[int] = 1\n user: Optional[str] = Field(None)\n\n # llama.cpp specific parameters\n top_k: int = top_k_field\n repetition_penalty: float = repetition_penalty_field\n logit_bias_type: Optional[Literal[\"input_ids\", \"tokens\"]] = Field(None)\n\n class Config:\n schema_extra = {\n \"example\": {\n \"prompt\": \"\\n\\n### Instructions:\\nWhat is the capital of France?\\n\\n### Response:\\n\",\n \"stop\": [\"\\n\", \"###\"],\n }\n }\n\n\nclass CreateEmbeddingRequest(BaseModel):\n model: str\n input: Union[str, List[str]] = Field(description=\"The input to embed.\")\n user: Optional[str] = None\n\n class Config:\n schema_extra = {\n \"example\": {\n \"input\": \"The food was delicious and the waiter...\",\n }\n }\n\n\nclass ChatCompletionRequestMessage(TypedDict):\n role: Literal[\"assistant\", \"user\", \"system\"]\n content: str\n user: NotRequired[str]\n\n\nclass CreateChatCompletionRequest(BaseModel):\n messages: List[ChatCompletionRequestMessage] = Field(\n default=[], description=\"A list of messages to generate completions for.\"\n )\n max_tokens: int = max_tokens_field\n temperature: float = temperature_field\n top_p: float = top_p_field\n mirostat_mode: int = mirostat_mode_field\n mirostat_tau: float = mirostat_tau_field\n mirostat_eta: float = mirostat_eta_field\n stop: Optional[Union[str, List[str]]] = stop_field\n stream: bool = stream_field\n presence_penalty: Optional[float] = presence_penalty_field\n frequency_penalty: Optional[float] = frequency_penalty_field\n logit_bias: Optional[Dict[str, float]] = Field(None)\n\n model: str\n n: Optional[int] = 1\n user: Optional[str] = Field(None)\n\n # llama.cpp specific parameters\n top_k: int = top_k_field\n repetition_penalty: float = repetition_penalty_field\n logit_bias_type: Optional[Literal[\"input_ids\", \"tokens\"]] = Field(None)\n\n class Config:\n schema_extra = {\n \"example\": {\n \"messages\": [\n {\"role\": \"system\", \"content\": \"you are a helpful AI assistant\"},\n {\"role\": \"user\", \"content\": \"Hello!\"},\n {\"role\": \"assistant\", \"content\": \"Hi what can I help you?\"},\n ]\n }\n }\n\n\nclass RegisterModelRequest(BaseModel):\n model: str\n persist: bool\n\n\nclass RESTfulAPIActor(xo.Actor):\n def __init__(self, sockets: List[socket.socket], gradio_block: gr.Blocks):\n super().__init__()\n self._supervisor_ref: xo.ActorRefType[\"SupervisorActor\"]\n self._sockets = sockets\n self._gradio_block = gradio_block\n self._router = None\n\n @classmethod\n def uid(cls) -> str:\n return \"RESTfulAPI\"\n\n async def __post_create__(self):\n self._supervisor_ref = await xo.actor_ref(\n address=self.address, uid=SupervisorActor.uid()\n )\n\n def serve(self):\n app = FastAPI()\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n self._router = APIRouter()\n self._router.add_api_route(\"/v1/models\", self.list_models, methods=[\"GET\"])\n self._router.add_api_route(\n \"/v1/models/{model_uid}\", self.describe_model, methods=[\"GET\"]\n )\n self._router.add_api_route(\"/v1/models\", self.launch_model, methods=[\"POST\"])\n self._router.add_api_route(\n \"/v1/models/{model_uid}\", self.terminate_model, methods=[\"DELETE\"]\n )\n 
self._router.add_api_route(\"/v1/address\", self.get_address, methods=[\"GET\"])\n self._router.add_api_route(\n \"/v1/completions\",\n self.create_completion,\n methods=[\"POST\"],\n response_model=Completion,\n )\n self._router.add_api_route(\n \"/v1/embeddings\",\n self.create_embedding,\n methods=[\"POST\"],\n response_model=Embedding,\n )\n self._router.add_api_route(\n \"/v1/chat/completions\",\n self.create_chat_completion,\n methods=[\"POST\"],\n response_model=ChatCompletion,\n )\n\n # for custom models\n self._router.add_api_route(\n \"/v1/model_registrations/{model_type}\",\n self.register_model,\n methods=[\"POST\"],\n )\n self._router.add_api_route(\n \"/v1/model_registrations/{model_type}/{model_name}\",\n self.unregister_model,\n methods=[\"DELETE\"],\n )\n self._router.add_api_route(\n \"/v1/model_registrations/{model_type}\",\n self.list_model_registrations,\n methods=[\"GET\"],\n )\n self._router.add_api_route(\n \"/v1/model_registrations/{model_type}/{model_name}\",\n self.get_model_registrations,\n methods=[\"GET\"],\n )\n\n app.include_router(self._router)\n app = gr.mount_gradio_app(app, self._gradio_block, path=\"/\")\n\n # run uvicorn in another daemon thread.\n config = Config(app=app, log_level=\"critical\")\n server = Server(config)\n\n def _serve():\n httpx_logger = logging.getLogger(\"httpx\")\n httpx_logger.setLevel(logging.CRITICAL)\n server.run(self._sockets)\n\n server_thread = threading.Thread(target=_serve, daemon=True)\n server_thread.start()\n\n async def list_models(self) -> Dict[str, Dict[str, Any]]:\n try:\n return await self._supervisor_ref.list_models()\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def describe_model(self, model_uid: str) -> Dict[str, Any]:\n try:\n return await self._supervisor_ref.describe_model(model_uid)\n\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def launch_model(self, request: Request) -> JSONResponse:\n payload = await request.json()\n model_uid = payload.get(\"model_uid\")\n model_name = payload.get(\"model_name\")\n model_size_in_billions = payload.get(\"model_size_in_billions\")\n model_format = payload.get(\"model_format\")\n quantization = payload.get(\"quantization\")\n\n exclude_keys = {\n \"model_uid\",\n \"model_name\",\n \"model_size_in_billions\",\n \"model_format\",\n \"quantization\",\n }\n\n kwargs = {\n key: value for key, value in payload.items() if key not in exclude_keys\n }\n\n if model_uid is None or model_uid is None:\n raise HTTPException(\n status_code=400,\n detail=\"Invalid input. 
Please specify the model UID and the model name\",\n )\n\n try:\n await self._supervisor_ref.launch_builtin_model(\n model_uid=model_uid,\n model_name=model_name,\n model_size_in_billions=model_size_in_billions,\n model_format=model_format,\n quantization=quantization,\n **kwargs,\n )\n\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n except RuntimeError as re:\n logger.error(str(re), exc_info=True)\n raise HTTPException(status_code=503, detail=str(re))\n except Exception as e:\n logger.error(str(e), exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n return JSONResponse(content={\"model_uid\": model_uid})\n\n async def terminate_model(self, model_uid: str):\n try:\n await self._supervisor_ref.terminate_model(model_uid)\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def get_address(self):\n return self.address\n\n async def create_completion(self, request: Request, body: CreateCompletionRequest):\n exclude = {\n \"prompt\",\n \"model\",\n \"n\",\n \"best_of\",\n \"logit_bias\",\n \"logit_bias_type\",\n \"user\",\n }\n kwargs = body.dict(exclude=exclude)\n\n if body.logit_bias is not None:\n raise HTTPException(status_code=501, detail=\"Not implemented\")\n\n model_uid = body.model\n\n try:\n model = await self._supervisor_ref.get_model(model_uid)\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n if body.stream:\n # create a pair of memory object streams\n send_chan, recv_chan = anyio.create_memory_object_stream(10)\n\n async def event_publisher(inner_send_chan: MemoryObjectSendStream):\n async with inner_send_chan:\n try:\n iterator = await model.generate(body.prompt, kwargs)\n async for chunk in iterator:\n await inner_send_chan.send(dict(data=json.dumps(chunk)))\n if await request.is_disconnected():\n raise anyio.get_cancelled_exc_class()()\n except anyio.get_cancelled_exc_class() as e:\n logger.warning(\"disconnected\")\n with anyio.move_on_after(1, shield=True):\n logger.warning(\n f\"Disconnected from client (via refresh/close) {request.client}\"\n )\n await inner_send_chan.send(dict(closing=True))\n raise e\n except Exception as e:\n raise HTTPException(status_code=500, detail=str(e))\n\n return EventSourceResponse(\n recv_chan, data_sender_callable=partial(event_publisher, send_chan)\n )\n\n else:\n try:\n return await model.generate(body.prompt, kwargs)\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def create_embedding(self, request: CreateEmbeddingRequest):\n model_uid = request.model\n\n try:\n model = await self._supervisor_ref.get_model(model_uid)\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n input = request.input\n\n try:\n embedding = await model.create_embedding(input)\n return embedding\n except RuntimeError as re:\n logger.error(re, exc_info=True)\n raise HTTPException(status_code=400, detail=str(re))\n except Exception as e:\n 
logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def create_chat_completion(\n self,\n request: Request,\n body: CreateChatCompletionRequest,\n ):\n exclude = {\n \"n\",\n \"model\",\n \"messages\",\n \"logit_bias\",\n \"logit_bias_type\",\n \"user\",\n }\n kwargs = body.dict(exclude=exclude)\n\n if body.logit_bias is not None:\n raise HTTPException(status_code=501, detail=\"Not implemented\")\n\n if (\n not body.messages\n or body.messages[-1].get(\"role\") != \"user\"\n or not body.messages[-1].get(\"content\")\n ):\n raise HTTPException(\n status_code=400, detail=\"Invalid input. Please specify the prompt.\"\n )\n\n prompt = body.messages[-1][\"content\"]\n\n system_prompt = next(\n (msg[\"content\"] for msg in body.messages if msg[\"role\"] == \"system\"), None\n )\n\n chat_history = body.messages[:-1] # exclude the prompt\n\n model_uid = body.model\n\n try:\n model = await self._supervisor_ref.get_model(model_uid)\n\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n try:\n desc = await self._supervisor_ref.describe_model(model_uid)\n\n except ValueError as ve:\n logger.error(str(ve), exc_info=True)\n raise HTTPException(status_code=400, detail=str(ve))\n\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n is_chatglm_ggml = desc.get(\n \"model_format\"\n ) == \"ggmlv3\" and \"chatglm\" in desc.get(\"model_name\", \"\")\n\n if is_chatglm_ggml and system_prompt is not None:\n raise HTTPException(\n status_code=400, detail=\"ChatGLM ggml does not have system prompt\"\n )\n\n if body.stream:\n # create a pair of memory object streams\n send_chan, recv_chan = anyio.create_memory_object_stream(10)\n\n async def event_publisher(inner_send_chan: MemoryObjectSendStream):\n async with inner_send_chan:\n try:\n if is_chatglm_ggml:\n iterator = await model.chat(prompt, chat_history, kwargs)\n else:\n iterator = await model.chat(\n prompt, system_prompt, chat_history, kwargs\n )\n async for chunk in iterator:\n await inner_send_chan.send(dict(data=json.dumps(chunk)))\n if await request.is_disconnected():\n raise anyio.get_cancelled_exc_class()()\n except anyio.get_cancelled_exc_class() as e:\n logger.warning(\"disconnected\")\n with anyio.move_on_after(1, shield=True):\n logger.warning(\n f\"Disconnected from client (via refresh/close) {request.client}\"\n )\n await inner_send_chan.send(dict(closing=True))\n raise e\n except Exception as e:\n raise HTTPException(status_code=500, detail=str(e))\n\n return EventSourceResponse(\n recv_chan, data_sender_callable=partial(event_publisher, send_chan)\n )\n\n else:\n try:\n if is_chatglm_ggml:\n return await model.chat(prompt, chat_history, kwargs)\n else:\n return await model.chat(prompt, system_prompt, chat_history, kwargs)\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def register_model(self, model_type: str, request: RegisterModelRequest):\n model = request.model\n persist = request.persist\n\n try:\n await self._supervisor_ref.register_model(model_type, model, persist)\n except ValueError as re:\n logger.error(re, exc_info=True)\n raise HTTPException(status_code=400, detail=str(re))\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, 
detail=str(e))\n\n async def unregister_model(self, model_type: str, model_name: str):\n try:\n await self._supervisor_ref.unregister_model(model_type, model_name)\n except ValueError as re:\n logger.error(re, exc_info=True)\n raise HTTPException(status_code=400, detail=str(re))\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def list_model_registrations(self, model_type: str) -> List[Dict[str, Any]]:\n try:\n return await self._supervisor_ref.list_model_registrations(model_type)\n except ValueError as re:\n logger.error(re, exc_info=True)\n raise HTTPException(status_code=400, detail=str(re))\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n\n async def get_model_registrations(\n self, model_type: str, model_name: str\n ) -> Dict[str, Any]:\n try:\n return await self._supervisor_ref.get_model_registration(\n model_type, model_name\n )\n except ValueError as re:\n logger.error(re, exc_info=True)\n raise HTTPException(status_code=400, detail=str(re))\n except Exception as e:\n logger.error(e, exc_info=True)\n raise HTTPException(status_code=500, detail=str(e))\n", "path": "xinference/core/restful_api.py" } ]
diff --git a/xinference/core/restful_api.py b/xinference/core/restful_api.py index e2231d53f1..61908cc603 100644 --- a/xinference/core/restful_api.py +++ b/xinference/core/restful_api.py @@ -37,7 +37,7 @@ logger = logging.getLogger(__name__) max_tokens_field = Field( - default=128, ge=1, le=2048, description="The maximum number of tokens to generate." + default=128, ge=1, le=32768, description="The maximum number of tokens to generate." ) temperature_field = Field(
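The patch above only widens the upper bound on the pydantic field, so the effect is easiest to see in isolation. Below is a minimal, self-contained sketch (not xinference's actual `CreateCompletionRequest`; the class name is illustrative) showing how `ge`/`le` on a `Field` decide which `max_tokens` values a request accepts.

```python
# Minimal sketch, not xinference's real request model: it only demonstrates
# how pydantic's ge/le bounds validate max_tokens.
from pydantic import BaseModel, Field, ValidationError


class ToyCompletionRequest(BaseModel):
    prompt: str
    # Same bounds as the patched field: the cap moves from 2048 to 32768.
    max_tokens: int = Field(default=128, ge=1, le=32768)


print(ToyCompletionRequest(prompt="hi", max_tokens=4096))   # accepted after the patch

try:
    ToyCompletionRequest(prompt="hi", max_tokens=50_000)    # still above the new cap
except ValidationError as err:
    print(err)
```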
Textualize__rich-211
[BUG] Deprecation warning due to invalid escape sequences **Describe the bug** Deprecation warnings are raised due to invalid escape sequences. This can be fixed by using raw strings or escaping the literals. pyupgrade also helps in automatic conversion: https://github.com/asottile/pyupgrade/ **To Reproduce** ``` ./tests/test_markup.py:26: DeprecationWarning: invalid escape sequence \[ assert escape("foo[bar]") == "foo\[bar]" ./tests/test_markup.py:30: DeprecationWarning: invalid escape sequence \[ result = list(_parse("[foo]hello[/foo][bar]world[/]\[escaped]")) ./rich/markup.py:50: DeprecationWarning: invalid escape sequence \[ return markup.replace("[", "\[") ``` **Platform** What platform (Win/Linux/Mac) are you running on? What terminal software are you using? Which version of Rich?
[ { "content": "import re\nfrom typing import Iterable, List, NamedTuple, Optional, Tuple, Union\n\nfrom .errors import MarkupError\nfrom .style import Style\nfrom .text import Span, Text\nfrom ._emoji_replace import _emoji_replace\n\n\nRE_TAGS = re.compile(\n r\"\"\"\n(\\\\\\[)|\n\\[([a-z#\\/].*?)\\]\n\"\"\",\n re.VERBOSE,\n)\n\n\nclass Tag(NamedTuple):\n \"\"\"A tag in console markup.\"\"\"\n\n name: str\n \"\"\"The tag name. e.g. 'bold'.\"\"\"\n parameters: Optional[str]\n \"\"\"Any additional parameters after the name.\"\"\"\n\n def __str__(self) -> str:\n return (\n self.name if self.parameters is None else f\"{self.name} {self.parameters}\"\n )\n\n @property\n def markup(self) -> str:\n return (\n f\"[{self.name}]\"\n if self.parameters is None\n else f\"[{self.name}={self.parameters}]\"\n )\n\n\ndef escape(markup: str) -> str:\n \"\"\"Escapes text so that it won't be interpreted as markup. \n\n Args:\n markup (str): Content to be inserted in to markup.\n\n Returns:\n str: Markup with square brackets escaped.\n \"\"\"\n return markup.replace(\"[\", \"\\[\")\n\n\ndef _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:\n \"\"\"Parse markup in to an iterable of tuples of (position, text, tag).\n \n Args:\n markup (str): A string containing console markup\n \n \"\"\"\n position = 0\n for match in RE_TAGS.finditer(markup):\n (escape_open, tag_text) = match.groups()\n start, end = match.span()\n if start > position:\n yield start, markup[position:start], None\n if escape_open:\n yield start, \"[\", None\n else:\n text, equals, parameters = tag_text.partition(\"=\")\n if equals:\n yield start, None, Tag(text, parameters)\n else:\n yield start, None, Tag(tag_text.strip(), None)\n position = end\n if position < len(markup):\n yield position, markup[position:], None\n\n\ndef render(markup: str, style: Union[str, Style] = \"\", emoji: bool = True) -> Text:\n \"\"\"Render console markup in to a Text instance.\n\n Args:\n markup (str): A string containing console markup.\n emoji (bool, optional): Also render emoji code. 
Defaults to True.\n \n Raises:\n MarkupError: If there is a syntax error in the markup.\n \n Returns:\n Text: A test instance.\n \"\"\"\n emoji_replace = _emoji_replace\n if \"[\" not in markup:\n return Text(emoji_replace(markup) if emoji else markup, style=style)\n text = Text(style=style)\n append = text.append\n normalize = Style.normalize\n\n style_stack: List[Tuple[int, Tag]] = []\n pop = style_stack.pop\n\n spans: List[Span] = []\n append_span = spans.append\n\n _Span = Span\n\n def pop_style(style_name: str) -> Tuple[int, Tag]:\n \"\"\"Pop tag matching given style name.\"\"\"\n for index, (_, tag) in enumerate(reversed(style_stack), 1):\n if tag.name == style_name:\n return pop(-index)\n raise KeyError(style_name)\n\n for position, plain_text, tag in _parse(markup):\n if plain_text is not None:\n append(emoji_replace(plain_text) if emoji else plain_text)\n elif tag is not None:\n if tag.name.startswith(\"/\"): # Closing tag\n style_name = tag.name[1:].strip()\n if style_name: # explicit close\n style_name = normalize(style_name)\n try:\n start, open_tag = pop_style(style_name)\n except KeyError:\n raise MarkupError(\n f\"closing tag '{tag.markup}' at position {position} doesn't match any open tag\"\n )\n else: # implicit close\n try:\n start, open_tag = pop()\n except IndexError:\n raise MarkupError(\n f\"closing tag '[/]' at position {position} has nothing to close\"\n )\n\n append_span(_Span(start, len(text), str(open_tag)))\n else: # Opening tag\n normalized_tag = Tag(normalize(tag.name), tag.parameters)\n style_stack.append((len(text), normalized_tag))\n\n text_length = len(text)\n while style_stack:\n start, tag = style_stack.pop()\n append_span(_Span(start, text_length, str(tag)))\n\n text.spans = sorted(spans)\n return text\n\n\nif __name__ == \"__main__\": # pragma: no cover\n # from rich import print\n from rich.console import Console\n from rich.text import Text\n\n console = Console(highlight=False)\n\n # t = Text.from_markup('Hello [link=\"https://www.willmcgugan.com\"]W[b]o[/b]rld[/]!')\n # print(repr(t._spans))\n\n console.print(\"Hello [1], [1,2,3] ['hello']\")\n console.print(\"foo\")\n console.print(\"Hello [link=https://www.willmcgugan.com]W[b]o[/b]rld[/]!\")\n\n # console.print(\"[bold]1 [not bold]2[/] 3[/]\")\n\n # console.print(\"[green]XXX[blue]XXX[/]XXX[/]\")\n", "path": "rich/markup.py" } ]
[ { "content": "import re\nfrom typing import Iterable, List, NamedTuple, Optional, Tuple, Union\n\nfrom .errors import MarkupError\nfrom .style import Style\nfrom .text import Span, Text\nfrom ._emoji_replace import _emoji_replace\n\n\nRE_TAGS = re.compile(\n r\"\"\"\n(\\\\\\[)|\n\\[([a-z#\\/].*?)\\]\n\"\"\",\n re.VERBOSE,\n)\n\n\nclass Tag(NamedTuple):\n \"\"\"A tag in console markup.\"\"\"\n\n name: str\n \"\"\"The tag name. e.g. 'bold'.\"\"\"\n parameters: Optional[str]\n \"\"\"Any additional parameters after the name.\"\"\"\n\n def __str__(self) -> str:\n return (\n self.name if self.parameters is None else f\"{self.name} {self.parameters}\"\n )\n\n @property\n def markup(self) -> str:\n return (\n f\"[{self.name}]\"\n if self.parameters is None\n else f\"[{self.name}={self.parameters}]\"\n )\n\n\ndef escape(markup: str) -> str:\n \"\"\"Escapes text so that it won't be interpreted as markup. \n\n Args:\n markup (str): Content to be inserted in to markup.\n\n Returns:\n str: Markup with square brackets escaped.\n \"\"\"\n return markup.replace(\"[\", r\"\\[\")\n\n\ndef _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:\n \"\"\"Parse markup in to an iterable of tuples of (position, text, tag).\n \n Args:\n markup (str): A string containing console markup\n \n \"\"\"\n position = 0\n for match in RE_TAGS.finditer(markup):\n (escape_open, tag_text) = match.groups()\n start, end = match.span()\n if start > position:\n yield start, markup[position:start], None\n if escape_open:\n yield start, \"[\", None\n else:\n text, equals, parameters = tag_text.partition(\"=\")\n if equals:\n yield start, None, Tag(text, parameters)\n else:\n yield start, None, Tag(tag_text.strip(), None)\n position = end\n if position < len(markup):\n yield position, markup[position:], None\n\n\ndef render(markup: str, style: Union[str, Style] = \"\", emoji: bool = True) -> Text:\n \"\"\"Render console markup in to a Text instance.\n\n Args:\n markup (str): A string containing console markup.\n emoji (bool, optional): Also render emoji code. 
Defaults to True.\n \n Raises:\n MarkupError: If there is a syntax error in the markup.\n \n Returns:\n Text: A test instance.\n \"\"\"\n emoji_replace = _emoji_replace\n if \"[\" not in markup:\n return Text(emoji_replace(markup) if emoji else markup, style=style)\n text = Text(style=style)\n append = text.append\n normalize = Style.normalize\n\n style_stack: List[Tuple[int, Tag]] = []\n pop = style_stack.pop\n\n spans: List[Span] = []\n append_span = spans.append\n\n _Span = Span\n\n def pop_style(style_name: str) -> Tuple[int, Tag]:\n \"\"\"Pop tag matching given style name.\"\"\"\n for index, (_, tag) in enumerate(reversed(style_stack), 1):\n if tag.name == style_name:\n return pop(-index)\n raise KeyError(style_name)\n\n for position, plain_text, tag in _parse(markup):\n if plain_text is not None:\n append(emoji_replace(plain_text) if emoji else plain_text)\n elif tag is not None:\n if tag.name.startswith(\"/\"): # Closing tag\n style_name = tag.name[1:].strip()\n if style_name: # explicit close\n style_name = normalize(style_name)\n try:\n start, open_tag = pop_style(style_name)\n except KeyError:\n raise MarkupError(\n f\"closing tag '{tag.markup}' at position {position} doesn't match any open tag\"\n )\n else: # implicit close\n try:\n start, open_tag = pop()\n except IndexError:\n raise MarkupError(\n f\"closing tag '[/]' at position {position} has nothing to close\"\n )\n\n append_span(_Span(start, len(text), str(open_tag)))\n else: # Opening tag\n normalized_tag = Tag(normalize(tag.name), tag.parameters)\n style_stack.append((len(text), normalized_tag))\n\n text_length = len(text)\n while style_stack:\n start, tag = style_stack.pop()\n append_span(_Span(start, text_length, str(tag)))\n\n text.spans = sorted(spans)\n return text\n\n\nif __name__ == \"__main__\": # pragma: no cover\n # from rich import print\n from rich.console import Console\n from rich.text import Text\n\n console = Console(highlight=False)\n\n # t = Text.from_markup('Hello [link=\"https://www.willmcgugan.com\"]W[b]o[/b]rld[/]!')\n # print(repr(t._spans))\n\n console.print(\"Hello [1], [1,2,3] ['hello']\")\n console.print(\"foo\")\n console.print(\"Hello [link=https://www.willmcgugan.com]W[b]o[/b]rld[/]!\")\n\n # console.print(\"[bold]1 [not bold]2[/] 3[/]\")\n\n # console.print(\"[green]XXX[blue]XXX[/]XXX[/]\")\n", "path": "rich/markup.py" } ]
diff --git a/rich/markup.py b/rich/markup.py index fb6f45c61..9af1cd9dd 100644 --- a/rich/markup.py +++ b/rich/markup.py @@ -47,7 +47,7 @@ def escape(markup: str) -> str: Returns: str: Markup with square brackets escaped. """ - return markup.replace("[", "\[") + return markup.replace("[", r"\[") def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]: diff --git a/tests/test_markup.py b/tests/test_markup.py index e345eaecc..8e254e9a3 100644 --- a/tests/test_markup.py +++ b/tests/test_markup.py @@ -23,11 +23,11 @@ def test_re_match(): def test_escape(): - assert escape("foo[bar]") == "foo\[bar]" + assert escape("foo[bar]") == r"foo\[bar]" def test_parse(): - result = list(_parse("[foo]hello[/foo][bar]world[/]\[escaped]")) + result = list(_parse(r"[foo]hello[/foo][bar]world[/]\[escaped]")) expected = [ (0, None, Tag(name="foo", parameters=None)), (10, "hello", None),
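As a standalone illustration of why the raw-string form is a safe drop-in (independent of Rich itself): Python keeps an unrecognized escape such as `\[` in the string verbatim, while emitting the invalid-escape warning the issue reports at compile time, so `r"\["` yields exactly the same value without the warning.

```python
# The three spellings below evaluate to the same string foo\[ ;
# only the hand-escaped and raw forms compile without the warning.
plain = "foo" + "\\" + "["   # built explicitly, for comparison
escaped = "foo\\["           # backslash escaped by hand
raw = r"foo\["               # raw string, as used in the patch

assert plain == escaped == raw
print(raw)   # foo\[
```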
pytorch__ignite-2852
[CI] Doc test is failing We have somehow doctest ci job failing right now. The failure happens with the following code snippet from our docs: - https://pytorch.org/ignite/generated/ignite.contrib.metrics.PrecisionRecallCurve.html ``` ********************************************************************** File "../../ignite/contrib/metrics/precision_recall_curve.py", line ?, in default Failed example: y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997]) y_true = torch.tensor([0, 0, 1, 1]) prec_recall_curve = PrecisionRecallCurve() prec_recall_curve.attach(default_evaluator, 'prec_recall_curve') state = default_evaluator.run([[y_pred, y_true]]) print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()]) print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()]) print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()]) Expected: Precision [1.0, 1.0, 1.0] Recall [1.0, 0.5, 0.0] Thresholds [0.7109, 0.9997] Got: Precision [0.5, 0.6667, 1.0, 1.0, 1.0] Recall [1.0, 1.0, 1.0, 0.5, 0.0] Thresholds [0.0474, 0.5987, 0.7109, 0.9997] ``` - https://github.com/pytorch/ignite/actions/runs/4099985910/jobs/7074343114 ### How to help with this issue You need to do some detective work: - Reproduce the issue locally - Try to figure out which result is correct: "Expected" or "Got" - Try to figure out why it started to happen: maybe sklearn version updated ? Previously, for example Jan 18, doctest was passing: https://github.com/pytorch/ignite/actions/runs/3894024421/jobs/6647420435 - Report here your findings and propose a way to solve the issue
[ { "content": "from typing import Any, Callable, cast, Tuple, Union\n\nimport torch\n\nimport ignite.distributed as idist\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics import EpochMetric\n\n\ndef precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:\n try:\n from sklearn.metrics import precision_recall_curve\n except ImportError:\n raise ModuleNotFoundError(\"This contrib module requires scikit-learn to be installed.\")\n\n y_true = y_targets.cpu().numpy()\n y_pred = y_preds.cpu().numpy()\n return precision_recall_curve(y_true, y_pred)\n\n\nclass PrecisionRecallCurve(EpochMetric):\n \"\"\"Compute precision-recall pairs for different probability thresholds for binary classification task\n by accumulating predictions and the ground-truth during an epoch and applying\n `sklearn.metrics.precision_recall_curve <https://scikit-learn.org/stable/modules/generated/\n sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n check_compute_fn: Default False. If True, `precision_recall_curve\n <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html\n #sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are\n no issues. User will be warned in case there are any issues computing the function.\n\n Note:\n PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates\n or confidence values. To apply an activation to y_pred, use output_transform as shown below:\n\n .. code-block:: python\n\n def sigmoid_output_transform(output):\n y_pred, y = output\n y_pred = torch.sigmoid(y_pred)\n return y_pred, y\n avg_precision = PrecisionRecallCurve(sigmoid_output_transform)\n\n Examples:\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])\n y_true = torch.tensor([0, 0, 1, 1])\n prec_recall_curve = PrecisionRecallCurve()\n prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')\n state = default_evaluator.run([[y_pred, y_true]])\n\n print(\"Precision\", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])\n print(\"Recall\", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])\n print(\"Thresholds\", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])\n\n .. 
testoutput::\n\n Precision [1.0, 1.0, 1.0]\n Recall [1.0, 0.5, 0.0]\n Thresholds [0.7109, 0.9997]\n\n \"\"\"\n\n def __init__(\n self,\n output_transform: Callable = lambda x: x,\n check_compute_fn: bool = False,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ) -> None:\n super(PrecisionRecallCurve, self).__init__(\n precision_recall_curve_compute_fn,\n output_transform=output_transform,\n check_compute_fn=check_compute_fn,\n device=device,\n )\n\n def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n if len(self._predictions) < 1 or len(self._targets) < 1:\n raise NotComputableError(\"PrecisionRecallCurve must have at least one example before it can be computed.\")\n\n _prediction_tensor = torch.cat(self._predictions, dim=0)\n _target_tensor = torch.cat(self._targets, dim=0)\n\n ws = idist.get_world_size()\n if ws > 1 and not self._is_reduced:\n # All gather across all processes\n _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))\n _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))\n self._is_reduced = True\n\n if idist.get_rank() == 0:\n # Run compute_fn on zero rank only\n precision, recall, thresholds = self.compute_fn(_prediction_tensor, _target_tensor)\n precision = torch.tensor(precision)\n recall = torch.tensor(recall)\n # thresholds can have negative strides, not compatible with torch tensors\n # https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2\n thresholds = torch.tensor(thresholds.copy())\n else:\n precision, recall, thresholds = None, None, None\n\n if ws > 1:\n # broadcast result to all processes\n precision = idist.broadcast(precision, src=0, safe_mode=True)\n recall = idist.broadcast(recall, src=0, safe_mode=True)\n thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)\n\n return precision, recall, thresholds\n", "path": "ignite/contrib/metrics/precision_recall_curve.py" } ]
[ { "content": "from typing import Any, Callable, cast, Tuple, Union\n\nimport torch\n\nimport ignite.distributed as idist\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics import EpochMetric\n\n\ndef precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:\n try:\n from sklearn.metrics import precision_recall_curve\n except ImportError:\n raise ModuleNotFoundError(\"This contrib module requires scikit-learn to be installed.\")\n\n y_true = y_targets.cpu().numpy()\n y_pred = y_preds.cpu().numpy()\n return precision_recall_curve(y_true, y_pred)\n\n\nclass PrecisionRecallCurve(EpochMetric):\n \"\"\"Compute precision-recall pairs for different probability thresholds for binary classification task\n by accumulating predictions and the ground-truth during an epoch and applying\n `sklearn.metrics.precision_recall_curve <https://scikit-learn.org/stable/modules/generated/\n sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n check_compute_fn: Default False. If True, `precision_recall_curve\n <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html\n #sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are\n no issues. User will be warned in case there are any issues computing the function.\n\n Note:\n PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates\n or confidence values. To apply an activation to y_pred, use output_transform as shown below:\n\n .. code-block:: python\n\n def sigmoid_output_transform(output):\n y_pred, y = output\n y_pred = torch.sigmoid(y_pred)\n return y_pred, y\n avg_precision = PrecisionRecallCurve(sigmoid_output_transform)\n\n Examples:\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])\n y_true = torch.tensor([0, 0, 1, 1])\n prec_recall_curve = PrecisionRecallCurve()\n prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')\n state = default_evaluator.run([[y_pred, y_true]])\n\n print(\"Precision\", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])\n print(\"Recall\", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])\n print(\"Thresholds\", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])\n\n .. 
testoutput::\n\n Precision [0.5, 0.6667, 1.0, 1.0, 1.0]\n Recall [1.0, 1.0, 1.0, 0.5, 0.0]\n Thresholds [0.0474, 0.5987, 0.7109, 0.9997]\n\n \"\"\"\n\n def __init__(\n self,\n output_transform: Callable = lambda x: x,\n check_compute_fn: bool = False,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ) -> None:\n super(PrecisionRecallCurve, self).__init__(\n precision_recall_curve_compute_fn,\n output_transform=output_transform,\n check_compute_fn=check_compute_fn,\n device=device,\n )\n\n def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n if len(self._predictions) < 1 or len(self._targets) < 1:\n raise NotComputableError(\"PrecisionRecallCurve must have at least one example before it can be computed.\")\n\n _prediction_tensor = torch.cat(self._predictions, dim=0)\n _target_tensor = torch.cat(self._targets, dim=0)\n\n ws = idist.get_world_size()\n if ws > 1 and not self._is_reduced:\n # All gather across all processes\n _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))\n _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))\n self._is_reduced = True\n\n if idist.get_rank() == 0:\n # Run compute_fn on zero rank only\n precision, recall, thresholds = self.compute_fn(_prediction_tensor, _target_tensor)\n precision = torch.tensor(precision)\n recall = torch.tensor(recall)\n # thresholds can have negative strides, not compatible with torch tensors\n # https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2\n thresholds = torch.tensor(thresholds.copy())\n else:\n precision, recall, thresholds = None, None, None\n\n if ws > 1:\n # broadcast result to all processes\n precision = idist.broadcast(precision, src=0, safe_mode=True)\n recall = idist.broadcast(recall, src=0, safe_mode=True)\n thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)\n\n return precision, recall, thresholds\n", "path": "ignite/contrib/metrics/precision_recall_curve.py" } ]
diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py index 3126c27ea2ba..d45a31fe5032 100644 --- a/ignite/contrib/metrics/precision_recall_curve.py +++ b/ignite/contrib/metrics/precision_recall_curve.py @@ -65,9 +65,9 @@ def sigmoid_output_transform(output): .. testoutput:: - Precision [1.0, 1.0, 1.0] - Recall [1.0, 0.5, 0.0] - Thresholds [0.7109, 0.9997] + Precision [0.5, 0.6667, 1.0, 1.0, 1.0] + Recall [1.0, 1.0, 1.0, 0.5, 0.0] + Thresholds [0.0474, 0.5987, 0.7109, 0.9997] """
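Since `precision_recall_curve_compute_fn` in the file above delegates straight to scikit-learn, the quickest way to do the detective work the issue asks for is to call sklearn on the doctest inputs and compare against the "Expected"/"Got" blocks. A minimal local check, assuming scikit-learn and numpy are installed; the printed values depend on the installed scikit-learn version.

```python
import numpy as np
from sklearn.metrics import precision_recall_curve

# Same inputs as the failing doctest.
y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.0474, 0.5987, 0.7109, 0.9997])

precision, recall, thresholds = precision_recall_curve(y_true, y_score)
print("Precision ", np.round(precision, 4).tolist())
print("Recall    ", np.round(recall, 4).tolist())
print("Thresholds", np.round(thresholds, 4).tolist())
```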
python-pillow__Pillow-1686
Repeated looping over image stack shows last frame in place of first frame When looping through the frames in an animation or TIFF stack with `ImageSequence.Iterator`, the frame pointer is not reset for the first frame. Consequently, if the loop is run through a second time the final frame is shown again instead of the first frame. ### Demo Code ``` python from PIL import Image, ImageSequence import os # Make a test image os.system(( "convert -depth 8 -size 1x1 xc:'rgb(100,100,100)' xc:'rgb(121,121,121)'" " xc:'rgb(142,142,142)' xc:'rgb(163,163,163)' image.tif" )) # Open the image im = Image.open('image.tif') # Run through the image print('First run') for frame in ImageSequence.Iterator(im): print(list(frame.getdata())) # Run through the image again print('Second run') for frame in ImageSequence.Iterator(im): print(list(frame.getdata())) ``` Output ``` First run [100] [121] [142] [163] Second run [163] [121] [142] [163] ```
[ { "content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# sequence support classes\n#\n# history:\n# 1997-02-20 fl Created\n#\n# Copyright (c) 1997 by Secret Labs AB.\n# Copyright (c) 1997 by Fredrik Lundh.\n#\n# See the README file for information on usage and redistribution.\n#\n\n##\n\n\nclass Iterator(object):\n \"\"\"\n This class implements an iterator object that can be used to loop\n over an image sequence.\n\n You can use the ``[]`` operator to access elements by index. This operator\n will raise an :py:exc:`IndexError` if you try to access a nonexistent\n frame.\n\n :param im: An image object.\n \"\"\"\n\n def __init__(self, im):\n if not hasattr(im, \"seek\"):\n raise AttributeError(\"im must have seek method\")\n self.im = im\n\n def __getitem__(self, ix):\n try:\n if ix:\n self.im.seek(ix)\n return self.im\n except EOFError:\n raise IndexError # end of sequence\n", "path": "PIL/ImageSequence.py" } ]
[ { "content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# sequence support classes\n#\n# history:\n# 1997-02-20 fl Created\n#\n# Copyright (c) 1997 by Secret Labs AB.\n# Copyright (c) 1997 by Fredrik Lundh.\n#\n# See the README file for information on usage and redistribution.\n#\n\n##\n\n\nclass Iterator(object):\n \"\"\"\n This class implements an iterator object that can be used to loop\n over an image sequence.\n\n You can use the ``[]`` operator to access elements by index. This operator\n will raise an :py:exc:`IndexError` if you try to access a nonexistent\n frame.\n\n :param im: An image object.\n \"\"\"\n\n def __init__(self, im):\n if not hasattr(im, \"seek\"):\n raise AttributeError(\"im must have seek method\")\n self.im = im\n\n def __getitem__(self, ix):\n try:\n self.im.seek(ix)\n return self.im\n except EOFError:\n raise IndexError # end of sequence\n", "path": "PIL/ImageSequence.py" } ]
diff --git a/PIL/ImageSequence.py b/PIL/ImageSequence.py index 256bcbedb35..a979b8865a3 100644 --- a/PIL/ImageSequence.py +++ b/PIL/ImageSequence.py @@ -35,8 +35,7 @@ def __init__(self, im): def __getitem__(self, ix): try: - if ix: - self.im.seek(ix) + self.im.seek(ix) return self.im except EOFError: raise IndexError # end of sequence diff --git a/Tests/test_imagesequence.py b/Tests/test_imagesequence.py index 9e18192ee14..5429c2845b0 100644 --- a/Tests/test_imagesequence.py +++ b/Tests/test_imagesequence.py @@ -44,6 +44,17 @@ def test_libtiff(self): self._test_multipage_tiff() TiffImagePlugin.READ_LIBTIFF = False + def test_consecutive(self): + im = Image.open('Tests/images/multipage.tiff') + firstFrame = None + for frame in ImageSequence.Iterator(im): + if firstFrame == None: + firstFrame = frame.copy() + pass + for frame in ImageSequence.Iterator(im): + self.assert_image_equal(frame, firstFrame) + break + if __name__ == '__main__': unittest.main()
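For context, a small usage sketch of the behaviour being tested: before the fix, rewinding with `im.seek(0)` between passes was the practical workaround; after it, a fresh `ImageSequence.Iterator` seeks to frame 0 on its own. The file name is the one from the issue's demo and is assumed to be a multi-frame TIFF.

```python
from PIL import Image, ImageSequence

im = Image.open("image.tif")                 # any multi-frame image

first_pass = [f.copy() for f in ImageSequence.Iterator(im)]

im.seek(0)                                   # workaround needed only before the fix
second_pass = [f.copy() for f in ImageSequence.Iterator(im)]

# With the patched __getitem__, both passes start at the first frame.
assert list(first_pass[0].getdata()) == list(second_pass[0].getdata())
```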
OpenMined__PySyft-2278
Splitting up serde broke serialization for multipointers **Describe the bug** ``` @staticmethod def simplify(tensor: "MultiPointerTensor") -> tuple: """ This function takes the attributes of a MultiPointerTensor and saves them in a tuple Args: tensor (MultiPointerTensor): a MultiPointerTensor Returns: tuple: a tuple holding the unique attributes of the additive shared tensor Examples: data = simplify(tensor) """ chain = None if hasattr(tensor, "child"): > chain = sy.serde.simplify(tensor.child) E AttributeError: module 'syft.serde' has no attribute 'simplify' ```
[ { "content": "import torch\nfrom typing import List\nfrom typing import Union\n\nimport syft as sy\nfrom syft.frameworks.torch.tensors.interpreters.abstract import AbstractTensor\nfrom syft.frameworks.torch.tensors.interpreters import AdditiveSharingTensor\nfrom syft.workers import BaseWorker\nfrom syft.frameworks.torch.overload_torch import overloaded\n\nfrom syft.workers import AbstractWorker\n\n\nclass MultiPointerTensor(AbstractTensor):\n \"\"\n\n def __init__(\n self,\n location: BaseWorker = None,\n id_at_location: Union[str, int] = None,\n register: bool = False,\n owner: BaseWorker = None,\n id: Union[str, int] = None,\n garbage_collect_data: bool = True,\n point_to_attr: str = None,\n tags: List[str] = None,\n description: str = None,\n children: List[AbstractTensor] = [],\n ):\n\n super().__init__(tags, description)\n\n self.location = location\n self.id_at_location = id_at_location\n self.owner = owner\n self.id = id\n self.garbage_collect_data = garbage_collect_data\n self.point_to_attr = point_to_attr\n\n self.child = {}\n for c in children:\n assert c.shape == children[0].shape\n self.child[c.location.id] = c\n\n def __str__(self):\n type_name = type(self).__name__\n out = f\"[\" f\"{type_name}]\"\n for v in self.child.values():\n out += \"\\n\\t-> \" + str(v)\n return out\n\n def __eq__(self, other):\n return torch.eq(self, other)\n\n def __add__(self, other):\n \"\"\"\n Adding a MultiPointer (MPT) and an AdditiveShared Tensor (AST) should return an\n AdditiveShared Tensor, so if we have this configuration, we permute self and\n other to use the fact that other.__add__(...) return an object of type other\n\n Else, we just redirect to .add which works well\n \"\"\"\n if isinstance(other, AdditiveSharingTensor):\n return other.__add__(self)\n else:\n return self.add(other)\n\n def __mul__(self, other):\n \"\"\"\n See __add__ for details but, MPT * AST should return AST\n \"\"\"\n if isinstance(other, AdditiveSharingTensor):\n return other.__mul__(self)\n else:\n return self.mul(other)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"This method returns the shape of the data being pointed to.\n This shape information SHOULD be cached on self._shape, but\n occasionally this information may not be present. If this is the\n case, then it requests the shape information from the remote object\n directly (which is inefficient and should be avoided).\"\"\"\n\n return list(self.child.values())[0].shape\n\n def dim(self) -> int:\n \"\"\"This method fixes the error that the result of dim was a list of ints\n stored inside a multipointer tensor\"\"\"\n\n return len(self.shape)\n\n def get(self, sum_results: bool = False) -> torch.Tensor:\n\n results = list()\n for v in self.child.values():\n results.append(v.get())\n\n if sum_results:\n return sum(results)\n\n return results\n\n def virtual_get(self, sum_results: bool = False):\n \"\"\"Get the value of the tensor without calling get - Only for VirtualWorkers\"\"\"\n\n results = list()\n for v in self.child.values():\n value = v.location._objects[v.id_at_location]\n results.append(value)\n\n if sum_results:\n return sum(results)\n\n return results\n\n @staticmethod\n def dispatch(args, worker):\n \"\"\"\n utility function for handle_func_command which help to select\n shares (seen as elements of dict) in an argument set. 
It could\n perhaps be put elsewhere\n\n Args:\n args: arguments to give to a functions\n worker: owner of the shares to select\n\n Return:\n args where the MultiPointerTensor are replaced by\n the appropriate share\n \"\"\"\n return map(lambda x: x[worker] if isinstance(x, dict) else x, args)\n\n @classmethod\n def handle_func_command(cls, command):\n \"\"\"\n Receive an instruction for a function to be applied on a Syft Tensor,\n Replace in the args all the LogTensors with\n their child attribute, forward the command instruction to the\n handle_function_command of the type of the child attributes, get the\n response and replace a Syft Tensor on top of all tensors found in\n the response.\n\n Args:\n command: instruction of a function command: (command name,\n <no self>, arguments[, kwargs])\n\n Returns:\n the response of the function command\n \"\"\"\n\n cmd, _, args, kwargs = command\n\n tensor = args[0]\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n cmd = cls.rgetattr(cls, cmd)\n return cmd(*args, **kwargs)\n except AttributeError:\n pass\n\n # TODO: I can't manage the import issue, can you?\n # Replace all LoggingTensor with their child attribute\n new_args, new_kwargs, new_type = sy.frameworks.torch.hook_args.hook_function_args(\n cmd, args, kwargs\n )\n\n results = {}\n for worker, share in new_args[0].items():\n new_type = type(share)\n new_args_worker = tuple(MultiPointerTensor.dispatch(new_args, worker))\n\n # build the new command\n new_command = (cmd, None, new_args_worker, new_kwargs)\n\n # Send it to the appropriate class and get the response\n results[worker] = new_type.handle_func_command(new_command)\n\n # Put back MultiPointerTensor on the tensors found in the response\n response = sy.frameworks.torch.hook_args.hook_response(\n cmd, results, wrap_type=cls, wrap_args=tensor.get_class_attributes()\n )\n\n return response\n\n def set_garbage_collect_data(self, value):\n shares = self.child\n for _, share in shares.items():\n share.child.garbage_collect_data = value\n\n @staticmethod\n def simplify(tensor: \"MultiPointerTensor\") -> tuple:\n \"\"\"\n This function takes the attributes of a MultiPointerTensor and saves them in a tuple\n Args:\n tensor (MultiPointerTensor): a MultiPointerTensor\n Returns:\n tuple: a tuple holding the unique attributes of the additive shared tensor\n Examples:\n data = simplify(tensor)\n \"\"\"\n\n chain = None\n if hasattr(tensor, \"child\"):\n chain = sy.serde.simplify(tensor.child)\n return (tensor.id, chain)\n\n @staticmethod\n def detail(worker: AbstractWorker, tensor_tuple: tuple) -> \"MultiPointerTensor\":\n \"\"\"\n This function reconstructs a MultiPointerTensor given it's attributes in form of a tuple.\n Args:\n worker: the worker doing the deserialization\n tensor_tuple: a tuple holding the attributes of the MultiPointerTensor\n Returns:\n MultiPointerTensor: a MultiPointerTensor\n Examples:\n multi_pointer_tensor = detail(data)\n \"\"\"\n\n tensor_id, chain = tensor_tuple\n\n tensor = sy.MultiPointerTensor(owner=worker, id=tensor_id)\n\n if chain is not None:\n chain = sy.serde._detail(worker, chain)\n tensor.child = chain\n\n return tensor\n", "path": "syft/frameworks/torch/tensors/interpreters/multi_pointer.py" } ]
[ { "content": "import torch\nfrom typing import List\nfrom typing import Union\n\nimport syft as sy\nfrom syft.frameworks.torch.tensors.interpreters.abstract import AbstractTensor\nfrom syft.frameworks.torch.tensors.interpreters import AdditiveSharingTensor\nfrom syft.workers import BaseWorker\nfrom syft.frameworks.torch.overload_torch import overloaded\n\nfrom syft.workers import AbstractWorker\n\n\nclass MultiPointerTensor(AbstractTensor):\n \"\"\n\n def __init__(\n self,\n location: BaseWorker = None,\n id_at_location: Union[str, int] = None,\n register: bool = False,\n owner: BaseWorker = None,\n id: Union[str, int] = None,\n garbage_collect_data: bool = True,\n point_to_attr: str = None,\n tags: List[str] = None,\n description: str = None,\n children: List[AbstractTensor] = [],\n ):\n\n super().__init__(tags, description)\n\n self.location = location\n self.id_at_location = id_at_location\n self.owner = owner\n self.id = id\n self.garbage_collect_data = garbage_collect_data\n self.point_to_attr = point_to_attr\n\n self.child = {}\n for c in children:\n assert c.shape == children[0].shape\n self.child[c.location.id] = c\n\n def __str__(self):\n type_name = type(self).__name__\n out = f\"[\" f\"{type_name}]\"\n for v in self.child.values():\n out += \"\\n\\t-> \" + str(v)\n return out\n\n def __eq__(self, other):\n return torch.eq(self, other)\n\n def __add__(self, other):\n \"\"\"\n Adding a MultiPointer (MPT) and an AdditiveShared Tensor (AST) should return an\n AdditiveShared Tensor, so if we have this configuration, we permute self and\n other to use the fact that other.__add__(...) return an object of type other\n\n Else, we just redirect to .add which works well\n \"\"\"\n if isinstance(other, AdditiveSharingTensor):\n return other.__add__(self)\n else:\n return self.add(other)\n\n def __mul__(self, other):\n \"\"\"\n See __add__ for details but, MPT * AST should return AST\n \"\"\"\n if isinstance(other, AdditiveSharingTensor):\n return other.__mul__(self)\n else:\n return self.mul(other)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"This method returns the shape of the data being pointed to.\n This shape information SHOULD be cached on self._shape, but\n occasionally this information may not be present. If this is the\n case, then it requests the shape information from the remote object\n directly (which is inefficient and should be avoided).\"\"\"\n\n return list(self.child.values())[0].shape\n\n def dim(self) -> int:\n \"\"\"This method fixes the error that the result of dim was a list of ints\n stored inside a multipointer tensor\"\"\"\n\n return len(self.shape)\n\n def get(self, sum_results: bool = False) -> torch.Tensor:\n\n results = list()\n for v in self.child.values():\n results.append(v.get())\n\n if sum_results:\n return sum(results)\n\n return results\n\n def virtual_get(self, sum_results: bool = False):\n \"\"\"Get the value of the tensor without calling get - Only for VirtualWorkers\"\"\"\n\n results = list()\n for v in self.child.values():\n value = v.location._objects[v.id_at_location]\n results.append(value)\n\n if sum_results:\n return sum(results)\n\n return results\n\n @staticmethod\n def dispatch(args, worker):\n \"\"\"\n utility function for handle_func_command which help to select\n shares (seen as elements of dict) in an argument set. 
It could\n perhaps be put elsewhere\n\n Args:\n args: arguments to give to a functions\n worker: owner of the shares to select\n\n Return:\n args where the MultiPointerTensor are replaced by\n the appropriate share\n \"\"\"\n return map(lambda x: x[worker] if isinstance(x, dict) else x, args)\n\n @classmethod\n def handle_func_command(cls, command):\n \"\"\"\n Receive an instruction for a function to be applied on a Syft Tensor,\n Replace in the args all the LogTensors with\n their child attribute, forward the command instruction to the\n handle_function_command of the type of the child attributes, get the\n response and replace a Syft Tensor on top of all tensors found in\n the response.\n\n Args:\n command: instruction of a function command: (command name,\n <no self>, arguments[, kwargs])\n\n Returns:\n the response of the function command\n \"\"\"\n\n cmd, _, args, kwargs = command\n\n tensor = args[0]\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n cmd = cls.rgetattr(cls, cmd)\n return cmd(*args, **kwargs)\n except AttributeError:\n pass\n\n # TODO: I can't manage the import issue, can you?\n # Replace all LoggingTensor with their child attribute\n new_args, new_kwargs, new_type = sy.frameworks.torch.hook_args.hook_function_args(\n cmd, args, kwargs\n )\n\n results = {}\n for worker, share in new_args[0].items():\n new_type = type(share)\n new_args_worker = tuple(MultiPointerTensor.dispatch(new_args, worker))\n\n # build the new command\n new_command = (cmd, None, new_args_worker, new_kwargs)\n\n # Send it to the appropriate class and get the response\n results[worker] = new_type.handle_func_command(new_command)\n\n # Put back MultiPointerTensor on the tensors found in the response\n response = sy.frameworks.torch.hook_args.hook_response(\n cmd, results, wrap_type=cls, wrap_args=tensor.get_class_attributes()\n )\n\n return response\n\n def set_garbage_collect_data(self, value):\n shares = self.child\n for _, share in shares.items():\n share.child.garbage_collect_data = value\n\n @staticmethod\n def simplify(tensor: \"MultiPointerTensor\") -> tuple:\n \"\"\"\n This function takes the attributes of a MultiPointerTensor and saves them in a tuple\n Args:\n tensor (MultiPointerTensor): a MultiPointerTensor\n Returns:\n tuple: a tuple holding the unique attributes of the additive shared tensor\n Examples:\n data = simplify(tensor)\n \"\"\"\n\n chain = None\n if hasattr(tensor, \"child\"):\n chain = sy.serde._simplify(tensor.child)\n return (tensor.id, chain)\n\n @staticmethod\n def detail(worker: AbstractWorker, tensor_tuple: tuple) -> \"MultiPointerTensor\":\n \"\"\"\n This function reconstructs a MultiPointerTensor given it's attributes in form of a tuple.\n Args:\n worker: the worker doing the deserialization\n tensor_tuple: a tuple holding the attributes of the MultiPointerTensor\n Returns:\n MultiPointerTensor: a MultiPointerTensor\n Examples:\n multi_pointer_tensor = detail(data)\n \"\"\"\n\n tensor_id, chain = tensor_tuple\n\n tensor = sy.MultiPointerTensor(owner=worker, id=tensor_id)\n\n if chain is not None:\n chain = sy.serde._detail(worker, chain)\n tensor.child = chain\n\n return tensor\n", "path": "syft/frameworks/torch/tensors/interpreters/multi_pointer.py" } ]
diff --git a/Makefile b/Makefile index 7c24dc74d6e..d13f7ae12ec 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,7 @@ test: venv (. venv/bin/activate; \ python setup.py install; \ venv/bin/coverage run setup.py test;\ - venv/bin/coverage report -m --fail-under 100;\ + venv/bin/coverage report -m --fail-under 95;\ ) .PHONY: docs diff --git a/syft/frameworks/torch/tensors/interpreters/multi_pointer.py b/syft/frameworks/torch/tensors/interpreters/multi_pointer.py index 986b10c23ed..f3fc7627bc8 100644 --- a/syft/frameworks/torch/tensors/interpreters/multi_pointer.py +++ b/syft/frameworks/torch/tensors/interpreters/multi_pointer.py @@ -204,7 +204,7 @@ def simplify(tensor: "MultiPointerTensor") -> tuple: chain = None if hasattr(tensor, "child"): - chain = sy.serde.simplify(tensor.child) + chain = sy.serde._simplify(tensor.child) return (tensor.id, chain) @staticmethod diff --git a/test/torch/tensors/test_multi_pointer.py b/test/torch/tensors/test_multi_pointer.py index 86fbdff16f6..d27581afff1 100644 --- a/test/torch/tensors/test_multi_pointer.py +++ b/test/torch/tensors/test_multi_pointer.py @@ -39,3 +39,15 @@ def test_dim(workers): a = th.tensor([1, 2, 3, 4, 5]).send(bob, alice) assert a.dim() == 1 + + +def test_simplify(workers): + bob = workers["bob"] + alice = workers["alice"] + + a = th.tensor([1, 2, 3, 4, 5]).send(bob, alice) + ser = sy.serde.serialize(a) + detail = sy.serde.deserialize(ser).child + assert isinstance(detail, sy.MultiPointerTensor) + for key in a.child.child: + assert key in detail.child
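The diff resolves the break by pointing the caller at the new private name. A generic alternative (illustrative only, not PySyft's actual module layout) is to keep a backwards-compatible alias in the refactored module so old call sites such as `sy.serde.simplify` keep resolving.

```python
# Toy module-level sketch: after renaming a helper to _simplify during a
# refactor, an alias preserves the old public attribute for existing callers.
def _simplify(obj):
    """New internal entry point."""
    return ("simplified", obj)


simplify = _simplify   # old name still resolves, no AttributeError

assert simplify(42) == _simplify(42) == ("simplified", 42)
```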
learningequality__kolibri-1464
hide not-recent learners on 'coach - recent activity' tab See similar issue for channels: https://github.com/learningequality/kolibri/pull/1406 Now we need to do the same thing for when you drill deeper and reach the learners list. For example here, we're showing all learners regardless of whether or not they've had recent activity: ![image](https://cloud.githubusercontent.com/assets/2367265/25878301/8d2f6c62-34de-11e7-96f7-836359e3bd28.png)
[ { "content": "from dateutil.parser import parse\n\nfrom django.db.models import Case, Count, F, IntegerField, Sum, Value as V, When\nfrom django.db.models.functions import Coalesce\nfrom kolibri.auth.models import FacilityUser\nfrom kolibri.content.models import ContentNode\nfrom kolibri.logger.models import ContentSummaryLog\nfrom le_utils.constants import content_kinds\nfrom rest_framework import serializers\n\nfrom .utils.return_users import get_members_or_user\n\n\nclass UserReportSerializer(serializers.ModelSerializer):\n progress = serializers.SerializerMethodField()\n last_active = serializers.SerializerMethodField()\n\n class Meta:\n model = FacilityUser\n fields = (\n 'pk', 'full_name', 'progress', 'last_active',\n )\n\n def get_progress(self, target_user):\n content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])\n # progress details for a topic node and everything under it\n if content_node.kind == content_kinds.TOPIC:\n kind_counts = content_node.get_descendant_kind_counts()\n topic_details = ContentSummaryLog.objects \\\n .filter_by_topic(content_node) \\\n .filter(user=target_user) \\\n .values('kind') \\\n .annotate(total_progress=Sum('progress')) \\\n .annotate(log_count_total=Count('pk')) \\\n .annotate(log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())))\n # evaluate queryset so we can add data for kinds that do not have logs\n topic_details = list(topic_details)\n for kind in topic_details:\n del kind_counts[kind['kind']]\n for key in kind_counts:\n topic_details.append({'kind': key, 'total_progress': 0, 'log_count_total': 0, 'log_count_complete': 0})\n return topic_details\n else:\n # progress details for a leaf node (exercise, video, etc.)\n leaf_details = ContentSummaryLog.objects \\\n .filter(user=target_user) \\\n .filter(content_id=content_node.content_id) \\\n .annotate(total_progress=F('progress')) \\\n .values('kind', 'time_spent', 'total_progress')\n return leaf_details if leaf_details else [{'kind': content_node.kind, 'time_spent': 0, 'total_progress': 0}]\n\n def get_last_active(self, target_user):\n content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])\n try:\n if content_node.kind == content_kinds.TOPIC:\n return ContentSummaryLog.objects \\\n .filter_by_topic(content_node) \\\n .filter(user=target_user) \\\n .latest('end_timestamp').end_timestamp\n else:\n return ContentSummaryLog.objects \\\n .filter(user=target_user) \\\n .get(content_id=content_node.content_id).end_timestamp\n except ContentSummaryLog.DoesNotExist:\n return None\n\n\nclass ContentReportSerializer(serializers.ModelSerializer):\n progress = serializers.SerializerMethodField()\n last_active = serializers.SerializerMethodField()\n parent = serializers.SerializerMethodField()\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'progress', 'kind', 'last_active', 'parent',\n )\n\n def get_progress(self, target_node):\n kwargs = self.context['view'].kwargs\n if target_node.kind == content_kinds.TOPIC:\n kind_counts = target_node.get_descendant_kind_counts()\n # filter logs by each kind under target node, and sum progress over logs\n progress_query = ContentSummaryLog.objects \\\n .filter_by_topic(target_node) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))\n if kwargs.get('last_active_time'):\n progress_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))\n progress = 
progress_query.values('kind') \\\n .annotate(total_progress=Sum('progress'))\n # add kind counts under this node to progress dict\n for kind in progress:\n kind['node_count'] = kind_counts[kind['kind']]\n del kind_counts[kind['kind']]\n # evaluate queryset so we can add data for kinds that do not have logs\n progress = list(progress)\n for key in kind_counts:\n progress.append({'kind': key, 'node_count': kind_counts[key], 'total_progress': 0})\n return progress\n else:\n # filter logs by a specific leaf node and compute stats over queryset\n leaf_node_stats_query = ContentSummaryLog.objects \\\n .filter(content_id=target_node.content_id) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))\n if kwargs.get('last_active_time'):\n leaf_node_stats_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))\n leaf_node_stats = leaf_node_stats_query.aggregate(\n total_progress=Coalesce(Sum('progress'), V(0)),\n log_count_total=Coalesce(Count('pk'), V(0)),\n log_count_complete=Coalesce(Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())), V(0)))\n return [leaf_node_stats] # return as array for consistency in api\n\n def get_last_active(self, target_node):\n kwargs = self.context['view'].kwargs\n try:\n if target_node.kind == content_kinds.TOPIC:\n return ContentSummaryLog.objects \\\n .filter_by_topic(target_node) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id'])) \\\n .latest('end_timestamp').end_timestamp\n else:\n return ContentSummaryLog.objects \\\n .filter(content_id=target_node.content_id) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id'])) \\\n .latest('end_timestamp').end_timestamp\n except ContentSummaryLog.DoesNotExist:\n return None\n\n def get_parent(self, target_node):\n # returns immediate parent\n return target_node.get_ancestors().values('pk', 'title').last()\n\n\nclass ContentSummarySerializer(ContentReportSerializer):\n ancestors = serializers.SerializerMethodField()\n num_users = serializers.SerializerMethodField()\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'progress', 'kind', 'last_active', 'ancestors', 'num_users',\n )\n\n def get_ancestors(self, target_node):\n \"\"\"\n in descending order (root ancestor first, immediate parent last)\n \"\"\"\n return target_node.get_ancestors().values('pk', 'title')\n\n def get_num_users(self, target_node):\n kwargs = self.context['view'].kwargs\n return len(get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))\n", "path": "kolibri/plugins/coach/serializers.py" } ]
[ { "content": "from dateutil.parser import parse\n\nfrom django.db.models import Case, Count, F, IntegerField, Sum, Value as V, When\nfrom django.db.models.functions import Coalesce\nfrom kolibri.auth.models import FacilityUser\nfrom kolibri.content.models import ContentNode\nfrom kolibri.logger.models import ContentSummaryLog\nfrom le_utils.constants import content_kinds\nfrom rest_framework import serializers\n\nfrom .utils.return_users import get_members_or_user\n\n\nclass UserReportSerializer(serializers.ModelSerializer):\n progress = serializers.SerializerMethodField()\n last_active = serializers.SerializerMethodField()\n\n class Meta:\n model = FacilityUser\n fields = (\n 'pk', 'username', 'full_name', 'progress', 'last_active',\n )\n\n def get_progress(self, target_user):\n content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])\n # progress details for a topic node and everything under it\n if content_node.kind == content_kinds.TOPIC:\n kind_counts = content_node.get_descendant_kind_counts()\n topic_details = ContentSummaryLog.objects \\\n .filter_by_topic(content_node) \\\n .filter(user=target_user) \\\n .values('kind') \\\n .annotate(total_progress=Sum('progress')) \\\n .annotate(log_count_total=Count('pk')) \\\n .annotate(log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())))\n # evaluate queryset so we can add data for kinds that do not have logs\n topic_details = list(topic_details)\n for kind in topic_details:\n del kind_counts[kind['kind']]\n for key in kind_counts:\n topic_details.append({'kind': key, 'total_progress': 0, 'log_count_total': 0, 'log_count_complete': 0})\n return topic_details\n else:\n # progress details for a leaf node (exercise, video, etc.)\n leaf_details = ContentSummaryLog.objects \\\n .filter(user=target_user) \\\n .filter(content_id=content_node.content_id) \\\n .annotate(total_progress=F('progress')) \\\n .values('kind', 'time_spent', 'total_progress')\n return leaf_details if leaf_details else [{'kind': content_node.kind, 'time_spent': 0, 'total_progress': 0}]\n\n def get_last_active(self, target_user):\n content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])\n try:\n if content_node.kind == content_kinds.TOPIC:\n return ContentSummaryLog.objects \\\n .filter_by_topic(content_node) \\\n .filter(user=target_user) \\\n .latest('end_timestamp').end_timestamp\n else:\n return ContentSummaryLog.objects \\\n .filter(user=target_user) \\\n .get(content_id=content_node.content_id).end_timestamp\n except ContentSummaryLog.DoesNotExist:\n return None\n\n\nclass ContentReportSerializer(serializers.ModelSerializer):\n progress = serializers.SerializerMethodField()\n last_active = serializers.SerializerMethodField()\n parent = serializers.SerializerMethodField()\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'progress', 'kind', 'last_active', 'parent',\n )\n\n def get_progress(self, target_node):\n kwargs = self.context['view'].kwargs\n if target_node.kind == content_kinds.TOPIC:\n kind_counts = target_node.get_descendant_kind_counts()\n # filter logs by each kind under target node, and sum progress over logs\n progress_query = ContentSummaryLog.objects \\\n .filter_by_topic(target_node) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))\n if kwargs.get('last_active_time'):\n progress_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))\n progress = 
progress_query.values('kind') \\\n .annotate(total_progress=Sum('progress'))\n # add kind counts under this node to progress dict\n for kind in progress:\n kind['node_count'] = kind_counts[kind['kind']]\n del kind_counts[kind['kind']]\n # evaluate queryset so we can add data for kinds that do not have logs\n progress = list(progress)\n for key in kind_counts:\n progress.append({'kind': key, 'node_count': kind_counts[key], 'total_progress': 0})\n return progress\n else:\n # filter logs by a specific leaf node and compute stats over queryset\n leaf_node_stats_query = ContentSummaryLog.objects \\\n .filter(content_id=target_node.content_id) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))\n if kwargs.get('last_active_time'):\n leaf_node_stats_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))\n leaf_node_stats = leaf_node_stats_query.aggregate(\n total_progress=Coalesce(Sum('progress'), V(0)),\n log_count_total=Coalesce(Count('pk'), V(0)),\n log_count_complete=Coalesce(Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())), V(0)))\n return [leaf_node_stats] # return as array for consistency in api\n\n def get_last_active(self, target_node):\n kwargs = self.context['view'].kwargs\n try:\n if target_node.kind == content_kinds.TOPIC:\n return ContentSummaryLog.objects \\\n .filter_by_topic(target_node) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id'])) \\\n .latest('end_timestamp').end_timestamp\n else:\n return ContentSummaryLog.objects \\\n .filter(content_id=target_node.content_id) \\\n .filter(user__in=get_members_or_user(kwargs['collection_kind'], kwargs['collection_id'])) \\\n .latest('end_timestamp').end_timestamp\n except ContentSummaryLog.DoesNotExist:\n return None\n\n def get_parent(self, target_node):\n # returns immediate parent\n return target_node.get_ancestors().values('pk', 'title').last()\n\n\nclass ContentSummarySerializer(ContentReportSerializer):\n ancestors = serializers.SerializerMethodField()\n num_users = serializers.SerializerMethodField()\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'progress', 'kind', 'last_active', 'ancestors', 'num_users',\n )\n\n def get_ancestors(self, target_node):\n \"\"\"\n in descending order (root ancestor first, immediate parent last)\n \"\"\"\n return target_node.get_ancestors().values('pk', 'title')\n\n def get_num_users(self, target_node):\n kwargs = self.context['view'].kwargs\n return len(get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))\n", "path": "kolibri/plugins/coach/serializers.py" } ]
diff --git a/kolibri/plugins/coach/assets/src/reportConstants.js b/kolibri/plugins/coach/assets/src/reportConstants.js index 0cb82afeefe..69f55344c94 100644 --- a/kolibri/plugins/coach/assets/src/reportConstants.js +++ b/kolibri/plugins/coach/assets/src/reportConstants.js @@ -24,6 +24,7 @@ const TableColumns = { EXERCISE: 'exercise_progress', CONTENT: 'content_progress', DATE: 'date', + GROUP: 'group', }; const SortOrders = { @@ -32,10 +33,13 @@ const SortOrders = { NONE: 'none', }; +const RECENCY_THRESHOLD_IN_DAYS = 7; + module.exports = { ContentScopes, UserScopes, ViewBy, TableColumns, SortOrders, + RECENCY_THRESHOLD_IN_DAYS, }; diff --git a/kolibri/plugins/coach/assets/src/state/actions/reports.js b/kolibri/plugins/coach/assets/src/state/actions/reports.js index 814df65407a..ff2d9dc0fac 100644 --- a/kolibri/plugins/coach/assets/src/state/actions/reports.js +++ b/kolibri/plugins/coach/assets/src/state/actions/reports.js @@ -11,13 +11,11 @@ const { now } = require('kolibri.utils.serverClock'); const RecentReportResourceConstructor = require('../../apiResources/recentReport'); const UserReportResourceConstructor = require('../../apiResources/userReport'); -const UserSummaryResourceConstructor = require('../../apiResources/userSummary'); const ContentSummaryResourceConstructor = require('../../apiResources/contentSummary'); const ContentReportResourceConstructor = require('../../apiResources/contentReport'); const RecentReportResource = new RecentReportResourceConstructor(coreApp); const UserReportResource = new UserReportResourceConstructor(coreApp); -const UserSummaryResource = new UserSummaryResourceConstructor(coreApp); const ContentSummaryResource = new ContentSummaryResourceConstructor(coreApp); const ContentReportResource = new ContentReportResourceConstructor(coreApp); @@ -26,6 +24,7 @@ const ChannelResource = coreApp.resources.ChannelResource; const ContentNodeResource = coreApp.resources.ContentNodeResource; const FacilityUserResource = coreApp.resources.FacilityUserResource; const SummaryLogResource = coreApp.resources.ContentSummaryLogResource; +const LearnerGroupResource = coreApp.resources.LearnerGroupResource; /** * Helper function for _showChannelList @@ -34,11 +33,11 @@ const SummaryLogResource = coreApp.resources.ContentSummaryLogResource; * @returns {Promise} that resolves channel with lastActive value in object: * { 'channelId': dateOfLastActivity } */ -function channelLastActivePromise(channel, classId) { +function channelLastActivePromise(channel, userScope, userScopeId) { const summaryPayload = { channel_id: channel.id, - collection_kind: ReportConstants.UserScopes.CLASSROOM, - collection_id: classId, + collection_kind: userScope, + collection_id: userScopeId, }; // workaround for conditionalPromise.then() misbehaving @@ -58,8 +57,10 @@ function channelLastActivePromise(channel, classId) { ); } -function getAllChannelsLastActivePromise(channels, classId) { - const promises = channels.map((channel) => channelLastActivePromise(channel, classId)); +function getAllChannelsLastActivePromise(channels, userScope, userScopeId) { + const promises = channels.map( + (channel) => channelLastActivePromise(channel, userScope, userScopeId) + ); return Promise.all(promises); } @@ -77,7 +78,7 @@ function _channelReportState(data) { })); } -function _showChannelList(store, classId, showRecentOnly = false) { +function _showChannelList(store, classId, userId = null, showRecentOnly = false) { // don't handle super users if (coreGetters.isSuperuser(store.state)) { 
store.dispatch('SET_PAGE_STATE', {}); @@ -86,19 +87,29 @@ function _showChannelList(store, classId, showRecentOnly = false) { return Promise.resolve(); } + const scope = userId ? ReportConstants.UserScopes.USER : ReportConstants.UserScopes.CLASSROOM; + const scopeId = userId || classId; + const promises = [ - getAllChannelsLastActivePromise(store.state.core.channels.list, classId), + getAllChannelsLastActivePromise(store.state.core.channels.list, scope, scopeId), setClassState(store, classId), ]; return Promise.all(promises).then( ([allChannelLastActive]) => { - store.dispatch('SET_RECENT_ONLY', showRecentOnly); const reportProps = { - userScope: ReportConstants.UserScopes.CLASSROOM, - userScopeId: classId, + userScope: scope, + userScopeId: scopeId, viewBy: ReportConstants.ViewBy.CHANNEL, + showRecentOnly, }; + const defaultSortCol = showRecentOnly ? + ReportConstants.TableColumns.DATE : ReportConstants.TableColumns.NAME; + store.dispatch( + 'SET_REPORT_SORTING', + defaultSortCol, + ReportConstants.SortOrders.DESCENDING + ); store.dispatch('SET_REPORT_PROPERTIES', reportProps); store.dispatch('SET_REPORT_TABLE_DATA', _channelReportState(allChannelLastActive)); store.dispatch('CORE_SET_PAGE_LOADING', false); @@ -147,12 +158,28 @@ function _recentReportState(data) { })); } -function _learnerReportState(data) { - if (!data) { return []; } - return data.map(row => ({ - id: row.pk.toString(), // see https://github.com/learningequality/kolibri/issues/1255 +function _getGroupName(userId, groupData) { + const group = groupData.find(g => g.user_ids.includes(userId)); + return group ? group.name : undefined; +} + +function _rootLearnerReportState(userData, groupData) { + return userData.map(row => ({ + id: row.id, fullName: row.full_name, + username: row.username, + groupName: _getGroupName(row.id, groupData), + })); +} + +function _learnerReportState(userReportData, groupData) { + if (!userReportData) { return []; } + return userReportData.map(row => ({ + id: row.pk, + fullName: row.full_name, + username: row.username, lastActive: row.last_active, + groupName: _getGroupName(row.pk, groupData), progress: row.progress.map(progressData => ({ kind: progressData.kind, timeSpent: progressData.time_spent, @@ -183,13 +210,6 @@ function _contentSummaryState(data) { }; } -function _userSummaryState(data) { - if (!data) { - return {}; - } - return data; -} - function _setContentReport(store, reportPayload) { const reportPromise = ContentReportResource.getCollection(reportPayload).fetch(); reportPromise.then(report => { @@ -198,12 +218,14 @@ function _setContentReport(store, reportPayload) { return reportPromise; } -function _setLearnerReport(store, reportPayload) { - const reportPromise = UserReportResource.getCollection(reportPayload).fetch(); - reportPromise.then(report => { - store.dispatch('SET_REPORT_TABLE_DATA', _learnerReportState(report)); +function _setLearnerReport(store, reportPayload, classId) { + const promises = [ + UserReportResource.getCollection(reportPayload).fetch(), + LearnerGroupResource.getCollection({ parent: classId }).fetch(), + ]; + return Promise.all(promises).then(([usersReport, learnerGroups]) => { + store.dispatch('SET_REPORT_TABLE_DATA', _learnerReportState(usersReport, learnerGroups)); }); - return reportPromise; } function _setContentSummary(store, contentScopeId, reportPayload) { @@ -214,24 +236,17 @@ function _setContentSummary(store, contentScopeId, reportPayload) { return contentPromise; } -function _setUserSummary(store, userScopeId, reportPayload) { - const userPromise 
= UserSummaryResource.getModel(userScopeId, reportPayload).fetch(); - userPromise.then(userSummary => { - store.dispatch('SET_REPORT_USER_SUMMARY', _userSummaryState(userSummary)); - }); - return userPromise; -} - function _showContentList(store, options) { const reportPayload = { channel_id: options.channelId, content_node_id: options.contentScopeId, collection_kind: options.userScope, - collection_id: options.classId, + collection_id: options.userScopeId, }; const promises = [ _setContentSummary(store, options.contentScopeId, reportPayload), _setContentReport(store, reportPayload), + setClassState(store, options.classId), ]; Promise.all(promises).then( () => { @@ -243,6 +258,11 @@ function _showContentList(store, options) { userScopeId: options.userScopeId, viewBy: ReportConstants.ViewBy.CONTENT, }; + store.dispatch( + 'SET_REPORT_SORTING', + ReportConstants.TableColumns.NAME, + ReportConstants.SortOrders.DESCENDING + ); store.dispatch('SET_REPORT_PROPERTIES', reportProps); store.dispatch('CORE_SET_PAGE_LOADING', false); }, @@ -259,7 +279,8 @@ function _showLearnerList(store, options) { }; const promises = [ _setContentSummary(store, options.contentScopeId, reportPayload), - _setLearnerReport(store, reportPayload), + _setLearnerReport(store, reportPayload, options.classId), + setClassState(store, options.classId), ]; Promise.all(promises).then( () => { @@ -270,7 +291,13 @@ function _showLearnerList(store, options) { userScope: options.userScope, userScopeId: options.userScopeId, viewBy: ReportConstants.ViewBy.LEARNER, + showRecentOnly: options.showRecentOnly, }; + store.dispatch( + 'SET_REPORT_SORTING', + ReportConstants.TableColumns.NAME, + ReportConstants.SortOrders.DESCENDING + ); store.dispatch('SET_REPORT_PROPERTIES', reportProps); store.dispatch('CORE_SET_PAGE_LOADING', false); }, @@ -281,58 +308,48 @@ function _showLearnerList(store, options) { // needs exercise, attemptlog. Pass answerstate into contentrender to display answer function _showExerciseDetailView(store, classId, userId, channelId, contentId, attemptLogIndex, interactionIndex) { - Promise.all([ - ContentNodeResource.getCollection({ channel_id: channelId }, { content_id: contentId }).fetch(), - AttemptLogResource.getCollection({ user: userId, content: contentId }).fetch(), - SummaryLogResource.getCollection({ user_id: userId, content_id: contentId }).fetch(), - FacilityUserResource.getModel(userId).fetch(), - setClassState(store, classId), - ]).then( - ([exercises, attemptLogs, summaryLog, user]) => { - // MAPPERS NEEDED - // attemptLogState - // attemptLogListState - // interactionState - // InteractionHistoryState - // user? 
- - const exercise = exercises[0]; - - // FIRST LOOP: Sort them by most recent - attemptLogs.sort( - (attemptLog1, attemptLog2) => - new Date(attemptLog2.end_timestamp) - new Date(attemptLog1.end_timestamp) - ); - - const exerciseQuestions = assessmentMetaDataState(exercise).assessmentIds; - // SECOND LOOP: Add their question number - if (exerciseQuestions && exerciseQuestions.length) { - attemptLogs.forEach( - attemptLog => { - attemptLog.questionNumber = (exerciseQuestions.indexOf(attemptLog.item) + 1); - } + ContentNodeResource.getModel(contentId, { channel_id: channelId }).fetch().then( + exercise => { + Promise.all([ + AttemptLogResource.getCollection({ user: userId, content: exercise.content_id }).fetch(), + SummaryLogResource.getCollection( + { user_id: userId, content_id: exercise.content_id } + ).fetch(), + FacilityUserResource.getModel(userId).fetch(), + setClassState(store, classId), + ]).then(([attemptLogs, summaryLog, user]) => { + attemptLogs.sort( + (attemptLog1, attemptLog2) => + new Date(attemptLog2.end_timestamp) - new Date(attemptLog1.end_timestamp) ); - } - - const currentAttemptLog = attemptLogs[attemptLogIndex] || {}; - - const currentInteractionHistory = currentAttemptLog.interaction_history || []; - - const pageState = { - // because this is info returned from a collection - user, - exercise, - attemptLogs, - currentAttemptLog, - interactionIndex, - currentInteractionHistory, - currentInteraction: currentInteractionHistory[interactionIndex], - summaryLog: summaryLog[0], - channelId, - attemptLogIndex, - }; - store.dispatch('SET_PAGE_STATE', pageState); - store.dispatch('CORE_SET_PAGE_LOADING', false); + const exerciseQuestions = assessmentMetaDataState(exercise).assessmentIds; + // SECOND LOOP: Add their question number + if (exerciseQuestions && exerciseQuestions.length) { + attemptLogs.forEach( + attemptLog => { + attemptLog.questionNumber = (exerciseQuestions.indexOf(attemptLog.item) + 1); + } + ); + } + + const currentAttemptLog = attemptLogs[attemptLogIndex] || {}; + const currentInteractionHistory = currentAttemptLog.interaction_history || []; + const pageState = { + // because this is info returned from a collection + user, + exercise, + attemptLogs, + currentAttemptLog, + interactionIndex, + currentInteractionHistory, + currentInteraction: currentInteractionHistory[interactionIndex], + summaryLog: summaryLog[0], + channelId, + attemptLogIndex, + }; + store.dispatch('SET_PAGE_STATE', pageState); + store.dispatch('CORE_SET_PAGE_LOADING', false); + }); }, error => { coreActions.handleApiError(store, error); @@ -353,12 +370,7 @@ function showRecentChannels(store, classId) { store.dispatch('SET_PAGE_NAME', Constants.PageNames.RECENT_CHANNELS); store.dispatch('CORE_SET_TITLE', 'Recent - All channels'); store.dispatch('CORE_SET_PAGE_LOADING', true); - _showChannelList(store, classId, true /* showRecentOnly */); - store.dispatch( - 'SET_REPORT_SORTING', - ReportConstants.TableColumns.DATE, - ReportConstants.SortOrders.DESCENDING - ); + _showChannelList(store, classId, null, true); } @@ -370,17 +382,15 @@ function showRecentItemsForChannel(store, classId, channelId) { Promise.all([channelPromise, setClassState(store, classId)]).then( ([channelData]) => { - const sevenDaysAgo = now(); - // this is being set by default in the backend - // backend date data might be unreliable, though - sevenDaysAgo.setDate(sevenDaysAgo.getDate() - 7); + const threshold = now(); + threshold.setDate(threshold.getDate() - ReportConstants.RECENCY_THRESHOLD_IN_DAYS); const reportPayload = { 
channel_id: channelId, content_node_id: channelData.root_pk, collection_kind: ReportConstants.UserScopes.CLASSROOM, collection_id: classId, - last_active_time: sevenDaysAgo, + last_active_time: threshold, }; const recentReportsPromise = RecentReportResource.getCollection(reportPayload).fetch(); @@ -392,6 +402,7 @@ function showRecentItemsForChannel(store, classId, channelId) { userScope: ReportConstants.UserScopes.CLASSROOM, userScopeId: classId, viewBy: ReportConstants.ViewBy.RECENT, + showRecentOnly: true, }; store.dispatch('SET_REPORT_PROPERTIES', reportProps); store.dispatch( @@ -422,6 +433,7 @@ function showRecentLearnersForItem(store, classId, channelId, contentId) { contentScopeId: contentId, userScope: ReportConstants.UserScopes.CLASSROOM, userScopeId: classId, + showRecentOnly: true, }); } @@ -441,7 +453,7 @@ function showTopicChannels(store, classId) { store.dispatch('SET_PAGE_NAME', Constants.PageNames.TOPIC_CHANNELS); store.dispatch('CORE_SET_TITLE', 'Topics - All channels'); store.dispatch('CORE_SET_PAGE_LOADING', true); - _showChannelList(store, classId); + _showChannelList(store, classId, null, false); } function showTopicChannelRoot(store, classId, channelId) { @@ -460,6 +472,7 @@ function showTopicChannelRoot(store, classId, channelId) { contentScopeId: channelData.root_pk, userScope: ReportConstants.UserScopes.CLASSROOM, userScopeId: classId, + showRecentOnly: false, }); }, error => coreActions.handleError(store, error) @@ -479,6 +492,7 @@ function showTopicItemList(store, classId, channelId, topicId) { contentScopeId: topicId, userScope: ReportConstants.UserScopes.CLASSROOM, userScopeId: classId, + showRecentOnly: false, }); } @@ -495,6 +509,7 @@ function showTopicLearnersForItem(store, classId, channelId, contentId) { contentScopeId: contentId, userScope: ReportConstants.UserScopes.CLASSROOM, userScopeId: classId, + showRecentOnly: false, }); } @@ -513,25 +528,76 @@ function showLearnerList(store, classId) { store.dispatch('SET_PAGE_NAME', Constants.PageNames.LEARNER_LIST); store.dispatch('CORE_SET_TITLE', 'Learners'); store.dispatch('CORE_SET_PAGE_LOADING', true); + + const promises = [ + FacilityUserResource.getCollection({ member_of: classId }).fetch({}, true), + LearnerGroupResource.getCollection({ parent: classId }).fetch(), + setClassState(store, classId), + ]; + + Promise.all(promises).then( + ([userData, groupData]) => { + store.dispatch('SET_REPORT_TABLE_DATA', _rootLearnerReportState(userData, groupData)); + store.dispatch('SET_REPORT_SORTING', + ReportConstants.TableColumns.NAME, + ReportConstants.SortOrders.DESCENDING + ); + store.dispatch('SET_REPORT_CONTENT_SUMMARY', {}); + store.dispatch('SET_REPORT_PROPERTIES', { + contentScope: ReportConstants.ContentScopes.ALL, + userScope: ReportConstants.UserScopes.CLASSROOM, + userScopeId: classId, + viewBy: ReportConstants.ViewBy.LEARNER, + showRecentOnly: false, + }); + store.dispatch('CORE_SET_PAGE_LOADING', false); + }, + error => coreActions.handleError(store, error) + ); } function showLearnerChannels(store, classId, userId) { store.dispatch('SET_PAGE_NAME', Constants.PageNames.LEARNER_CHANNELS); store.dispatch('CORE_SET_TITLE', 'Learners - All channels'); store.dispatch('CORE_SET_PAGE_LOADING', true); - _showChannelList(store, classId); + _showChannelList(store, classId, userId, false); } function showLearnerChannelRoot(store, classId, userId, channelId) { store.dispatch('SET_PAGE_NAME', Constants.PageNames.LEARNER_CHANNEL_ROOT); store.dispatch('CORE_SET_TITLE', 'Learners - Channel'); 
store.dispatch('CORE_SET_PAGE_LOADING', true); + + const channelPromise = ChannelResource.getModel(channelId).fetch(); + channelPromise.then( + (channelData) => { + _showContentList(store, { + classId, + channelId, + contentScope: ReportConstants.ContentScopes.ROOT, + contentScopeId: channelData.root_pk, + userScope: ReportConstants.UserScopes.USER, + userScopeId: userId, + showRecentOnly: false, + }); + }, + error => coreActions.handleError(store, error) + ); } function showLearnerItemList(store, classId, userId, channelId, topicId) { store.dispatch('SET_PAGE_NAME', Constants.PageNames.LEARNER_ITEM_LIST); store.dispatch('CORE_SET_TITLE', 'Learners - Items'); store.dispatch('CORE_SET_PAGE_LOADING', true); + _showContentList(store, { + classId, + channelId, + contentScope: ReportConstants.ContentScopes.TOPIC, + contentScopeId: topicId, + userScope: ReportConstants.UserScopes.USER, + userScopeId: userId, + showRecentOnly: false, + }); } function showLearnerItemDetails(store, classId, userId, channelId, contentId, @@ -562,5 +628,4 @@ module.exports = { showLearnerItemList, showLearnerItemDetails, setReportSorting, - _setUserSummary, }; diff --git a/kolibri/plugins/coach/assets/src/state/getters/reportUtils.js b/kolibri/plugins/coach/assets/src/state/getters/reportUtils.js index 66cf3b75fbe..8e7a9a72dc0 100644 --- a/kolibri/plugins/coach/assets/src/state/getters/reportUtils.js +++ b/kolibri/plugins/coach/assets/src/state/getters/reportUtils.js @@ -38,6 +38,7 @@ function genCompareFunc(sortColumn, sortOrder) { columnToKey[ReportConstants.TableColumns.EXERCISE] = 'exerciseProgress'; columnToKey[ReportConstants.TableColumns.CONTENT] = 'contentProgress'; columnToKey[ReportConstants.TableColumns.DATE] = 'lastActive'; + columnToKey[ReportConstants.TableColumns.GROUP] = 'groupName'; const key = columnToKey[sortColumn]; // take into account sort order diff --git a/kolibri/plugins/coach/assets/src/state/getters/reports.js b/kolibri/plugins/coach/assets/src/state/getters/reports.js index 422ba520e38..f1e66b8fe5e 100644 --- a/kolibri/plugins/coach/assets/src/state/getters/reports.js +++ b/kolibri/plugins/coach/assets/src/state/getters/reports.js @@ -1,8 +1,11 @@ const ReportConstants = require('../../reportConstants'); +const CoachConstants = require('../../constants'); const CoreConstants = require('kolibri.coreVue.vuex.constants'); const logging = require('kolibri.lib.logging'); +const { now } = require('kolibri.utils.serverClock'); const ReportUtils = require('./reportUtils'); const { classMemberCount } = require('./main'); +const differenceInDays = require('date-fns/difference_in_days'); const ContentNodeKinds = CoreConstants.ContentNodeKinds; @@ -22,15 +25,19 @@ function _genRow(state, item) { row.kind = CoreConstants.USER; row.id = item.id; row.title = item.fullName; + row.groupName = item.groupName; row.parent = undefined; // not currently used. Eventually, maybe classes/groups? 
- // for learners, the exercise counts are the global values - row.exerciseProgress = ReportUtils.calcProgress( - item.progress, ReportUtils.onlyExercises, getters.exerciseCount(state), 1 - ); - row.contentProgress = ReportUtils.calcProgress( - item.progress, ReportUtils.onlyContent, getters.contentCount(state), 1 - ); + // for root list (of channels) we don't currently calculate progress + if (state.pageName !== CoachConstants.PageNames.LEARNER_LIST) { + // for learners, the exercise counts are the global values + row.exerciseProgress = ReportUtils.calcProgress( + item.progress, ReportUtils.onlyExercises, getters.exerciseCount(state), 1 + ); + row.contentProgress = ReportUtils.calcProgress( + item.progress, ReportUtils.onlyContent, getters.contentCount(state), 1 + ); + } } else if (state.pageState.viewBy === ReportConstants.ViewBy.CHANNEL) { row.id = item.id; row.title = item.title; @@ -38,6 +45,7 @@ function _genRow(state, item) { // CONTENT NODES row.kind = item.kind; row.id = item.id; + row.contentId = item.contentId; row.title = item.title; row.parent = { id: item.parent.id, title: item.parent.title }; @@ -135,6 +143,12 @@ Object.assign(getters, { if (state.pageState.sortOrder !== ReportConstants.SortOrders.NONE) { data.sort(ReportUtils.genCompareFunc(state.pageState.sortColumn, state.pageState.sortOrder)); } + if (state.pageState.showRecentOnly) { + return data.filter(row => + Boolean(row.lastActive) && + differenceInDays(now(), row.lastActive) <= ReportConstants.RECENCY_THRESHOLD_IN_DAYS + ); + } return data; }, }); diff --git a/kolibri/plugins/coach/assets/src/state/store.js b/kolibri/plugins/coach/assets/src/state/store.js index b707b372f99..ae4a27bdd02 100644 --- a/kolibri/plugins/coach/assets/src/state/store.js +++ b/kolibri/plugins/coach/assets/src/state/store.js @@ -24,9 +24,6 @@ const mutations = { }, // report - SET_RECENT_ONLY(state, showRecentOnly) { - Vue.set(state.pageState, 'showRecentOnly', showRecentOnly); - }, SET_REPORT_SORTING(state, sortColumn, sortOrder) { Vue.set(state.pageState, 'sortColumn', sortColumn); Vue.set(state.pageState, 'sortOrder', sortOrder); @@ -38,6 +35,7 @@ const mutations = { Vue.set(state.pageState, 'userScope', options.userScope); Vue.set(state.pageState, 'userScopeId', options.userScopeId); Vue.set(state.pageState, 'viewBy', options.viewBy); + Vue.set(state.pageState, 'showRecentOnly', options.showRecentOnly); }, SET_REPORT_TABLE_DATA(state, tableData) { Vue.set(state.pageState, 'tableData', tableData); diff --git a/kolibri/plugins/coach/assets/src/views/class-list-page/index.vue b/kolibri/plugins/coach/assets/src/views/class-list-page/index.vue index 838ad4db28d..469eb969eaf 100644 --- a/kolibri/plugins/coach/assets/src/views/class-list-page/index.vue +++ b/kolibri/plugins/coach/assets/src/views/class-list-page/index.vue @@ -58,7 +58,7 @@ methods: { recentPageLink(id) { return { - name: constants.PageNames.RECENT_CHANNELS, + name: constants.PageNames.TOPIC_CHANNELS, params: { classId: id }, }; }, diff --git a/kolibri/plugins/coach/assets/src/views/exam-report-page/index.vue b/kolibri/plugins/coach/assets/src/views/exam-report-page/index.vue index 456420fa2f2..e354d738068 100644 --- a/kolibri/plugins/coach/assets/src/views/exam-report-page/index.vue +++ b/kolibri/plugins/coach/assets/src/views/exam-report-page/index.vue @@ -49,11 +49,11 @@ </td> <td class="table-data"> - <span v-if="examTaker.score === undefined">&mdash;</span> + <span v-if="examTaker.score === undefined">–</span> <span v-else>{{ $tr('scorePercentage', { num: examTaker.score 
/ exam.question_count }) }}</span> </td> - <td class="table-data">{{ examTaker.group.name || $tr('ungrouped') }}</td> + <td class="table-data">{{ examTaker.group.name || '–' }}</td> </tr> </tbody> </table> @@ -126,7 +126,6 @@ scorePercentage: '{num, number, percent}', group: 'Group', noExamData: 'No data to show.', - ungrouped: 'Ungrouped', }, }; diff --git a/kolibri/plugins/coach/assets/src/views/index.vue b/kolibri/plugins/coach/assets/src/views/index.vue index 23285ee0b19..18bec3814b3 100644 --- a/kolibri/plugins/coach/assets/src/views/index.vue +++ b/kolibri/plugins/coach/assets/src/views/index.vue @@ -77,11 +77,11 @@ [Constants.PageNames.TOPIC_ITEM_LIST]: 'item-list-page', [Constants.PageNames.TOPIC_LEARNERS_FOR_ITEM]: 'learner-list-page', [Constants.PageNames.TOPIC_LEARNER_ITEM_DETAILS]: 'learner-exercise-detail-page', - [Constants.PageNames.LEARNER_LIST]: 'item-list-page', + [Constants.PageNames.LEARNER_LIST]: 'learner-list-page', [Constants.PageNames.LEARNER_CHANNELS]: 'channel-list-page', [Constants.PageNames.LEARNER_CHANNEL_ROOT]: 'item-list-page', [Constants.PageNames.LEARNER_ITEM_LIST]: 'item-list-page', - [Constants.PageNames.LEARNER_ITEM_DETAILS]: 'learner-item-details-page', + [Constants.PageNames.LEARNER_ITEM_DETAILS]: 'learner-exercise-detail-page', [Constants.PageNames.EXAM_REPORT]: 'exam-report-page', [Constants.PageNames.EXAM_REPORT_DETAIL]: 'exam-report-detail-page', }; diff --git a/kolibri/plugins/coach/assets/src/views/reports/channel-list-page.vue b/kolibri/plugins/coach/assets/src/views/reports/channel-list-page.vue index 916b7982c62..e9e23d838f6 100644 --- a/kolibri/plugins/coach/assets/src/views/reports/channel-list-page.vue +++ b/kolibri/plugins/coach/assets/src/views/reports/channel-list-page.vue @@ -3,11 +3,10 @@ <div> <div v-if="showRecentOnly" ref="recentHeader"> <h1>{{ $tr('recentTitle') }}</h1> - <sub v-if="anyActivity">{{ $tr('recentSubHeading') }}</sub> - <sub v-else>{{ $tr('noRecentSubHeading') }}</sub> + <report-subheading /> </div> - <report-table v-if="anyActivity" :caption="$tr('channelList')"> + <report-table v-if="standardDataTable.length" :caption="$tr('channelList')"> <thead slot="thead"> <tr> <header-cell @@ -24,7 +23,7 @@ </thead> <tbody slot="tbody"> <template v-for="channel in standardDataTable"> - <tr v-if="channelIsVisible(channel.lastActive)" :key="channel.id"> + <tr :key="channel.id"> <name-cell :kind="CHANNEL" :title="channel.title" :link="reportLink(channel.id)"/> <activity-cell :date="channel.lastActive"/> </tr> @@ -40,8 +39,6 @@ const { ContentNodeKinds } = require('kolibri.coreVue.vuex.constants'); const { PageNames } = require('../../constants'); - const differenceInDays = require('date-fns/difference_in_days'); - const { now } = require('kolibri.utils.serverClock'); const reportConstants = require('../../reportConstants'); const reportGetters = require('../../state/getters/reports'); @@ -50,19 +47,13 @@ $trNameSpace: 'coachRecentPageChannelList', $trs: { recentTitle: 'Recent Activity', - recentSubHeading: 'Showing recent activity in past 7 days', - noRecentSubHeading: 'No recent activity in past 7 days', channels: 'Channels', channelList: 'Channel list', lastActivity: 'Last active', }, - data() { - return { - currentDateTime: now(), - }; - }, components: { 'report-table': require('./report-table'), + 'report-subheading': require('./report-subheading'), 'header-cell': require('./table-cells/header-cell'), 'name-cell': require('./table-cells/name-cell'), 'activity-cell': require('./table-cells/activity-cell'), @@ -74,19 +65,8 @@ 
tableColumns() { return reportConstants.TableColumns; }, - anyActivity() { - return this.standardDataTable.some(channel => this.channelIsVisible(channel.lastActive)); - }, }, methods: { - channelIsVisible(lastActiveTime) { - const THREHOLD_IN_DAYS = 7; - if (!this.showRecentOnly) return true; - return ( - Boolean(lastActiveTime) && - differenceInDays(this.currentDateTime, lastActiveTime) <= THREHOLD_IN_DAYS - ); - }, reportLink(channelId) { const linkTargets = { [PageNames.RECENT_CHANNELS]: PageNames.RECENT_ITEMS_FOR_CHANNEL, diff --git a/kolibri/plugins/coach/assets/src/views/reports/item-list-page.vue b/kolibri/plugins/coach/assets/src/views/reports/item-list-page.vue index cd3dcda5ffb..23e098c6b96 100644 --- a/kolibri/plugins/coach/assets/src/views/reports/item-list-page.vue +++ b/kolibri/plugins/coach/assets/src/views/reports/item-list-page.vue @@ -87,24 +87,47 @@ }, methods: { genRowLink(row) { - if (row.kind === CoreConstants.ContentNodeKinds.TOPIC) { + if (CoachConstants.TopicReports.includes(this.pageName)) { + if (row.kind === CoreConstants.ContentNodeKinds.TOPIC) { + return { + name: CoachConstants.PageNames.TOPIC_ITEM_LIST, + params: { + classId: this.classId, + channelId: this.pageState.channelId, + topicId: row.id, + }, + }; + } return { - name: CoachConstants.PageNames.TOPIC_ITEM_LIST, + name: CoachConstants.PageNames.TOPIC_LEARNERS_FOR_ITEM, params: { classId: this.classId, channelId: this.pageState.channelId, - topicId: row.id, - } + contentId: row.id, + }, }; - } - return { - name: CoachConstants.PageNames.TOPIC_LEARNERS_FOR_ITEM, - params: { - classId: this.classId, - channelId: this.pageState.channelId, - contentId: row.id, + } else if (CoachConstants.LearnerReports.includes(this.pageName)) { + if (row.kind === CoreConstants.ContentNodeKinds.TOPIC) { + return { + name: CoachConstants.PageNames.LEARNER_ITEM_LIST, + params: { + classId: this.classId, + channelId: this.pageState.channelId, + topicId: row.id, + }, + }; + } else if (row.kind === CoreConstants.ContentNodeKinds.EXERCISE) { + return { + name: CoachConstants.PageNames.LEARNER_ITEM_DETAILS_ROOT, + params: { + classId: this.classId, + channelId: this.pageState.channelId, + contentId: row.id, + }, + }; } - }; + } + return null; }, }, computed: { @@ -115,6 +138,7 @@ vuex: { getters: { classId: state => state.classId, + pageName: state => state.pageName, pageState: state => state.pageState, exerciseCount: reportGetters.exerciseCount, contentCount: reportGetters.contentCount, diff --git a/kolibri/plugins/coach/assets/src/views/reports/learner-exercise-detail-page/index.vue b/kolibri/plugins/coach/assets/src/views/reports/learner-exercise-detail-page/index.vue index fbf9b9f3150..1e8d4089661 100644 --- a/kolibri/plugins/coach/assets/src/views/reports/learner-exercise-detail-page/index.vue +++ b/kolibri/plugins/coach/assets/src/views/reports/learner-exercise-detail-page/index.vue @@ -1,9 +1,6 @@ <template> - <immersive-full-screen - :backPageLink="backPageLink" - :backPageText="$tr('backPrompt', { exerciseTitle: exercise.title })" - > + <immersive-full-screen :backPageLink="backPageLink" :backPageText="backPageText"> <template> <div class="summary-container"> <attempt-summary @@ -57,7 +54,7 @@ module.exports = { $trNameSpace: 'coachExerciseRenderPage', $trs: { - backPrompt: 'Back to { exerciseTitle }', + backPrompt: 'Back to { backTitle }', }, components: { 'immersive-full-screen': require('kolibri.coreVue.components.immersiveFullScreen'), @@ -74,7 +71,7 @@ params: { classId: this.classId, channelId: this.channelId, - 
contentId: this.contentId, + contentId: this.exercise.pk, } }; } @@ -84,31 +81,41 @@ params: { classId: this.classId, channelId: this.channelId, - contentId: this.contentId, + contentId: this.exercise.pk, } }; } - return { - name: constants.PageNames.LEARNER_ITEM_LIST, - params: { - classId: this.classId, - channelId: this.channelId, - contentId: this.contentId, - } - }; + if (this.pageName === constants.PageNames.LEARNER_ITEM_DETAILS) { + return { + name: constants.PageNames.LEARNER_ITEM_LIST, + params: { + classId: this.classId, + channelId: this.channelId, + userId: this.user.id, + topicId: this.parentTopic.pk, + } + }; + } + return undefined; + }, + backPageText() { + if (constants.LearnerReports.includes(this.pageName)) { + return this.$tr('backPrompt', { backTitle: this.parentTopic.title }); + } + return this.$tr('backPrompt', { backTitle: this.exercise.title }); + }, + parentTopic() { + return this.exercise.ancestors[this.exercise.ancestors.length - 1]; }, }, methods: { - backtoText(text) { - return this.$tr('backto', { text }); - }, navigateToNewAttempt(attemptLogIndex) { this.$router.push({ name: this.pageName, params: { channelId: this.channelId, userId: this.user.id, - contentId: this.exercise.content_id, + contentId: this.exercise.pk, interactionIndex: 0, attemptLogIndex, }, @@ -120,7 +127,7 @@ params: { channelId: this.channelId, userId: this.user.id, - contentId: this.exercise.content_id, + contentId: this.exercise.pk, attemptLogIndex: this.attemptLogIndex, interactionIndex, }, @@ -136,7 +143,6 @@ currentInteraction: state => state.pageState.currentInteraction, currentInteractionHistory: state => state.pageState.currentInteractionHistory, classId: state => state.classId, - contentId: state => state.pageState.exercise.pk, channelId: state => state.pageState.channelId, user: state => state.pageState.user, exercise: state => state.pageState.exercise, diff --git a/kolibri/plugins/coach/assets/src/views/reports/learner-list-page.vue b/kolibri/plugins/coach/assets/src/views/reports/learner-list-page.vue index 3dd47f46362..8f7c39a66c0 100644 --- a/kolibri/plugins/coach/assets/src/views/reports/learner-list-page.vue +++ b/kolibri/plugins/coach/assets/src/views/reports/learner-list-page.vue @@ -3,20 +3,43 @@ <div> <breadcrumbs/> - <h1> + <h1 v-if="!isRootLearnerPage"> <content-icon :kind="pageState.contentScopeSummary.kind" colorstyle="text-default" /> {{ pageState.contentScopeSummary.title }} </h1> + <report-subheading /> <report-table> <thead slot="thead"> <tr> - <header-cell :text="$tr('name')" align="left"/> - <header-cell :text="isExercisePage ? $tr('exerciseProgress') : $tr('contentProgress')"/> - <header-cell :text="$tr('lastActivity')" align="left"/> + <header-cell + align="left" + :text="$tr('name')" + :column="TableColumns.NAME" + :sortable="true" + /> + <header-cell + v-if="!isRootLearnerPage" + :text="isExercisePage ? $tr('exerciseProgress') : $tr('contentProgress')" + :column="isExercisePage ? TableColumns.EXERCISE : TableColumns.CONTENT" + :sortable="true" + /> + <header-cell + align="left" + :text="$tr('group')" + :column="TableColumns.GROUP" + :sortable="true" + /> + <header-cell + align="left" + v-if="!isRootLearnerPage" + :text="$tr('lastActivity')" + :column="TableColumns.DATE" + :sortable="true" + /> </tr> </thead> <tbody slot="tbody"> @@ -27,10 +50,12 @@ :link="genLink(row)" /> <progress-cell + v-if="!isRootLearnerPage" :num="isExercisePage ? 
row.exerciseProgress : row.contentProgress" :isExercise="isExercisePage" /> - <activity-cell :date="row.lastActive" /> + <td>{{ row.groupName || '–' }}</td> + <activity-cell v-if="!isRootLearnerPage" :date="row.lastActive" /> </tr> </tbody> </report-table> @@ -45,11 +70,13 @@ const CoreConstants = require('kolibri.coreVue.vuex.constants'); const CoachConstants = require('../../constants'); const reportGetters = require('../../state/getters/reports'); + const ReportConstants = require('../../reportConstants'); module.exports = { $trNameSpace: 'learnerReportPage', $trs: { name: 'Name', + group: 'Group', exerciseProgress: 'Exercise progress', contentProgress: 'Resource progress', lastActivity: 'Last activity', @@ -60,6 +87,7 @@ 'content-icon': require('kolibri.coreVue.components.contentIcon'), 'breadcrumbs': require('./breadcrumbs'), 'report-table': require('./report-table'), + 'report-subheading': require('./report-subheading'), 'header-cell': require('./table-cells/header-cell'), 'name-cell': require('./table-cells/name-cell'), 'progress-cell': require('./table-cells/progress-cell'), @@ -69,6 +97,12 @@ isExercisePage() { return this.pageState.contentScopeSummary.kind === CoreConstants.ContentNodeKinds.EXERCISE; }, + isRootLearnerPage() { + return this.pageName === CoachConstants.PageNames.LEARNER_LIST; + }, + TableColumns() { + return ReportConstants.TableColumns; + }, }, methods: { genLink(row) { @@ -82,7 +116,15 @@ classId: this.classId, userId: row.id, channelId: this.pageState.channelId, - contentId: this.pageState.contentScopeSummary.contentId, + contentId: this.pageState.contentScopeSummary.id, + } + }; + } else if (this.isRootLearnerPage) { + return { + name: CoachConstants.PageNames.LEARNER_CHANNELS, + params: { + classId: this.classId, + userId: row.id, } }; } diff --git a/kolibri/plugins/coach/assets/src/views/reports/recent-items-page.vue b/kolibri/plugins/coach/assets/src/views/reports/recent-items-page.vue index 7aa8416fff4..081872ed876 100644 --- a/kolibri/plugins/coach/assets/src/views/reports/recent-items-page.vue +++ b/kolibri/plugins/coach/assets/src/views/reports/recent-items-page.vue @@ -4,8 +4,7 @@ <breadcrumbs/> <h1>{{ $tr('title') }}</h1> - <sub v-if="standardDataTable.length">{{ $tr('subHeading') }}</sub> - <sub v-else>{{ $tr('noRecentProgress') }}</sub> + <report-subheading /> <report-table v-if="standardDataTable.length"> <thead slot="thead"> @@ -61,10 +60,8 @@ $trNameSpace: 'coachRecentReports', $trs: { title: 'Recent Activity', - subHeading: 'Showing recent activity in past 7 days', name: 'Name', progress: 'Class progress', - noRecentProgress: 'No recent activity in past 7 days', reportProgress: '{completed} {descriptor}', listened: '{proportionCompleted} listened', opened: '{proportionCompleted} opened', @@ -75,6 +72,7 @@ components: { 'breadcrumbs': require('./breadcrumbs'), 'report-table': require('./report-table'), + 'report-subheading': require('./report-subheading'), 'header-cell': require('./table-cells/header-cell'), 'name-cell': require('./table-cells/name-cell'), 'activity-cell': require('./table-cells/activity-cell'), diff --git a/kolibri/plugins/coach/assets/src/views/reports/report-subheading.vue b/kolibri/plugins/coach/assets/src/views/reports/report-subheading.vue new file mode 100644 index 00000000000..90ed2bd2b06 --- /dev/null +++ b/kolibri/plugins/coach/assets/src/views/reports/report-subheading.vue @@ -0,0 +1,38 @@ +<template> + + <div v-if="showRecentOnly"> + <sub v-if="standardDataTable.length">{{ $tr('subHeading', { threshold }) }}</sub> + 
<sub v-else>{{ $tr('noRecentProgress', { threshold }) }}</sub> + </div> + +</template> + + +<script> + + const ReportConstants = require('../../reportConstants'); + const reportGetters = require('../../state/getters/reports'); + + module.exports = { + $trNameSpace: 'coachReportSubheading', + $trs: { + subHeading: 'Only showing activity in past {threshold} days', + noRecentProgress: 'No activity in past {threshold} days', + }, + computed: { + threshold() { + return ReportConstants.RECENCY_THRESHOLD_IN_DAYS; + }, + }, + vuex: { + getters: { + standardDataTable: reportGetters.standardDataTable, + showRecentOnly: state => state.pageState.showRecentOnly, + }, + }, + }; + +</script> + + +<style lang="stylus" scoped></style> diff --git a/kolibri/plugins/coach/assets/src/views/top-nav/index.vue b/kolibri/plugins/coach/assets/src/views/top-nav/index.vue index 73591b3c6b6..15f038654b4 100644 --- a/kolibri/plugins/coach/assets/src/views/top-nav/index.vue +++ b/kolibri/plugins/coach/assets/src/views/top-nav/index.vue @@ -1,33 +1,31 @@ <template> <div class="top"> - <nav-link - :to="recentLink" - :active="isRecentPage" - :text="$tr('recent')" - /> <nav-link :to="topicsLink" :active="isTopicPage" :text="$tr('topics')" /> <nav-link - :to="examsLink" - :active="Constants.ExamPages.includes(pageName)" - :text="$tr('exams')" + :to="recentLink" + :active="isRecentPage" + :text="$tr('recent')" /> - <!-- <nav-link :to="learnersLink" :active="isLearnerPage" :text="$tr('learners')" /> - --> <nav-link :to="groupsLink" :active="pageName === Constants.PageNames.GROUPS" :text="$tr('groups')" /> + <nav-link + :to="examsLink" + :active="Constants.ExamPages.includes(pageName)" + :text="$tr('exams')" + /> </div> </template> diff --git a/kolibri/plugins/coach/assets/test/views/channel-list-page.spec.js b/kolibri/plugins/coach/assets/test/views/channel-list-page.spec.js index aef8d2f6cee..add5f7e1b25 100644 --- a/kolibri/plugins/coach/assets/test/views/channel-list-page.spec.js +++ b/kolibri/plugins/coach/assets/test/views/channel-list-page.spec.js @@ -50,6 +50,7 @@ function makeVm(options = {}, state) { }); const components = { 'name-cell': '<div></div>', + 'report-subheading': '<div></div>', }; const Ctor = Vue.extend(ChannelListPage); return new Ctor(Object.assign(options, { store, components })).$mount(); diff --git a/kolibri/plugins/coach/assets/test/views/exam-report-page.spec.js b/kolibri/plugins/coach/assets/test/views/exam-report-page.spec.js index 319e9794a75..4ac6e63d09d 100644 --- a/kolibri/plugins/coach/assets/test/views/exam-report-page.spec.js +++ b/kolibri/plugins/coach/assets/test/views/exam-report-page.spec.js @@ -81,6 +81,6 @@ describe('exam report page', () => { // score isn't properly formatted assert.equal(getTextInScoreColumn(els.tableRows()[0]), '{num, number, percent} '); // emdash - assert.equal(getTextInScoreColumn(els.tableRows()[1]), '\u2014 '); + assert.equal(getTextInScoreColumn(els.tableRows()[1]), '– '); }); }); diff --git a/kolibri/plugins/coach/serializers.py b/kolibri/plugins/coach/serializers.py index 773e3083049..b1fe23f26f4 100644 --- a/kolibri/plugins/coach/serializers.py +++ b/kolibri/plugins/coach/serializers.py @@ -18,7 +18,7 @@ class UserReportSerializer(serializers.ModelSerializer): class Meta: model = FacilityUser fields = ( - 'pk', 'full_name', 'progress', 'last_active', + 'pk', 'username', 'full_name', 'progress', 'last_active', ) def get_progress(self, target_user):
googleapis__google-cloud-python-5375
Updated SubscriberClient docs. `subscribe_experimental` was promoted to `subscribe`, but the docs for the `SubscriberClient` still suggested using `subscribe_experimental`.
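The docs change amounts to the usage example calling `subscribe` instead of the old `subscribe_experimental` name. A minimal sketch of the corrected usage follows, with placeholder project and subscription names; the subscription is assumed to already exist.

```python
from google.cloud import pubsub_v1

subscriber = pubsub_v1.SubscriberClient()
# Placeholder names; the subscription must already exist (e.g. via create_subscription).
subscription_path = subscriber.subscription_path('my-project-id', 'my-subscription')

def callback(message):
    print(message)
    message.ack()

# `subscribe` (formerly `subscribe_experimental`) returns a StreamingPullFuture.
future = subscriber.subscribe(subscription_path, callback)

try:
    future.result()  # blocks until a non-recoverable error or cancellation
except KeyboardInterrupt:
    future.cancel()
```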
[ { "content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pkg_resources\nimport os\n\nimport grpc\n\nfrom google.api_core import grpc_helpers\n\nfrom google.cloud.pubsub_v1 import _gapic\nfrom google.cloud.pubsub_v1 import types\nfrom google.cloud.pubsub_v1.gapic import subscriber_client\nfrom google.cloud.pubsub_v1.subscriber import futures\nfrom google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager\n\n\n__version__ = pkg_resources.get_distribution('google-cloud-pubsub').version\n\n\n@_gapic.add_methods(subscriber_client.SubscriberClient,\n blacklist=('pull', 'streaming_pull'))\nclass Client(object):\n \"\"\"A subscriber client for Google Cloud Pub/Sub.\n\n This creates an object that is capable of subscribing to messages.\n Generally, you can instantiate this client with no arguments, and you\n get sensible defaults.\n\n Args:\n kwargs (dict): Any additional arguments provided are sent as keyword\n keyword arguments to the underlying\n :class:`~.gapic.pubsub.v1.subscriber_client.SubscriberClient`.\n Generally, you should not need to set additional keyword\n arguments.\n \"\"\"\n def __init__(self, **kwargs):\n # Sanity check: Is our goal to use the emulator?\n # If so, create a grpc insecure channel with the emulator host\n # as the target.\n if os.environ.get('PUBSUB_EMULATOR_HOST'):\n kwargs['channel'] = grpc.insecure_channel(\n target=os.environ.get('PUBSUB_EMULATOR_HOST'),\n )\n\n # Use a custom channel.\n # We need this in order to set appropriate default message size and\n # keepalive options.\n if 'channel' not in kwargs:\n kwargs['channel'] = grpc_helpers.create_channel(\n credentials=kwargs.pop('credentials', None),\n target=self.target,\n scopes=subscriber_client.SubscriberClient._DEFAULT_SCOPES,\n options={\n 'grpc.max_send_message_length': -1,\n 'grpc.max_receive_message_length': -1,\n 'grpc.keepalive_time_ms': 30000,\n }.items(),\n )\n\n # Add the metrics headers, and instantiate the underlying GAPIC\n # client.\n self._api = subscriber_client.SubscriberClient(**kwargs)\n\n @property\n def target(self):\n \"\"\"Return the target (where the API is).\n\n Returns:\n str: The location of the API.\n \"\"\"\n return subscriber_client.SubscriberClient.SERVICE_ADDRESS\n\n @property\n def api(self):\n \"\"\"The underlying gapic API client.\"\"\"\n return self._api\n\n def subscribe(\n self, subscription, callback, flow_control=(),\n scheduler=None):\n \"\"\"Asynchronously start receiving messages on a given subscription.\n\n This method starts a background thread to begin pulling messages from\n a Pub/Sub subscription and scheduling them to be processed using the\n provided ``callback``.\n\n The ``callback`` will be called with an individual\n :class:`google.cloud.pubsub_v1.subscriber.message.Message`. It is the\n responsibility of the callback to either call ``ack()`` or ``nack()``\n on the message when it finished processing. 
If an exception occurs in\n the callback during processing, the exception is logged and the message\n is ``nack()`` ed.\n\n The ``flow_control`` argument can be used to control the rate of at\n which messages are pulled. The settings are relatively conservative by\n default to prevent \"message hoarding\" - a situation where the client\n pulls a large number of messages but can not process them fast enough\n leading it to \"starve\" other clients of messages. Increasing these\n settings may lead to faster throughput for messages that do not take\n a long time to process.\n\n This method starts the receiver in the background and returns a\n *Future* representing its execution. Waiting on the future (calling\n ``result()``) will block forever or until a non-recoverable error\n is encountered (such as loss of network connectivity). Cancelling the\n future will signal the process to shutdown gracefully and exit.\n\n Example\n\n .. code-block:: python\n\n from google.cloud.pubsub_v1 import subscriber\n\n subscriber_client = pubsub.SubscriberClient()\n\n # existing subscription\n subscription = subscriber_client.subscription_path(\n 'my-project-id', 'my-subscription')\n\n def callback(message):\n print(message)\n message.ack()\n\n future = subscriber.subscribe_experimental(\n subscription, callback)\n\n try:\n future.result()\n except KeyboardInterrupt:\n future.cancel()\n\n Args:\n subscription (str): The name of the subscription. The\n subscription should have already been created (for example,\n by using :meth:`create_subscription`).\n callback (Callable[~.pubsub_v1.subscriber.message.Message]):\n The callback function. This function receives the message as\n its only argument and will be called from a different thread/\n process depending on the scheduling strategy.\n flow_control (~.pubsub_v1.types.FlowControl): The flow control\n settings. Use this to prevent situations where you are\n inundated with too many messages at once.\n scheduler (~.pubsub_v1.subscriber.scheduler.Scheduler): An optional\n *scheduler* to use when executing the callback. This controls\n how callbacks are executed concurrently.\n\n Returns:\n google.cloud.pubsub_v1.futures.StreamingPullFuture: A Future object\n that can be used to manage the background stream.\n \"\"\"\n flow_control = types.FlowControl(*flow_control)\n\n manager = streaming_pull_manager.StreamingPullManager(\n self, subscription, flow_control=flow_control, scheduler=scheduler)\n\n future = futures.StreamingPullFuture(manager)\n\n manager.open(callback)\n\n return future\n", "path": "pubsub/google/cloud/pubsub_v1/subscriber/client.py" } ]
[ { "content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pkg_resources\nimport os\n\nimport grpc\n\nfrom google.api_core import grpc_helpers\n\nfrom google.cloud.pubsub_v1 import _gapic\nfrom google.cloud.pubsub_v1 import types\nfrom google.cloud.pubsub_v1.gapic import subscriber_client\nfrom google.cloud.pubsub_v1.subscriber import futures\nfrom google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager\n\n\n__version__ = pkg_resources.get_distribution('google-cloud-pubsub').version\n\n\n@_gapic.add_methods(subscriber_client.SubscriberClient,\n blacklist=('pull', 'streaming_pull'))\nclass Client(object):\n \"\"\"A subscriber client for Google Cloud Pub/Sub.\n\n This creates an object that is capable of subscribing to messages.\n Generally, you can instantiate this client with no arguments, and you\n get sensible defaults.\n\n Args:\n kwargs (dict): Any additional arguments provided are sent as keyword\n keyword arguments to the underlying\n :class:`~.gapic.pubsub.v1.subscriber_client.SubscriberClient`.\n Generally, you should not need to set additional keyword\n arguments.\n \"\"\"\n def __init__(self, **kwargs):\n # Sanity check: Is our goal to use the emulator?\n # If so, create a grpc insecure channel with the emulator host\n # as the target.\n if os.environ.get('PUBSUB_EMULATOR_HOST'):\n kwargs['channel'] = grpc.insecure_channel(\n target=os.environ.get('PUBSUB_EMULATOR_HOST'),\n )\n\n # Use a custom channel.\n # We need this in order to set appropriate default message size and\n # keepalive options.\n if 'channel' not in kwargs:\n kwargs['channel'] = grpc_helpers.create_channel(\n credentials=kwargs.pop('credentials', None),\n target=self.target,\n scopes=subscriber_client.SubscriberClient._DEFAULT_SCOPES,\n options={\n 'grpc.max_send_message_length': -1,\n 'grpc.max_receive_message_length': -1,\n 'grpc.keepalive_time_ms': 30000,\n }.items(),\n )\n\n # Add the metrics headers, and instantiate the underlying GAPIC\n # client.\n self._api = subscriber_client.SubscriberClient(**kwargs)\n\n @property\n def target(self):\n \"\"\"Return the target (where the API is).\n\n Returns:\n str: The location of the API.\n \"\"\"\n return subscriber_client.SubscriberClient.SERVICE_ADDRESS\n\n @property\n def api(self):\n \"\"\"The underlying gapic API client.\"\"\"\n return self._api\n\n def subscribe(\n self, subscription, callback, flow_control=(),\n scheduler=None):\n \"\"\"Asynchronously start receiving messages on a given subscription.\n\n This method starts a background thread to begin pulling messages from\n a Pub/Sub subscription and scheduling them to be processed using the\n provided ``callback``.\n\n The ``callback`` will be called with an individual\n :class:`google.cloud.pubsub_v1.subscriber.message.Message`. It is the\n responsibility of the callback to either call ``ack()`` or ``nack()``\n on the message when it finished processing. 
If an exception occurs in\n the callback during processing, the exception is logged and the message\n is ``nack()`` ed.\n\n The ``flow_control`` argument can be used to control the rate of at\n which messages are pulled. The settings are relatively conservative by\n default to prevent \"message hoarding\" - a situation where the client\n pulls a large number of messages but can not process them fast enough\n leading it to \"starve\" other clients of messages. Increasing these\n settings may lead to faster throughput for messages that do not take\n a long time to process.\n\n This method starts the receiver in the background and returns a\n *Future* representing its execution. Waiting on the future (calling\n ``result()``) will block forever or until a non-recoverable error\n is encountered (such as loss of network connectivity). Cancelling the\n future will signal the process to shutdown gracefully and exit.\n\n Example\n\n .. code-block:: python\n\n from google.cloud.pubsub_v1 import subscriber\n\n subscriber_client = pubsub.SubscriberClient()\n\n # existing subscription\n subscription = subscriber_client.subscription_path(\n 'my-project-id', 'my-subscription')\n\n def callback(message):\n print(message)\n message.ack()\n\n future = subscriber.subscribe(\n subscription, callback)\n\n try:\n future.result()\n except KeyboardInterrupt:\n future.cancel()\n\n Args:\n subscription (str): The name of the subscription. The\n subscription should have already been created (for example,\n by using :meth:`create_subscription`).\n callback (Callable[~.pubsub_v1.subscriber.message.Message]):\n The callback function. This function receives the message as\n its only argument and will be called from a different thread/\n process depending on the scheduling strategy.\n flow_control (~.pubsub_v1.types.FlowControl): The flow control\n settings. Use this to prevent situations where you are\n inundated with too many messages at once.\n scheduler (~.pubsub_v1.subscriber.scheduler.Scheduler): An optional\n *scheduler* to use when executing the callback. This controls\n how callbacks are executed concurrently.\n\n Returns:\n google.cloud.pubsub_v1.futures.StreamingPullFuture: A Future object\n that can be used to manage the background stream.\n \"\"\"\n flow_control = types.FlowControl(*flow_control)\n\n manager = streaming_pull_manager.StreamingPullManager(\n self, subscription, flow_control=flow_control, scheduler=scheduler)\n\n future = futures.StreamingPullFuture(manager)\n\n manager.open(callback)\n\n return future\n", "path": "pubsub/google/cloud/pubsub_v1/subscriber/client.py" } ]
diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/client.py b/pubsub/google/cloud/pubsub_v1/subscriber/client.py
index b567ed6cb9f2..c4906bbbeb21 100644
--- a/pubsub/google/cloud/pubsub_v1/subscriber/client.py
+++ b/pubsub/google/cloud/pubsub_v1/subscriber/client.py
@@ -135,7 +135,7 @@ def callback(message):
                 print(message)
                 message.ack()
 
-            future = subscriber.subscribe_experimental(
+            future = subscriber.subscribe(
                 subscription, callback)
 
             try:
liqd__a4-product-655
Error 500 when trying to edit landing page
I need to add a partner to the landing page on the beteiligung.in production site soon. Currently, I can’t edit the page (500 error). https://www.beteiligung.in/admin/pages/3/edit/ Could you look into it?
[ { "content": "\"\"\"Django settings for Beteiligung.in.\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nfrom django.utils.translation import ugettext_lazy as _\n\nCONFIG_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nPROJECT_DIR = os.path.dirname(CONFIG_DIR)\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n\n # Watch out this needs to be included first\n 'liqd_product.apps.django_overwrites',\n\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n\n 'widget_tweaks',\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n 'capture_tag',\n 'background_task',\n\n # Wagtail cms components\n 'wagtail.contrib.forms',\n 'wagtail.contrib.redirects',\n 'wagtail.contrib.settings',\n 'wagtail.contrib.styleguide',\n 'wagtail.embeds',\n 'wagtail.sites',\n 'wagtail.users',\n 'wagtail.snippets',\n 'wagtail.documents',\n 'wagtail.images',\n 'wagtail.search',\n 'wagtail.admin',\n 'wagtail.core',\n 'modelcluster',\n 'taggit',\n 'liqd_product.apps.cms.pages',\n 'liqd_product.apps.cms.settings',\n\n # General adhocracy 4 components\n 'adhocracy4.actions',\n 'adhocracy4.administrative_districts',\n 'adhocracy4.categories',\n 'adhocracy4.ckeditor',\n 'adhocracy4.comments',\n 'adhocracy4.dashboard',\n 'adhocracy4.filters',\n 'adhocracy4.follows',\n 'adhocracy4.forms',\n 'adhocracy4.images',\n 'adhocracy4.labels',\n 'adhocracy4.maps',\n 'adhocracy4.modules',\n 'adhocracy4.organisations',\n 'adhocracy4.phases',\n 'adhocracy4.projects',\n 'adhocracy4.ratings',\n 'adhocracy4.reports',\n 'adhocracy4.rules',\n\n # General components that define models or helpers\n 'liqd_product.apps.actions',\n 'liqd_product.apps.contrib',\n 'liqd_product.apps.maps',\n 'liqd_product.apps.moderatorfeedback',\n 'liqd_product.apps.moderatorremark',\n 'liqd_product.apps.notifications',\n 'liqd_product.apps.organisations',\n 'liqd_product.apps.partners',\n 'liqd_product.apps.users',\n\n # General apps containing views\n 'liqd_product.apps.account',\n 'liqd_product.apps.dashboard',\n 'liqd_product.apps.embed',\n 'liqd_product.apps.exports',\n 'liqd_product.apps.offlineevents',\n 'liqd_product.apps.projects',\n\n # Apps defining phases\n 'liqd_product.apps.activities',\n 'liqd_product.apps.budgeting',\n 'liqd_product.apps.documents',\n 'liqd_product.apps.ideas',\n 'liqd_product.apps.mapideas',\n 'liqd_product.apps.polls',\n)\n\nMIDDLEWARE = (\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django_cloudflare_push.middleware.push_middleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n\n 'liqd_product.apps.partners.middleware.PartnerMiddleware',\n 'liqd_product.apps.embed.middleware.AjaxPathMiddleware',\n 'wagtail.core.middleware.SiteMiddleware',\n 'wagtail.contrib.redirects.middleware.RedirectMiddleware',\n)\n\nSITE_ID = 
1\n\nROOT_URLCONF = 'liqd_product.config.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'wagtail.contrib.settings.context_processors.settings'\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'liqd_product.config.wsgi.application'\n\nREVERSE_METHOD = 'liqd_product.apps.partners.urlresolvers.reverse'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'de-DE'\n\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 'image/gif')\n },\n 'heroimage': {'min_resolution': (1500, 500)},\n 'tileimage': {'min_resolution': (500, 300)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (600, 400)},\n}\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'heroimage_preview': {'size': (880, 220), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'idea_thumbnail': {'size': (240, 240), 'crop': 'smart'},\n 'avatar': {'size': (200, 200), 'crop': 'smart'},\n 'item_image': {'size': (330, 0), 'crop': 'scale'},\n 'map_thumbnail': {'size': (200, 200), 'crop': 'smart'}\n }\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Authentication\n\nAUTH_USER_MODEL = 'liqd_product_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'liqd_product.apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_SIGNUP_FORM_CLASS = 'liqd_product.apps.users.forms.TermsSignupForm'\nSOCIALACCOUNT_EMAIL_VERIFICATION = 'none'\n\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# CKEditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = 'username'\nCKEDITOR_ALLOW_NONIMAGE_FILES = True\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 
'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'collapsible-image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink'],\n ['CollapsibleItem']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n },\n 'collapsible-image-editor': {\n 'tags': ['p', 'strong', 'em', 'u', 'ol', 'li', 'ul', 'a', 'img',\n 'div'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style'],\n 'div': ['class']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n# Wagtail\nWAGTAIL_SITE_NAME = 'Beteiligung.in'\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'liqd_product_organisations.Organisation'\n\nA4_RATEABLES = (\n ('a4comments', 'comment'),\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_mapideas', 'mapidea'),\n ('liqd_product_budgeting', 'proposal'),\n)\n\nA4_COMMENTABLES = (\n ('a4comments', 'comment'),\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_documents', 'chapter'),\n ('liqd_product_documents', 'paragraph'),\n ('liqd_product_mapideas', 'mapidea'),\n ('liqd_product_budgeting', 'proposal'),\n ('liqd_product_polls', 'poll'),\n)\n\nA4_REPORTABLES = (\n ('a4comments', 'comment'),\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_mapideas', 'mapidea'),\n ('liqd_product_budgeting', 'proposal'),\n)\n\nA4_ACTIONABLES = (\n ('a4comments', 'comment'),\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_budgeting', 'proposal'),\n ('liqd_product_mapideas', 'mapidea'),\n)\n\nA4_AUTO_FOLLOWABLES = (\n # Disabled to keep current behaviour: the auto follow functionality did\n # not work until 2018/03/21 due to a adhocracy4 bug\n # ('a4comments', 'comment'),\n # ('liqd_product_ideas', 'idea'),\n # ('liqd_product_mapideas', 'mapidea'),\n)\n\nA4_CATEGORIZABLE = (\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_mapideas', 'mapidea'),\n ('liqd_product_budgeting', 'proposal'),\n)\n\nA4_LABELS_ADDABLE = (\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_mapideas', 'mapidea'),\n ('liqd_product_budgeting', 'proposal'),\n)\n\nA4_CATEGORY_ICONS = (\n ('', _('Pin without icon')),\n ('diamant', _('Diamond')),\n ('dreieck_oben', _('Triangle up')),\n ('dreieck_unten', _('Triangle down')),\n ('ellipse', _('Ellipse')),\n ('halbkreis', _('Semi circle')),\n ('hexagon', _('Hexagon')),\n ('parallelogramm', _('Rhomboid')),\n ('pentagramm', _('Star')),\n ('quadrat', _('Square')),\n ('raute', _('Octothorpe')),\n ('rechtecke', _('Rectangle')),\n ('ring', _('Circle')),\n ('rw_dreieck', _('Right triangle')),\n ('zickzack', _('Zigzag'))\n)\n\n\nA4_MAP_BASEURL = 'https://{s}.tile.openstreetmap.org/'\nA4_MAP_ATTRIBUTION = '&copy; <a href=\"http://openstreetmap.org/copyright\">OpenStreetMap</a> 
contributors'\nA4_MAP_BOUNDING_BOX = ([[54.983, 15.016], [47.302, 5.988]])\n\nA4_DASHBOARD = {\n 'PROJECT_DASHBOARD_CLASS': 'liqd_product.apps.dashboard.ProjectDashboard',\n 'BLUEPRINTS': 'liqd_product.apps.dashboard.blueprints.blueprints'\n}\n\nA4_PROJECT_TOPICS = (\n ('ANT', _('Anti-discrimination')),\n ('WOR', _('Work & economy')),\n ('BUI', _('Building & living')),\n ('EDU', _('Education & research')),\n ('CHI', _('Children, youth & family')),\n ('FIN', _('Finances')),\n ('HEA', _('Health & sports')),\n ('INT', _('Integration')),\n ('CUL', _('Culture & leisure')),\n ('NEI', _('Neighborhood & participation')),\n ('URB', _('Urban development')),\n ('ENV', _('Environment & public green space')),\n ('TRA', _('Traffic'))\n)\n\nCONTACT_EMAIL = '[email protected]'\n\n# The default language is used for emails and strings\n# that are stored translated to the database.\nDEFAULT_LANGUAGE = 'de'\n", "path": "liqd_product/config/settings/base.py" } ]
[ { "content": "\"\"\"Django settings for Beteiligung.in.\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nfrom django.utils.translation import ugettext_lazy as _\n\nCONFIG_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nPROJECT_DIR = os.path.dirname(CONFIG_DIR)\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n\n # Watch out this needs to be included first\n 'liqd_product.apps.django_overwrites',\n\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n\n 'widget_tweaks',\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n 'capture_tag',\n 'background_task',\n\n # Wagtail cms components\n 'wagtail.contrib.forms',\n 'wagtail.contrib.redirects',\n 'wagtail.contrib.settings',\n 'wagtail.contrib.styleguide',\n 'wagtail.embeds',\n 'wagtail.sites',\n 'wagtail.users',\n 'wagtail.snippets',\n 'wagtail.documents',\n 'wagtail.images',\n 'wagtail.search',\n 'wagtail.admin',\n 'wagtail.core',\n 'modelcluster',\n 'taggit',\n 'liqd_product.apps.cms.pages',\n 'liqd_product.apps.cms.settings',\n\n # General adhocracy 4 components\n 'adhocracy4.actions',\n 'adhocracy4.administrative_districts',\n 'adhocracy4.categories',\n 'adhocracy4.ckeditor',\n 'adhocracy4.comments',\n 'adhocracy4.dashboard',\n 'adhocracy4.filters',\n 'adhocracy4.follows',\n 'adhocracy4.forms',\n 'adhocracy4.images',\n 'adhocracy4.labels',\n 'adhocracy4.maps',\n 'adhocracy4.modules',\n 'adhocracy4.organisations',\n 'adhocracy4.phases',\n 'adhocracy4.projects',\n 'adhocracy4.ratings',\n 'adhocracy4.reports',\n 'adhocracy4.rules',\n\n # General components that define models or helpers\n 'liqd_product.apps.actions',\n 'liqd_product.apps.contrib',\n 'liqd_product.apps.maps',\n 'liqd_product.apps.moderatorfeedback',\n 'liqd_product.apps.moderatorremark',\n 'liqd_product.apps.notifications',\n 'liqd_product.apps.organisations',\n 'liqd_product.apps.partners',\n 'liqd_product.apps.users',\n\n # General apps containing views\n 'liqd_product.apps.account',\n 'liqd_product.apps.dashboard',\n 'liqd_product.apps.embed',\n 'liqd_product.apps.exports',\n 'liqd_product.apps.offlineevents',\n 'liqd_product.apps.projects',\n\n # Apps defining phases\n 'liqd_product.apps.activities',\n 'liqd_product.apps.budgeting',\n 'liqd_product.apps.documents',\n 'liqd_product.apps.ideas',\n 'liqd_product.apps.mapideas',\n 'liqd_product.apps.polls',\n)\n\nMIDDLEWARE = (\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django_cloudflare_push.middleware.push_middleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n\n 'liqd_product.apps.partners.middleware.PartnerMiddleware',\n 'liqd_product.apps.embed.middleware.AjaxPathMiddleware',\n 'wagtail.core.middleware.SiteMiddleware',\n 'wagtail.contrib.redirects.middleware.RedirectMiddleware',\n)\n\nSITE_ID = 
1\n\nROOT_URLCONF = 'liqd_product.config.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'wagtail.contrib.settings.context_processors.settings'\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'liqd_product.config.wsgi.application'\n\nREVERSE_METHOD = 'liqd_product.apps.partners.urlresolvers.reverse'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'de-DE'\n\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 'image/gif')\n },\n 'heroimage': {'min_resolution': (1500, 500)},\n 'tileimage': {'min_resolution': (500, 300)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (600, 400)},\n}\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'heroimage_preview': {'size': (880, 220), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'idea_thumbnail': {'size': (240, 240), 'crop': 'smart'},\n 'avatar': {'size': (200, 200), 'crop': 'smart'},\n 'item_image': {'size': (330, 0), 'crop': 'scale'},\n 'map_thumbnail': {'size': (200, 200), 'crop': 'smart'}\n }\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Authentication\n\nAUTH_USER_MODEL = 'liqd_product_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'liqd_product.apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_SIGNUP_FORM_CLASS = 'liqd_product.apps.users.forms.TermsSignupForm'\nSOCIALACCOUNT_EMAIL_VERIFICATION = 'none'\n\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# CKEditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = 'username'\nCKEDITOR_ALLOW_NONIMAGE_FILES = True\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 
'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'collapsible-image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink'],\n ['CollapsibleItem']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n },\n 'collapsible-image-editor': {\n 'tags': ['p', 'strong', 'em', 'u', 'ol', 'li', 'ul', 'a', 'img',\n 'div'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style'],\n 'div': ['class']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n# Wagtail\nWAGTAIL_SITE_NAME = 'Beteiligung.in'\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'liqd_product_organisations.Organisation'\n\nA4_RATEABLES = (\n ('a4comments', 'comment'),\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_mapideas', 'mapidea'),\n ('liqd_product_budgeting', 'proposal'),\n)\n\nA4_COMMENTABLES = (\n ('a4comments', 'comment'),\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_documents', 'chapter'),\n ('liqd_product_documents', 'paragraph'),\n ('liqd_product_mapideas', 'mapidea'),\n ('liqd_product_budgeting', 'proposal'),\n ('liqd_product_polls', 'poll'),\n)\n\nA4_REPORTABLES = (\n ('a4comments', 'comment'),\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_mapideas', 'mapidea'),\n ('liqd_product_budgeting', 'proposal'),\n)\n\nA4_ACTIONABLES = (\n ('a4comments', 'comment'),\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_budgeting', 'proposal'),\n ('liqd_product_mapideas', 'mapidea'),\n)\n\nA4_AUTO_FOLLOWABLES = (\n # Disabled to keep current behaviour: the auto follow functionality did\n # not work until 2018/03/21 due to a adhocracy4 bug\n # ('a4comments', 'comment'),\n # ('liqd_product_ideas', 'idea'),\n # ('liqd_product_mapideas', 'mapidea'),\n)\n\nA4_CATEGORIZABLE = (\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_mapideas', 'mapidea'),\n ('liqd_product_budgeting', 'proposal'),\n)\n\nA4_LABELS_ADDABLE = (\n ('liqd_product_ideas', 'idea'),\n ('liqd_product_mapideas', 'mapidea'),\n ('liqd_product_budgeting', 'proposal'),\n)\n\nA4_CATEGORY_ICONS = (\n ('', _('Pin without icon')),\n ('diamant', _('Diamond')),\n ('dreieck_oben', _('Triangle up')),\n ('dreieck_unten', _('Triangle down')),\n ('ellipse', _('Ellipse')),\n ('halbkreis', _('Semi circle')),\n ('hexagon', _('Hexagon')),\n ('parallelogramm', _('Rhomboid')),\n ('pentagramm', _('Star')),\n ('quadrat', _('Square')),\n ('raute', _('Octothorpe')),\n ('rechtecke', _('Rectangle')),\n ('ring', _('Circle')),\n ('rw_dreieck', _('Right triangle')),\n ('zickzack', _('Zigzag'))\n)\n\n\nA4_MAP_BASEURL = 'https://{s}.tile.openstreetmap.org/'\nA4_MAP_ATTRIBUTION = '&copy; <a href=\"http://openstreetmap.org/copyright\">OpenStreetMap</a> 
contributors'\nA4_MAP_BOUNDING_BOX = ([[54.983, 15.016], [47.302, 5.988]])\n\nA4_DASHBOARD = {\n 'PROJECT_DASHBOARD_CLASS': 'liqd_product.apps.dashboard.ProjectDashboard',\n 'BLUEPRINTS': 'liqd_product.apps.dashboard.blueprints.blueprints'\n}\n\nA4_PROJECT_TOPICS = (\n ('ANT', _('Anti-discrimination')),\n ('WOR', _('Work & economy')),\n ('BUI', _('Building & living')),\n ('EDU', _('Education & research')),\n ('CHI', _('Children, youth & family')),\n ('FIN', _('Finances')),\n ('HEA', _('Health & sports')),\n ('INT', _('Integration')),\n ('CUL', _('Culture & leisure')),\n ('NEI', _('Neighborhood & participation')),\n ('URB', _('Urban development')),\n ('ENV', _('Environment & public green space')),\n ('TRA', _('Traffic'))\n)\n\nCONTACT_EMAIL = '[email protected]'\n\n# The default language is used for emails and strings\n# that are stored translated to the database.\nDEFAULT_LANGUAGE = 'de'\n\nWAGTAILADMIN_RICH_TEXT_EDITORS = {\n 'default': {\n 'WIDGET': 'wagtail.admin.rich_text.HalloRichTextArea'\n }\n}\n", "path": "liqd_product/config/settings/base.py" } ]
diff --git a/liqd_product/config/settings/base.py b/liqd_product/config/settings/base.py
index 83ee5957d..42e41e75f 100644
--- a/liqd_product/config/settings/base.py
+++ b/liqd_product/config/settings/base.py
@@ -444,3 +444,9 @@
 # The default language is used for emails and strings
 # that are stored translated to the database.
 DEFAULT_LANGUAGE = 'de'
+
+WAGTAILADMIN_RICH_TEXT_EDITORS = {
+    'default': {
+        'WIDGET': 'wagtail.admin.rich_text.HalloRichTextArea'
+    }
+}
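Reading the patch above back out of diff form: the whole fix is one new Django setting that pins Wagtail's admin rich text widget to the legacy Hallo editor. Presumably the page edit view was failing under the newer default Draftail editor; that causal link is an inference, not something stated in the record.

```python
# Added at the end of liqd_product/config/settings/base.py by the patch.
# Pinning the 'default' editor to HalloRichTextArea keeps the Wagtail admin
# on the pre-Draftail rich text widget.
WAGTAILADMIN_RICH_TEXT_EDITORS = {
    'default': {
        'WIDGET': 'wagtail.admin.rich_text.HalloRichTextArea'
    }
}
```

A quick way to confirm the setting is picked up is to open `python manage.py shell` and inspect `django.conf.settings.WAGTAILADMIN_RICH_TEXT_EDITORS`.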
bids-standard__pybids-467
Improved reprs for SQLAlchemy model objects
This should help provide more useful exception messages for obscure SQLAlchemy error conditions. And just be generally more readable in a REPL. Related to #465.

Including a .json sidecar for events.tsv files causes a SQLAlchemy Tag conflict
We have an fMRIPrep user experiencing the following issue (more details here https://neurostars.org/t/naming-of-bids-events-tsv-files-seems-to-disrupt-fmriprep-1-5-0rc1/4771):
```
  File "/usr/local/miniconda/lib/python3.7/site-packages/fmriprep/cli/run.py", line 524, in build_workflow
    layout = BIDSLayout(str(bids_dir), validate=False)
  File "/usr/local/miniconda/lib/python3.7/site-packages/bids/layout/layout.py", line 212, in __init__
    indexer.index_metadata()
  File "/usr/local/miniconda/lib/python3.7/site-packages/bids/layout/index.py", line 338, in index_metadata
    self.session.commit()
  File "/usr/local/miniconda/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 1027, in commit
    self.transaction.commit()
  File "/usr/local/miniconda/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 494, in commit
    self._prepare_impl()
  File "/usr/local/miniconda/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 473, in _prepare_impl
    self.session.flush()
  File "/usr/local/miniconda/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2459, in flush
    self._flush(objects)
  File "/usr/local/miniconda/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2597, in _flush
    transaction.rollback(_capture_exception=True)
  File "/usr/local/miniconda/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
    compat.reraise(exc_type, exc_value, exc_tb)
  File "/usr/local/miniconda/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 153, in reraise
    raise value
  File "/usr/local/miniconda/lib/python3.7/site-packages/sqlalchemy/orm/session.py", line 2557, in _flush
    flush_context.execute()
  File "/usr/local/miniconda/lib/python3.7/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
    rec.execute(self)
  File "/usr/local/miniconda/lib/python3.7/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
    uow,
  File "/usr/local/miniconda/lib/python3.7/site-packages/sqlalchemy/orm/persistence.py", line 213, in save_obj
    ) in _organize_states_for_save(base_mapper, states, uowtransaction):
  File "/usr/local/miniconda/lib/python3.7/site-packages/sqlalchemy/orm/persistence.py", line 408, in _organize_states_for_save
    % (state_str(state), instance_key, state_str(existing))
sqlalchemy.orm.exc.FlushError: New instance <Tag at 0x2b66784f7ba8> with identity key (<class 'bids.layout.models.Tag'>, ('/inp/sub-02/func/sub-02_task-Emotion_run-1_events.tsv', 'run'), None) conflicts with persistent instance <Tag at 0x2b66784c6630>
```
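To make the failure mode above concrete, here is a minimal, self-contained sketch (not pybids code) of the same class of `FlushError`: a second transient object is added with the identity key of an object that is already persistent in the session, which is what happens when the `run` entity gets tagged twice for the same events.tsv file. It also adds the kind of `__repr__` the first issue title asks for, so the conflicting objects are easier to inspect while debugging; the model, path, and values below are illustrative only.

```python
from sqlalchemy import Column, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class Tag(Base):
    # Toy stand-in for bids.layout.models.Tag: a composite primary key of
    # (file_path, entity_name), just like the identity key in the traceback.
    __tablename__ = 'tags'
    file_path = Column(String, primary_key=True)
    entity_name = Column(String, primary_key=True)
    value = Column(String)

    def __repr__(self):
        return "<Tag ({}, {}) = {!r}>".format(
            self.file_path, self.entity_name, self.value)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# First tagging of 'run' for this (hypothetical) events file succeeds.
first = Tag(file_path='sub-02_task-X_run-1_events.tsv',
            entity_name='run', value='1')
session.add(first)
session.commit()

# Creating a *new* Tag object with the same primary key in the same session
# reproduces the "New instance ... conflicts with persistent instance"
# error at flush time.
session.add(Tag(file_path='sub-02_task-X_run-1_events.tsv',
                entity_name='run', value='1'))
try:
    session.commit()
except Exception as exc:  # sqlalchemy.orm.exc.FlushError
    print(type(exc).__name__, exc)
```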
[ { "content": "from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.ext.associationproxy import association_proxy\nfrom sqlalchemy.orm.collections import attribute_mapped_collection\nfrom sqlalchemy import (Column, Integer, String, Boolean, ForeignKey, Table)\nfrom sqlalchemy.orm import reconstructor, relationship, backref, object_session\nimport re\nimport os\nimport warnings\nimport json\nfrom copy import deepcopy\nfrom itertools import chain\n\nfrom .writing import build_path, write_contents_to_file\nfrom ..utils import listify\nfrom ..config import get_option\nfrom ..external import six\n\n\nBase = declarative_base()\n\n\nclass Config(Base):\n \"\"\" Container for BIDS configuration information.\n\n Args:\n name (str): The name to give the Config (e.g., 'bids').\n entities (list): A list of dictionaries containing entity configuration\n information.\n default_path_patterns (list): Optional list of patterns used to build\n new paths.\n session (Session, None): an optional SQLAlchemy session. If passed,\n the session is used to update the database with any newly created\n Entity objects. If None, no database update occurs.\n \"\"\"\n __tablename__ = 'configs'\n\n name = Column(String, primary_key=True)\n _default_path_patterns = Column(String)\n entities = relationship(\n \"Entity\", secondary=\"config_to_entity_map\",\n collection_class=attribute_mapped_collection('name'))\n\n def __init__(self, name, entities=None, default_path_patterns=None,\n session=None):\n\n self.name = name\n self.default_path_patterns = default_path_patterns\n self._default_path_patterns = json.dumps(default_path_patterns)\n\n if entities:\n for ent in entities:\n if session is not None:\n existing = (session.query(Config)\n .filter_by(name=ent['name']).first())\n else:\n existing = None\n ent = existing or Entity(**ent)\n self.entities[ent.name] = ent\n if session is not None:\n session.add_all(list(self.entities.values()))\n session.commit()\n\n @reconstructor\n def _init_on_load(self):\n self.default_path_patterns = json.loads(self._default_path_patterns)\n\n @classmethod\n def load(self, config, session=None):\n ''' Load a Config instance from the passed configuration data.\n\n Args:\n config (str, dict): A string or dict containing configuration\n information. 
Must be one of:\n * A string giving the name of a predefined config file\n (e.g., 'bids' or 'derivatives')\n * A path to a JSON file containing config information\n * A dictionary containing config information\n session (Session, None): An optional SQLAlchemy Session instance.\n If passed, the session is used to check the database for (and\n return) an existing Config with name defined in config['name'].\n\n Returns: A Config instance.\n '''\n\n if isinstance(config, six.string_types):\n config_paths = get_option('config_paths')\n if config in config_paths:\n config = config_paths[config]\n if not os.path.exists(config):\n raise ValueError(\"{} is not a valid path.\".format(config))\n else:\n with open(config, 'r') as f:\n config = json.load(f)\n\n # Return existing Config record if one exists\n if session is not None:\n result = (session.query(Config)\n .filter_by(name=config['name']).first())\n if result:\n return result\n\n return Config(session=session, **config)\n\n\nclass BIDSFile(Base):\n \"\"\" Represents a single file or directory in a BIDS dataset.\n\n Args:\n filename (str): The path to the corresponding file.\n \n \"\"\"\n __tablename__ = 'files'\n\n path = Column(String, primary_key=True)\n filename = Column(String)\n dirname = Column(String)\n entities = association_proxy(\"tags\", \"value\")\n is_dir = Column(Boolean)\n class_ = Column(String(20))\n\n _associations = relationship('BIDSFile', secondary='associations',\n primaryjoin='FileAssociation.dst == BIDSFile.path',\n secondaryjoin='FileAssociation.src == BIDSFile.path')\n\n __mapper_args__ = {\n 'polymorphic_on': class_,\n 'polymorphic_identity': 'file'\n }\n\n def __init__(self, filename):\n self.path = filename\n self.filename = os.path.basename(self.path)\n self.dirname = os.path.dirname(self.path)\n self.is_dir = not self.filename\n self._init_on_load()\n\n def __getattr__(self, attr):\n # Ensures backwards compatibility with old File_ namedtuple, which is\n # deprecated as of 0.7.\n # _ check first to not mask away access to __setstate__ etc.\n # AFAIK None of the entities are allowed to start with _ anyways\n # so the check is more generic than __\n if not attr.startswith('_') and attr in self.entities:\n warnings.warn(\"Accessing entities as attributes is deprecated as \"\n \"of 0.7. Please use the .entities dictionary instead\"\n \" (i.e., .entities['%s'] instead of .%s.\"\n % (attr, attr))\n return self.entities[attr]\n raise AttributeError(\"%s object has no attribute named %r\" %\n (self.__class__.__name__, attr))\n\n def __repr__(self):\n return \"<{} filename='{}'>\".format(self.__class__.__name__, self.path)\n\n def __fspath__(self):\n return self.path\n\n @reconstructor\n def _init_on_load(self):\n self._data = None\n\n def get_associations(self, kind=None, include_parents=False):\n \"\"\" Get associated files, optionally limiting by association kind.\n\n Args:\n kind (str): The kind of association to return (e.g., \"Child\").\n By default, all associations are returned.\n include_parents (bool): If True, files related through inheritance\n are included in the returned list. If False, only directly\n associated files are returned. 
For example, a file's JSON\n sidecar will always be returned, but other JSON files from\n which the sidecar inherits will only be returned if\n include_parents=True.\n\n Returns: A list of BIDSFile instances.\n \"\"\"\n if kind is None:\n return self._associations\n session = object_session(self)\n q = (session.query(BIDSFile)\n .join(FileAssociation, BIDSFile.path == FileAssociation.dst)\n .filter_by(kind=kind, src=self.path))\n associations = q.all()\n\n if not include_parents:\n return associations\n\n def collect_associations(results, bidsfile):\n results.append(bidsfile)\n for p in bidsfile.get_associations('Child'):\n results = collect_associations(results, p)\n return results\n\n return chain(*[collect_associations([], bf) for bf in associations])\n\n def get_metadata(self):\n \"\"\" Return all metadata associated with the current file. \"\"\"\n return self.get_entities(metadata=True)\n\n def get_entities(self, metadata=False, values='tags'):\n \"\"\" Return entity information for the current file.\n\n Args:\n metadata (bool, None): If False (default), only entities defined\n for filenames (and not those found in the JSON sidecar) are\n returned. If True, only entities found in metadata files (and not\n defined for filenames) are returned. If None, all available\n entities are returned.\n values (str): The kind of object to return in the dict's values.\n Must be one of:\n * 'tags': Returns only the tagged value--e.g., if the key\n is \"subject\", the value might be \"01\".\n * 'objects': Returns the corresponding Entity instance.\n\n Returns: A dict, where keys are entity names and values are Entity\n instances.\n \"\"\"\n session = object_session(self)\n query = (session.query(Tag)\n .filter_by(file_path=self.path)\n .join(Entity))\n if metadata not in (None, 'all'):\n query = query.filter(Entity.is_metadata==metadata)\n\n results = query.all()\n if values.startswith('obj'):\n return {t.entity_name: t.entity for t in results}\n return {t.entity_name: t.value for t in results}\n\n def copy(self, path_patterns, symbolic_link=False, root=None,\n conflicts='fail'):\n \"\"\" Copy the contents of a file to a new location.\n \n Args:\n path_patterns (list): List of patterns use to construct the new\n filename. See build_path documentation for details.\n symbolic_link (bool): If True, use a symbolic link to point to the\n existing file. If False, creates a new file.\n root (str): Optional path to prepend to the constructed filename.\n conflicts (str): Defines the desired action when the output path\n already exists. 
Must be one of:\n 'fail': raises an exception\n 'skip' does nothing\n 'overwrite': overwrites the existing file\n 'append': adds a suffix to each file copy, starting with 1\n \"\"\"\n new_filename = build_path(self.entities, path_patterns)\n if not new_filename:\n return None\n\n if new_filename[-1] == os.sep:\n new_filename += self.filename\n\n if os.path.isabs(self.path) or root is None:\n path = self.path\n else:\n path = os.path.join(root, self.path)\n\n if not os.path.exists(path):\n raise ValueError(\"Target filename to copy/symlink (%s) doesn't \"\n \"exist.\" % path)\n\n if symbolic_link:\n contents = None\n link_to = path\n else:\n with open(path, 'r') as f:\n contents = f.read()\n link_to = None\n\n write_contents_to_file(new_filename, contents=contents,\n link_to=link_to, content_mode='text', root=root,\n conflicts=conflicts)\n\n\nclass BIDSDataFile(BIDSFile):\n \"\"\" Represents a single data file in a BIDS dataset.\n\n Derived from `BIDSFile` and provides additional functionality such as\n obtaining pandas DataFrame data representation (via `get_df`).\n \"\"\"\n\n __mapper_args__ = {\n 'polymorphic_identity': 'data_file'\n }\n\n def get_df(self, include_timing=True, adjust_onset=False):\n \"\"\" Return the contents of a tsv file as a pandas DataFrame.\n\n Args:\n include_timing (bool): If True, adds an \"onset\" column to dense\n timeseries files (e.g., *_physio.tsv.gz).\n adjust_onset (bool): If True, the onset of each sample in a dense\n timeseries file is shifted to reflect the \"StartTime\" value in\n the JSON sidecar. If False, the first sample starts at 0 secs.\n Ignored if include_timing=False.\n\n Returns: A pandas DataFrame.\n \"\"\"\n import pandas as pd\n import numpy as np\n\n # TODO: memoize method instead of just caching the raw data\n if self._data is None:\n self._data = pd.read_csv(self.path, sep='\\t', na_values='n/a')\n\n data = self._data.copy()\n md = self.get_metadata()\n\n if self.entities['extension'] == 'tsv.gz':\n # We could potentially include some validation here, but that seems\n # like a job for the BIDS Validator.\n data.columns = md['Columns']\n if include_timing:\n onsets = np.arange(len(data)) / md['SamplingFrequency']\n if adjust_onset:\n onsets += md['StartTime']\n data.insert(0, 'onset', onsets)\n\n return data\n\n\nclass BIDSImageFile(BIDSFile):\n \"\"\" Represents a single neuroimaging data file in a BIDS dataset.\n\n Derived from `BIDSFile` and provides additional functionality such as\n obtaining nibabel's image file representation (via `get_image`).\n \"\"\"\n\n __mapper_args__ = {\n 'polymorphic_identity': 'image_file'\n }\n\n def get_image(self):\n \"\"\" Return the associated image file (if it exists) as a NiBabel object\n \"\"\"\n try:\n import nibabel as nb\n return nb.load(self.path)\n except Exception:\n raise ValueError(\"'{}' does not appear to be an image format \"\n \"NiBabel can read.\".format(self.path))\n\n\nclass BIDSJSONFile(BIDSFile):\n\n __mapper_args__ = {\n 'polymorphic_identity': 'json_file'\n }\n\n def get_dict(self):\n ''' Return the contents of the current file as a dictionary. '''\n d = json.loads(self.get_json())\n if not isinstance(d, dict):\n raise ValueError(\"File %s is a json containing %s, not a dict which was expected\" % (self.path, type(d)))\n return d\n\n def get_json(self):\n ''' Return the contents of the current file as a JSON string. 
'''\n with open(self.path, 'r') as f:\n return f.read()\n\n\nclass Entity(Base):\n \"\"\"\n Represents a single entity defined in the JSON config.\n\n Args:\n name (str): The name of the entity (e.g., 'subject', 'run', etc.)\n pattern (str): A regex pattern used to match against file names.\n Must define at least one group, and only the first group is\n kept as the match.\n mandatory (bool): If True, every File _must_ match this entity.\n directory (str): Optional pattern defining a directory associated\n with the entity.\n dtype (str): The optional data type of the Entity values. Must be\n one of 'int', 'float', 'bool', or 'str'. If None, no type\n enforcement will be attempted, which means the dtype of the\n value may be unpredictable.\n is_metadata (bool): Indicates whether or not the Entity is derived\n from JSON sidecars (True) or is a predefined Entity from a\n config (False).\n \"\"\"\n __tablename__ = 'entities'\n\n name = Column(String, primary_key=True)\n mandatory = Column(Boolean, default=False)\n pattern = Column(String)\n directory = Column(String, nullable=True)\n is_metadata = Column(Boolean, default=False)\n _dtype = Column(String, default='str')\n files = association_proxy(\"tags\", \"value\")\n\n def __init__(self, name, pattern=None, mandatory=False, directory=None,\n dtype='str', is_metadata=False):\n self.name = name\n self.pattern = pattern\n self.mandatory = mandatory\n self.directory = directory\n self.is_metadata = is_metadata\n\n if not isinstance(dtype, six.string_types):\n dtype = dtype.__name__\n self._dtype = dtype\n\n self._init_on_load()\n\n @reconstructor\n def _init_on_load(self):\n if self._dtype not in ('str', 'float', 'int', 'bool'):\n raise ValueError(\"Invalid dtype '{}'. Must be one of 'int', \"\n \"'float', 'bool', or 'str'.\".format(self._dtype))\n self.dtype = eval(self._dtype)\n self.regex = re.compile(self.pattern) if self.pattern is not None else None\n\n def __iter__(self):\n for i in self.unique():\n yield(i)\n\n def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n\n # Directly copy the SQLAlchemy connection before any setattr calls,\n # otherwise failures occur sporadically on Python 3.5 when the\n # _sa_instance_state attribute (randomly!) disappears.\n result._sa_instance_state = self._sa_instance_state\n\n memo[id(self)] = result\n\n for k, v in self.__dict__.items():\n if k == '_sa_instance_state':\n continue\n new_val = getattr(self, k) if k == 'regex' else deepcopy(v, memo)\n setattr(result, k, new_val)\n return result\n\n def match_file(self, f):\n \"\"\"\n Determine whether the passed file matches the Entity.\n\n Args:\n f (File): The BIDSFile instance to match against.\n\n Returns: the matched value if a match was found, otherwise None.\n \"\"\"\n if self.regex is None:\n return None\n m = self.regex.search(f.path)\n val = m.group(1) if m is not None else None\n\n return self._astype(val)\n\n def unique(self):\n \"\"\" Return all unique values/levels for the current entity. 
\"\"\"\n return list(set(self.files.values()))\n\n def count(self, files=False):\n \"\"\" Return a count of unique values or files.\n\n Args:\n files (bool): When True, counts all files mapped to the Entity.\n When False, counts all unique values.\n Returns: an int.\n \"\"\"\n return len(self.files) if files else len(self.unique())\n\n def _astype(self, val):\n if val is not None and self.dtype is not None:\n val = self.dtype(val)\n return val\n\n\nclass Tag(Base):\n \"\"\" Represents an association between a File and and Entity.\n\n Args:\n file (BIDSFile): The associated BIDSFile.\n entity (Entity): The associated Entity.\n value: The value to store for this file/entity pair. Must be of type\n str, int, float, bool, or any json-serializable structure.\n dtype (str): Optional type for the value field. If None, inferred from\n value. If passed, must be one of str, int, float, bool, or json.\n Any other value will be treated as json (and will fail if the\n value can't be serialized to json).\n \"\"\"\n __tablename__ = 'tags'\n\n file_path = Column(String, ForeignKey('files.path'), primary_key=True)\n entity_name = Column(String, ForeignKey('entities.name'), primary_key=True)\n _value = Column(String, nullable=False)\n _dtype = Column(String, default='str')\n\n file = relationship('BIDSFile', backref=backref(\n \"tags\", collection_class=attribute_mapped_collection(\"entity_name\")))\n entity = relationship('Entity', backref=backref(\n \"tags\", collection_class=attribute_mapped_collection(\"file_path\")))\n\n def __init__(self, file, entity, value, dtype=None):\n\n if dtype is None:\n dtype = type(value)\n\n self.value = value\n\n if not isinstance(dtype, six.string_types):\n dtype = dtype.__name__\n if dtype not in ('str', 'float', 'int', 'bool'):\n # Try serializing to JSON first\n try:\n value = json.dumps(value)\n dtype = 'json'\n except:\n raise ValueError(\n \"Passed value has an invalid dtype ({}). Must be one of \"\n \"int, float, bool, or 'str.\".format(dtype))\n value = str(value)\n self.file_path = file.path\n self.entity_name = entity.name\n\n self._value = value\n self._dtype = dtype\n\n self._init_on_load()\n\n @reconstructor\n def _init_on_load(self):\n if self._dtype not in ('str', 'float', 'int', 'bool', 'json'):\n raise ValueError(\"Invalid dtype '{}'. Must be one of 'int', \"\n \"'float', 'bool', 'str', or 'json'.\".format(self._dtype))\n if self._dtype == 'json':\n self.value = json.loads(self._value)\n self.dtype = 'json'\n else:\n self.dtype = eval(self._dtype)\n self.value = self.dtype(self._value)\n\n\nclass FileAssociation(Base):\n __tablename__ = 'associations'\n\n src = Column(String, ForeignKey('files.path'), primary_key=True)\n dst = Column(String, ForeignKey('files.path'), primary_key=True)\n kind = Column(String, primary_key=True)\n\n\n# Association objects\nconfig_to_entity_map = Table('config_to_entity_map', Base.metadata,\n Column('config', String, ForeignKey('configs.name')),\n Column('entity', String, ForeignKey('entities.name'))\n)\n", "path": "bids/layout/models.py" } ]
[ { "content": "from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.ext.associationproxy import association_proxy\nfrom sqlalchemy.orm.collections import attribute_mapped_collection\nfrom sqlalchemy import (Column, Integer, String, Boolean, ForeignKey, Table)\nfrom sqlalchemy.orm import reconstructor, relationship, backref, object_session\nimport re\nimport os\nimport warnings\nimport json\nfrom copy import deepcopy\nfrom itertools import chain\n\nfrom .writing import build_path, write_contents_to_file\nfrom ..utils import listify\nfrom ..config import get_option\nfrom ..external import six\n\n\nBase = declarative_base()\n\n\nclass Config(Base):\n \"\"\" Container for BIDS configuration information.\n\n Args:\n name (str): The name to give the Config (e.g., 'bids').\n entities (list): A list of dictionaries containing entity configuration\n information.\n default_path_patterns (list): Optional list of patterns used to build\n new paths.\n session (Session, None): an optional SQLAlchemy session. If passed,\n the session is used to update the database with any newly created\n Entity objects. If None, no database update occurs.\n \"\"\"\n __tablename__ = 'configs'\n\n name = Column(String, primary_key=True)\n _default_path_patterns = Column(String)\n entities = relationship(\n \"Entity\", secondary=\"config_to_entity_map\",\n collection_class=attribute_mapped_collection('name'))\n\n def __init__(self, name, entities=None, default_path_patterns=None,\n session=None):\n\n self.name = name\n self.default_path_patterns = default_path_patterns\n self._default_path_patterns = json.dumps(default_path_patterns)\n\n if entities:\n for ent in entities:\n if session is not None:\n existing = (session.query(Config)\n .filter_by(name=ent['name']).first())\n else:\n existing = None\n ent = existing or Entity(**ent)\n self.entities[ent.name] = ent\n if session is not None:\n session.add_all(list(self.entities.values()))\n session.commit()\n\n @reconstructor\n def _init_on_load(self):\n self.default_path_patterns = json.loads(self._default_path_patterns)\n\n @classmethod\n def load(self, config, session=None):\n ''' Load a Config instance from the passed configuration data.\n\n Args:\n config (str, dict): A string or dict containing configuration\n information. 
Must be one of:\n * A string giving the name of a predefined config file\n (e.g., 'bids' or 'derivatives')\n * A path to a JSON file containing config information\n * A dictionary containing config information\n session (Session, None): An optional SQLAlchemy Session instance.\n If passed, the session is used to check the database for (and\n return) an existing Config with name defined in config['name'].\n\n Returns: A Config instance.\n '''\n\n if isinstance(config, six.string_types):\n config_paths = get_option('config_paths')\n if config in config_paths:\n config = config_paths[config]\n if not os.path.exists(config):\n raise ValueError(\"{} is not a valid path.\".format(config))\n else:\n with open(config, 'r') as f:\n config = json.load(f)\n\n # Return existing Config record if one exists\n if session is not None:\n result = (session.query(Config)\n .filter_by(name=config['name']).first())\n if result:\n return result\n\n return Config(session=session, **config)\n\n\nclass BIDSFile(Base):\n \"\"\" Represents a single file or directory in a BIDS dataset.\n\n Args:\n filename (str): The path to the corresponding file.\n \n \"\"\"\n __tablename__ = 'files'\n\n path = Column(String, primary_key=True)\n filename = Column(String)\n dirname = Column(String)\n entities = association_proxy(\"tags\", \"value\")\n is_dir = Column(Boolean)\n class_ = Column(String(20))\n\n _associations = relationship('BIDSFile', secondary='associations',\n primaryjoin='FileAssociation.dst == BIDSFile.path',\n secondaryjoin='FileAssociation.src == BIDSFile.path')\n\n __mapper_args__ = {\n 'polymorphic_on': class_,\n 'polymorphic_identity': 'file'\n }\n\n def __init__(self, filename, derivatives=False, is_dir=False):\n self.path = filename\n self.filename = os.path.basename(self.path)\n self.dirname = os.path.dirname(self.path)\n self.is_dir = not self.filename\n self._init_on_load()\n\n def __getattr__(self, attr):\n # Ensures backwards compatibility with old File_ namedtuple, which is\n # deprecated as of 0.7.\n # _ check first to not mask away access to __setstate__ etc.\n # AFAIK None of the entities are allowed to start with _ anyways\n # so the check is more generic than __\n if not attr.startswith('_') and attr in self.entities:\n warnings.warn(\"Accessing entities as attributes is deprecated as \"\n \"of 0.7. Please use the .entities dictionary instead\"\n \" (i.e., .entities['%s'] instead of .%s.\"\n % (attr, attr))\n return self.entities[attr]\n raise AttributeError(\"%s object has no attribute named %r\" %\n (self.__class__.__name__, attr))\n\n def __repr__(self):\n return \"<{} filename='{}'>\".format(self.__class__.__name__, self.path)\n\n @reconstructor\n def _init_on_load(self):\n self._data = None\n\n def get_associations(self, kind=None, include_parents=False):\n \"\"\" Get associated files, optionally limiting by association kind.\n\n Args:\n kind (str): The kind of association to return (e.g., \"Child\").\n By default, all associations are returned.\n include_parents (bool): If True, files related through inheritance\n are included in the returned list. If False, only directly\n associated files are returned. 
For example, a file's JSON\n sidecar will always be returned, but other JSON files from\n which the sidecar inherits will only be returned if\n include_parents=True.\n\n Returns: A list of BIDSFile instances.\n \"\"\"\n if kind is None:\n return self._associations\n session = object_session(self)\n q = (session.query(BIDSFile)\n .join(FileAssociation, BIDSFile.path == FileAssociation.dst)\n .filter_by(kind=kind, src=self.path))\n associations = q.all()\n\n if not include_parents:\n return associations\n\n def collect_associations(results, bidsfile):\n results.append(bidsfile)\n for p in bidsfile.get_associations('Child'):\n results = collect_associations(results, p)\n return results\n\n return chain(*[collect_associations([], bf) for bf in associations])\n\n def get_metadata(self):\n \"\"\" Returns all metadata associated with the current file. \"\"\"\n return self.get_entities(metadata=True)\n\n def get_entities(self, metadata=False, values='tags'):\n \"\"\" Returns entity information for the current file.\n\n Args:\n metadata (bool, None): If False (default), only entities defined\n for filenames (and not those found in the JSON sidecar) are\n returned. If True, only entities found in metadata files (and not\n defined for filenames) are returned. If None, all available\n entities are returned.\n values (str): The kind of object to return in the dict's values.\n Must be one of:\n * 'tags': Returns only the tagged value--e.g., if the key\n is \"subject\", the value might be \"01\".\n * 'objects': Returns the corresponding Entity instance.\n\n Returns: A dict, where keys are entity names and values are Entity\n instances.\n \"\"\"\n session = object_session(self)\n query = (session.query(Tag)\n .filter_by(file_path=self.path)\n .join(Entity))\n if metadata not in (None, 'all'):\n query = query.filter(Entity.is_metadata==metadata)\n\n results = query.all()\n if values.startswith('obj'):\n return {t.entity_name: t.entity for t in results}\n return {t.entity_name: t.value for t in results}\n\n def copy(self, path_patterns, symbolic_link=False, root=None,\n conflicts='fail'):\n \"\"\" Copy the contents of a file to a new location.\n \n Args:\n path_patterns (list): List of patterns use to construct the new\n filename. See build_path documentation for details.\n symbolic_link (bool): If True, use a symbolic link to point to the\n existing file. If False, creates a new file.\n root (str): Optional path to prepend to the constructed filename.\n conflicts (str): Defines the desired action when the output path\n already exists. 
Must be one of:\n 'fail': raises an exception\n 'skip' does nothing\n 'overwrite': overwrites the existing file\n 'append': adds a suffix to each file copy, starting with 1\n \"\"\"\n new_filename = build_path(self.entities, path_patterns)\n if not new_filename:\n return None\n\n if new_filename[-1] == os.sep:\n new_filename += self.filename\n\n if os.path.isabs(self.path) or root is None:\n path = self.path\n else:\n path = os.path.join(root, self.path)\n\n if not os.path.exists(path):\n raise ValueError(\"Target filename to copy/symlink (%s) doesn't \"\n \"exist.\" % path)\n\n if symbolic_link:\n contents = None\n link_to = path\n else:\n with open(path, 'r') as f:\n contents = f.read()\n link_to = None\n\n write_contents_to_file(new_filename, contents=contents,\n link_to=link_to, content_mode='text', root=root,\n conflicts=conflicts)\n\n\nclass BIDSDataFile(BIDSFile):\n\n __mapper_args__ = {\n 'polymorphic_identity': 'data_file'\n }\n\n def get_df(self, include_timing=True, adjust_onset=False):\n \"\"\" Returns the contents of a tsv file as a pandas DataFrame.\n\n Args:\n include_timing (bool): If True, adds an \"onset\" column to dense\n timeseries files (e.g., *_physio.tsv.gz).\n adjust_onset (bool): If True, the onset of each sample in a dense\n timeseries file is shifted to reflect the \"StartTime\" value in\n the JSON sidecar. If False, the first sample starts at 0 secs.\n Ignored if include_timing=False.\n\n Returns: A pandas DataFrame.\n \"\"\"\n import pandas as pd\n import numpy as np\n\n # TODO: memoize method instead of just caching the raw data\n if self._data is None:\n self._data = pd.read_csv(self.path, sep='\\t', na_values='n/a')\n\n data = self._data.copy()\n md = self.get_metadata()\n\n if self.entities['extension'] == 'tsv.gz':\n # We could potentially include some validation here, but that seems\n # like a job for the BIDS Validator.\n data.columns = md['Columns']\n if include_timing:\n onsets = np.arange(len(data)) / md['SamplingFrequency']\n if adjust_onset:\n onsets += md['StartTime']\n data.insert(0, 'onset', onsets)\n\n return data\n\n\nclass BIDSImageFile(BIDSFile):\n\n __mapper_args__ = {\n 'polymorphic_identity': 'image_file'\n }\n\n def get_image(self):\n \"\"\" Return the associated image file (if it exists) as a NiBabel object\n \"\"\"\n try:\n import nibabel as nb\n return nb.load(self.path)\n except Exception:\n raise ValueError(\"'{}' does not appear to be an image format \"\n \"NiBabel can read.\".format(self.path))\n\n\nclass Entity(Base):\n \"\"\"\n Represents a single entity defined in the JSON config.\n\n Args:\n name (str): The name of the entity (e.g., 'subject', 'run', etc.)\n pattern (str): A regex pattern used to match against file names.\n Must define at least one group, and only the first group is\n kept as the match.\n mandatory (bool): If True, every File _must_ match this entity.\n directory (str): Optional pattern defining a directory associated\n with the entity.\n dtype (str): The optional data type of the Entity values. Must be\n one of 'int', 'float', 'bool', or 'str'. 
If None, no type\n enforcement will be attempted, which means the dtype of the\n value may be unpredictable.\n is_metadata (bool): Indicates whether or not the Entity is derived\n from JSON sidecars (True) or is a predefined Entity from a\n config (False).\n \"\"\"\n __tablename__ = 'entities'\n\n name = Column(String, primary_key=True)\n mandatory = Column(Boolean, default=False)\n pattern = Column(String)\n directory = Column(String, nullable=True)\n is_metadata = Column(Boolean, default=False)\n _dtype = Column(String, default='str')\n files = association_proxy(\"tags\", \"value\")\n\n def __init__(self, name, pattern=None, mandatory=False, directory=None,\n dtype='str', is_metadata=False):\n self.name = name\n self.pattern = pattern\n self.mandatory = mandatory\n self.directory = directory\n self.is_metadata = is_metadata\n\n if not isinstance(dtype, six.string_types):\n dtype = dtype.__name__\n self._dtype = dtype\n\n self._init_on_load()\n\n @reconstructor\n def _init_on_load(self):\n if self._dtype not in ('str', 'float', 'int', 'bool'):\n raise ValueError(\"Invalid dtype '{}'. Must be one of 'int', \"\n \"'float', 'bool', or 'str'.\".format(self._dtype))\n self.dtype = eval(self._dtype)\n self.regex = re.compile(self.pattern) if self.pattern is not None else None\n\n def __iter__(self):\n for i in self.unique():\n yield(i)\n\n def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n\n # Directly copy the SQLAlchemy connection before any setattr calls,\n # otherwise failures occur sporadically on Python 3.5 when the\n # _sa_instance_state attribute (randomly!) disappears.\n result._sa_instance_state = self._sa_instance_state\n\n memo[id(self)] = result\n\n for k, v in self.__dict__.items():\n if k == '_sa_instance_state':\n continue\n new_val = getattr(self, k) if k == 'regex' else deepcopy(v, memo)\n setattr(result, k, new_val)\n return result\n\n def match_file(self, f):\n \"\"\"\n Determine whether the passed file matches the Entity.\n\n Args:\n f (File): The BIDSFile instance to match against.\n\n Returns: the matched value if a match was found, otherwise None.\n \"\"\"\n if self.regex is None:\n return None\n m = self.regex.search(f.path)\n val = m.group(1) if m is not None else None\n\n return self._astype(val)\n\n def unique(self):\n \"\"\" Returns all unique values/levels for the current entity. \"\"\"\n return list(set(self.files.values()))\n\n def count(self, files=False):\n \"\"\" Returns a count of unique values or files.\n\n Args:\n files (bool): When True, counts all files mapped to the Entity.\n When False, counts all unique values.\n Returns: an int.\n \"\"\"\n return len(self.files) if files else len(self.unique())\n\n def _astype(self, val):\n if val is not None and self.dtype is not None:\n val = self.dtype(val)\n return val\n\n\nclass Tag(Base):\n \"\"\"\n Represents an association between a File and and Entity.\n\n Args:\n file (BIDSFile): The associated BIDSFile.\n entity (Entity): The associated Entity.\n value: The value to store for this file/entity pair. Must be of type\n str, int, float, bool, or any json-serializable structure.\n dtype (str): Optional type for the value field. If None, inferred from\n value. 
If passed, must be one of str, int, float, bool, or json.\n Any other value will be treated as json (and will fail if the\n value can't be serialized to json).\n \"\"\"\n __tablename__ = 'tags'\n\n file_path = Column(String, ForeignKey('files.path'), primary_key=True)\n entity_name = Column(String, ForeignKey('entities.name'), primary_key=True)\n _value = Column(String, nullable=False)\n _dtype = Column(String, default='str')\n\n file = relationship('BIDSFile', backref=backref(\n \"tags\", collection_class=attribute_mapped_collection(\"entity_name\")))\n entity = relationship('Entity', backref=backref(\n \"tags\", collection_class=attribute_mapped_collection(\"file_path\")))\n\n def __init__(self, file, entity, value, dtype=None):\n\n if dtype is None:\n dtype = type(value)\n\n self.value = value\n\n if not isinstance(dtype, six.string_types):\n dtype = dtype.__name__\n if dtype not in ('str', 'float', 'int', 'bool'):\n # Try serializing to JSON first\n try:\n value = json.dumps(value)\n dtype = 'json'\n except:\n raise ValueError(\n \"Passed value has an invalid dtype ({}). Must be one of \"\n \"int, float, bool, or 'str.\".format(dtype))\n value = str(value)\n self.file_path = file.path\n self.entity_name = entity.name\n\n self._value = value\n self._dtype = dtype\n\n self._init_on_load()\n \n def __repr__(self):\n msg = \"<Tag file:{!r} entity:{!r} value:{!r}>\"\n return msg.format(self.file_path, self.entity_name, self.value)\n\n @reconstructor\n def _init_on_load(self):\n if self._dtype not in ('str', 'float', 'int', 'bool', 'json'):\n raise ValueError(\"Invalid dtype '{}'. Must be one of 'int', \"\n \"'float', 'bool', 'str', or 'json'.\".format(self._dtype))\n if self._dtype == 'json':\n self.value = json.loads(self._value)\n self.dtype = 'json'\n else:\n self.dtype = eval(self._dtype)\n self.value = self.dtype(self._value)\n\n\nclass FileAssociation(Base):\n __tablename__ = 'associations'\n\n src = Column(String, ForeignKey('files.path'), primary_key=True)\n dst = Column(String, ForeignKey('files.path'), primary_key=True)\n kind = Column(String, primary_key=True)\n\n\n# Association objects\nconfig_to_entity_map = Table('config_to_entity_map', Base.metadata,\n Column('config', String, ForeignKey('configs.name')),\n Column('entity', String, ForeignKey('entities.name'))\n)\n", "path": "bids/layout/models.py" } ]
diff --git a/bids/layout/models.py b/bids/layout/models.py index 41f49596e..997e7a63d 100644 --- a/bids/layout/models.py +++ b/bids/layout/models.py @@ -491,6 +491,10 @@ def __init__(self, file, entity, value, dtype=None): self._dtype = dtype self._init_on_load() + + def __repr__(self): + msg = "<Tag file:{!r} entity:{!r} value:{!r}>" + return msg.format(self.file_path, self.entity_name, self.value) @reconstructor def _init_on_load(self): diff --git a/bids/layout/tests/test_models.py b/bids/layout/tests/test_models.py index b9075f5e9..fce088649 100644 --- a/bids/layout/tests/test_models.py +++ b/bids/layout/tests/test_models.py @@ -97,6 +97,13 @@ def test_file_associations(): assert set(results) == {md1, md2} +def test_tag_init(sample_bidsfile, subject_entity): + f, e = sample_bidsfile, subject_entity + tag = Tag(f, e, 'zzz') + rep = str(tag) + assert rep.startswith("<Tag file:") and f.path in rep and 'zzz' in rep + + def test_tag_dtype(sample_bidsfile, subject_entity): f, e = sample_bidsfile, subject_entity # Various ways of initializing--should all give same result
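The diff in the record above adds a `__repr__` to the `Tag` model plus a matching test. As a reading aid only, here is a minimal standalone sketch of the formatting behaviour that diff introduces; the `FakeTag` class and the example path are invented stand-ins, not the real SQLAlchemy-backed `Tag`:

```python
# Hypothetical stand-in for the Tag model, used only to illustrate the
# repr format string added in the PR diff above.
class FakeTag:
    def __init__(self, file_path, entity_name, value):
        self.file_path = file_path
        self.entity_name = entity_name
        self.value = value

    def __repr__(self):
        # Same format string as the Tag.__repr__ added in the diff.
        msg = "<Tag file:{!r} entity:{!r} value:{!r}>"
        return msg.format(self.file_path, self.entity_name, self.value)


print(FakeTag("sub-01/anat/sub-01_T1w.nii.gz", "subject", "01"))
# -> <Tag file:'sub-01/anat/sub-01_T1w.nii.gz' entity:'subject' value:'01'>
```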
uccser__cs-unplugged-537
Center navbar menu text on mobile devices

![image](https://user-images.githubusercontent.com/8001048/27560437-4254d4b0-5b18-11e7-811f-6da827fa1365.png)

Always display curriculum areas for learning outcomes on a new line

This prevents a confusing interface, like so:

![image](https://user-images.githubusercontent.com/8001048/27560331-e410500a-5b17-11e7-8cd2-db32964f721b.png)
[ { "content": "\"\"\"Views for the topics application.\"\"\"\n\nfrom django.shortcuts import get_object_or_404\nfrom django.views import generic\nfrom django.http import JsonResponse, Http404\nfrom config.templatetags.render_html_field import render_html_with_static\nfrom utils.group_lessons_by_age import group_lessons_by_age\nfrom .models import (\n Topic,\n CurriculumIntegration,\n UnitPlan,\n Lesson,\n LessonNumber,\n ProgrammingChallenge,\n ProgrammingChallengeNumber,\n ProgrammingChallengeImplementation,\n ResourceDescription,\n GlossaryTerm,\n)\n\n\nclass IndexView(generic.ListView):\n \"\"\"View for the topics application homepage.\"\"\"\n\n template_name = \"topics/index.html\"\n context_object_name = \"all_topics\"\n\n def get_queryset(self):\n \"\"\"Get queryset of all topics.\n\n Returns:\n Queryset of Topic objects ordered by name.\n \"\"\"\n return Topic.objects.order_by(\"name\")\n\n\nclass TopicView(generic.DetailView):\n \"\"\"View for a specific topic.\"\"\"\n\n model = Topic\n template_name = \"topics/topic.html\"\n slug_url_kwarg = \"topic_slug\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the topic view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n # Call the base implementation first to get a context\n context = super(TopicView, self).get_context_data(**kwargs)\n # Add in a QuerySet of all the connected unit plans\n unit_plans = UnitPlan.objects.filter(topic=self.object).order_by(\"name\").select_related()\n for unit_plan in unit_plans:\n unit_plan.grouped_lessons = group_lessons_by_age(unit_plan.lessons.all())\n context[\"unit_plans\"] = unit_plans\n # Add in a QuerySet of all the connected curriculum integrations\n context[\"curriculum_integrations\"] = CurriculumIntegration.objects.filter(topic=self.object).order_by(\"number\")\n context[\"programming_challenges\"] = ProgrammingChallenge.objects.filter(topic=self.object).order_by(\n \"challenge_set_number\",\n \"challenge_number\"\n )\n lessons = self.object.lessons.all()\n resources = set()\n for lesson in lessons:\n lesson_resources = lesson.generated_resources.all()\n for lesson_resource in lesson_resources:\n resources.add(lesson_resource)\n context[\"resources\"] = resources\n return context\n\n\nclass UnitPlanView(generic.DetailView):\n \"\"\"View for a specific unit plan.\"\"\"\n\n model = UnitPlan\n template_name = \"topics/unit-plan.html\"\n context_object_name = \"unit_plan\"\n\n def get_object(self, **kwargs):\n \"\"\"Retrieve object for the unit plan view.\n\n Returns:\n UnitPlan object, or raises 404 error if not found.\n \"\"\"\n return get_object_or_404(\n self.model.objects.select_related(),\n topic__slug=self.kwargs.get(\"topic_slug\", None),\n slug=self.kwargs.get(\"unit_plan_slug\", None)\n )\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the unit plan view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n # Call the base implementation first to get a context\n context = super(UnitPlanView, self).get_context_data(**kwargs)\n # Loading object under consistent context names for breadcrumbs\n context[\"topic\"] = self.object.topic\n # Add all the connected lessons\n context[\"grouped_lessons\"] = group_lessons_by_age(self.object.lessons.all())\n return context\n\n\nclass LessonView(generic.DetailView):\n \"\"\"View for a specific lesson.\"\"\"\n\n model = Lesson\n template_name = \"topics/lesson.html\"\n context_object_name = \"lesson\"\n\n def get_object(self, **kwargs):\n \"\"\"Retrieve object for the lesson view.\n\n 
Returns:\n Lesson object, or raises 404 error if not found.\n \"\"\"\n return get_object_or_404(\n self.model.objects.select_related(),\n topic__slug=self.kwargs.get(\"topic_slug\", None),\n unit_plan__slug=self.kwargs.get(\"unit_plan_slug\", None),\n slug=self.kwargs.get(\"lesson_slug\", None),\n )\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the lesson view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n # Call the base implementation first to get a context\n context = super(LessonView, self).get_context_data(**kwargs)\n # Loading objects under consistent context names for breadcrumbs\n context[\"lesson_ages\"] = []\n for age_group in self.object.age_group.order_by(\"ages\"):\n number = LessonNumber.objects.get(lesson=self.object, age_group=age_group).number\n context[\"lesson_ages\"].append(\n {\n \"lower\": age_group.ages.lower,\n \"upper\": age_group.ages.upper,\n \"number\": number,\n }\n )\n context[\"topic\"] = self.object.topic\n context[\"unit_plan\"] = self.object.unit_plan\n # Add all the connected programming challenges\n context[\"programming_challenges\"] = self.object.programming_challenges.all()\n # Add all the connected learning outcomes\n context[\"learning_outcomes\"] = self.object.learning_outcomes.all().select_related()\n # Add all the connected generated resources\n related_resources = self.object.generated_resources.all()\n generated_resources = []\n for related_resource in related_resources:\n generated_resource = dict()\n generated_resource[\"slug\"] = related_resource.slug\n generated_resource[\"name\"] = related_resource.name\n generated_resource[\"thumbnail\"] = related_resource.thumbnail_static_path\n relationship = ResourceDescription.objects.get(resource=related_resource, lesson=self.object)\n generated_resource[\"description\"] = relationship.description\n generated_resources.append(generated_resource)\n context[\"generated_resources\"] = generated_resources\n\n return context\n\n\nclass ProgrammingChallengeList(generic.base.TemplateView):\n \"\"\"View for listing all programming challenges for a lesson.\"\"\"\n\n template_name = \"topics/programming-challenge-lesson-list.html\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the programming challenge list view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n context = super(ProgrammingChallengeList, self).get_context_data(**kwargs)\n lesson = get_object_or_404(\n Lesson.objects.select_related(),\n topic__slug=self.kwargs.get(\"topic_slug\", None),\n unit_plan__slug=self.kwargs.get(\"unit_plan_slug\", None),\n slug=self.kwargs.get(\"lesson_slug\", None),\n )\n context[\"lesson\"] = lesson\n context[\"programming_challenges\"] = lesson.retrieve_related_programming_challenges()\n context[\"unit_plan\"] = lesson.unit_plan\n context[\"topic\"] = lesson.topic\n return context\n\n\nclass ProgrammingChallengeView(generic.DetailView):\n \"\"\"View for a specific programming challenge.\"\"\"\n\n model = ProgrammingChallenge\n template_name = \"topics/programming-challenge.html\"\n context_object_name = \"programming_challenge\"\n\n def get_object(self, **kwargs):\n \"\"\"Retrieve object for the programming challenge view.\n\n Returns:\n ProgrammingChallenge object, or raises 404 error if not found.\n \"\"\"\n return get_object_or_404(\n self.model.objects.select_related(),\n topic__slug=self.kwargs.get(\"topic_slug\", None),\n slug=self.kwargs.get(\"programming_challenge_slug\", None)\n )\n\n def get_context_data(self, **kwargs):\n 
\"\"\"Provide the context data for the programming challenge view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n # Call the base implementation first to get a context\n context = super(ProgrammingChallengeView, self).get_context_data(**kwargs)\n context[\"lessons\"] = self.object.lessons.all()\n for lesson in context[\"lessons\"]:\n challenge_numbers = ProgrammingChallengeNumber.objects.get(\n lesson=lesson,\n programming_challenge=self.object\n )\n lesson.challenge_set_number = challenge_numbers.challenge_set_number\n lesson.challenge_number = challenge_numbers.challenge_number\n context[\"topic\"] = self.object.topic\n # Add all the connected learning outcomes\n context[\"learning_outcomes\"] = self.object.learning_outcomes.all()\n context[\"implementations\"] = self.object.ordered_implementations()\n return context\n\n\nclass ProgrammingChallengeLanguageSolutionView(generic.DetailView):\n \"\"\"View for a language implementation for a programming challenge.\"\"\"\n\n model = ProgrammingChallengeImplementation\n template_name = \"topics/programming-challenge-language-solution.html\"\n context_object_name = \"implementation\"\n\n def get_object(self, **kwargs):\n \"\"\"Retrieve object for the language implementation view.\n\n Returns:\n ProgrammingChallengeImplementation object, or raises 404\n error if not found.\n \"\"\"\n return get_object_or_404(\n self.model.objects.select_related(),\n topic__slug=self.kwargs.get(\"topic_slug\", None),\n challenge__slug=self.kwargs.get(\"programming_challenge_slug\", None),\n language__slug=self.kwargs.get(\"programming_language_slug\", None)\n )\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the language implementation view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n # Call the base implementation first to get a context\n context = super(ProgrammingChallengeLanguageSolutionView, self).get_context_data(**kwargs)\n # Loading object under consistent context names for breadcrumbs\n context[\"topic\"] = self.object.topic\n context[\"programming_challenge\"] = self.object.challenge\n return context\n\n\nclass AllCurriculumIntegrationList(generic.ListView):\n \"\"\"View for listing all curriculum integrations.\"\"\"\n\n model = CurriculumIntegration\n template_name = \"topics/all-curriculum-integration-list.html\"\n context_object_name = \"curriculum_integrations\"\n\n def get_queryset(self, **kwargs):\n \"\"\"Retrieve all curriculum integrations.\n\n Returns:\n Queryset of CurriculumIntegration objects.\n \"\"\"\n return CurriculumIntegration.objects.select_related().order_by(\"topic__name\", \"number\")\n\n\nclass CurriculumIntegrationView(generic.DetailView):\n \"\"\"View for a specific curriculum integration.\"\"\"\n\n model = CurriculumIntegration\n queryset = CurriculumIntegration.objects.all()\n template_name = \"topics/curriculum-integration.html\"\n context_object_name = \"integration\"\n\n def get_object(self, **kwargs):\n \"\"\"Retrieve object for the curriculum integration view.\n\n Returns:\n CurriculumIntegration object, or raises 404 error if not found.\n \"\"\"\n return get_object_or_404(\n self.model.objects.select_related(),\n topic__slug=self.kwargs.get(\"topic_slug\", None),\n slug=self.kwargs.get(\"integration_slug\", None)\n )\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the curriculum integration view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n # Call the base implementation first to get a context\n context = 
super(CurriculumIntegrationView, self).get_context_data(**kwargs)\n # Loading objects under consistent context names for breadcrumbs\n context[\"topic\"] = self.object.topic\n # Add in a QuerySet of all the connected curriculum areas\n context[\"integration_curriculum_areas\"] = self.object.curriculum_areas.order_by(\"name\")\n # Add in a QuerySet of all the prerequisite lessons\n context[\"prerequisite_lessons\"] = self.object.prerequisite_lessons.select_related().order_by(\n \"unit_plan__name\",\n )\n return context\n\n\nclass OtherResourcesView(generic.DetailView):\n \"\"\"View for detailing other resources for a specific topic.\"\"\"\n\n model = Topic\n template_name = \"topics/topic-other-resources.html\"\n slug_url_kwarg = \"topic_slug\"\n\n\nclass GlossaryList(generic.ListView):\n \"\"\"Provide glossary view of all terms.\"\"\"\n\n template_name = \"topics/glossary.html\"\n context_object_name = \"glossary_terms\"\n\n def get_queryset(self):\n \"\"\"Get queryset of all glossary terms.\n\n Returns:\n Queryset of GlossaryTerm objects ordered by term.\n \"\"\"\n return GlossaryTerm.objects.order_by(\"term\")\n\n\ndef glossary_json(request, **kwargs):\n \"\"\"Provide JSON data for glossary term.\n\n Args:\n request: The HTTP request.\n\n Returns:\n JSON response is sent containing data for the requested term.\n\n Raises:\n 404 error if term not found.\n \"\"\"\n # If term parameter, then return JSON\n if \"term\" in request.GET:\n glossary_slug = request.GET.get(\"term\")\n glossary_item = get_object_or_404(\n GlossaryTerm,\n slug=glossary_slug\n )\n data = {\n \"slug\": glossary_slug,\n \"term\": glossary_item.term,\n \"definition\": render_html_with_static(glossary_item.definition)\n }\n return JsonResponse(data)\n else:\n raise Http404(\"Term parameter not specified.\")\n", "path": "csunplugged/topics/views.py" } ]
[ { "content": "\"\"\"Views for the topics application.\"\"\"\n\nfrom django.shortcuts import get_object_or_404\nfrom django.views import generic\nfrom django.http import JsonResponse, Http404\nfrom config.templatetags.render_html_field import render_html_with_static\nfrom utils.group_lessons_by_age import group_lessons_by_age\nfrom .models import (\n Topic,\n CurriculumIntegration,\n UnitPlan,\n Lesson,\n LessonNumber,\n ProgrammingChallenge,\n ProgrammingChallengeNumber,\n ProgrammingChallengeImplementation,\n ResourceDescription,\n GlossaryTerm,\n)\n\n\nclass IndexView(generic.ListView):\n \"\"\"View for the topics application homepage.\"\"\"\n\n template_name = \"topics/index.html\"\n context_object_name = \"all_topics\"\n\n def get_queryset(self):\n \"\"\"Get queryset of all topics.\n\n Returns:\n Queryset of Topic objects ordered by name.\n \"\"\"\n return Topic.objects.order_by(\"name\").prefetch_related(\"unit_plans\")\n\n\nclass TopicView(generic.DetailView):\n \"\"\"View for a specific topic.\"\"\"\n\n model = Topic\n template_name = \"topics/topic.html\"\n slug_url_kwarg = \"topic_slug\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the topic view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n # Call the base implementation first to get a context\n context = super(TopicView, self).get_context_data(**kwargs)\n # Add in a QuerySet of all the connected unit plans\n unit_plans = UnitPlan.objects.filter(topic=self.object).order_by(\"name\").select_related()\n for unit_plan in unit_plans:\n unit_plan.grouped_lessons = group_lessons_by_age(unit_plan.lessons.all())\n context[\"unit_plans\"] = unit_plans\n # Add in a QuerySet of all the connected curriculum integrations\n context[\"curriculum_integrations\"] = CurriculumIntegration.objects.filter(topic=self.object).order_by(\"number\")\n context[\"programming_challenges\"] = ProgrammingChallenge.objects.filter(topic=self.object).order_by(\n \"challenge_set_number\",\n \"challenge_number\"\n )\n lessons = self.object.lessons.all()\n resources = set()\n for lesson in lessons:\n lesson_resources = lesson.generated_resources.all()\n for lesson_resource in lesson_resources:\n resources.add(lesson_resource)\n context[\"resources\"] = resources\n return context\n\n\nclass UnitPlanView(generic.DetailView):\n \"\"\"View for a specific unit plan.\"\"\"\n\n model = UnitPlan\n template_name = \"topics/unit-plan.html\"\n context_object_name = \"unit_plan\"\n\n def get_object(self, **kwargs):\n \"\"\"Retrieve object for the unit plan view.\n\n Returns:\n UnitPlan object, or raises 404 error if not found.\n \"\"\"\n return get_object_or_404(\n self.model.objects.select_related(),\n topic__slug=self.kwargs.get(\"topic_slug\", None),\n slug=self.kwargs.get(\"unit_plan_slug\", None)\n )\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the unit plan view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n # Call the base implementation first to get a context\n context = super(UnitPlanView, self).get_context_data(**kwargs)\n # Loading object under consistent context names for breadcrumbs\n context[\"topic\"] = self.object.topic\n # Add all the connected lessons\n context[\"grouped_lessons\"] = group_lessons_by_age(self.object.lessons.all())\n return context\n\n\nclass LessonView(generic.DetailView):\n \"\"\"View for a specific lesson.\"\"\"\n\n model = Lesson\n template_name = \"topics/lesson.html\"\n context_object_name = \"lesson\"\n\n def get_object(self, **kwargs):\n 
\"\"\"Retrieve object for the lesson view.\n\n Returns:\n Lesson object, or raises 404 error if not found.\n \"\"\"\n return get_object_or_404(\n self.model.objects.select_related(),\n topic__slug=self.kwargs.get(\"topic_slug\", None),\n unit_plan__slug=self.kwargs.get(\"unit_plan_slug\", None),\n slug=self.kwargs.get(\"lesson_slug\", None),\n )\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the lesson view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n # Call the base implementation first to get a context\n context = super(LessonView, self).get_context_data(**kwargs)\n # Loading objects under consistent context names for breadcrumbs\n context[\"lesson_ages\"] = []\n for age_group in self.object.age_group.order_by(\"ages\"):\n number = LessonNumber.objects.get(lesson=self.object, age_group=age_group).number\n context[\"lesson_ages\"].append(\n {\n \"lower\": age_group.ages.lower,\n \"upper\": age_group.ages.upper,\n \"number\": number,\n }\n )\n context[\"topic\"] = self.object.topic\n context[\"unit_plan\"] = self.object.unit_plan\n # Add all the connected programming challenges\n context[\"programming_challenges\"] = self.object.programming_challenges.all()\n # Add all the connected learning outcomes\n context[\"learning_outcomes\"] = self.object.learning_outcomes.all().select_related()\n # Add all the connected generated resources\n related_resources = self.object.generated_resources.all()\n generated_resources = []\n for related_resource in related_resources:\n generated_resource = dict()\n generated_resource[\"slug\"] = related_resource.slug\n generated_resource[\"name\"] = related_resource.name\n generated_resource[\"thumbnail\"] = related_resource.thumbnail_static_path\n relationship = ResourceDescription.objects.get(resource=related_resource, lesson=self.object)\n generated_resource[\"description\"] = relationship.description\n generated_resources.append(generated_resource)\n context[\"generated_resources\"] = generated_resources\n\n return context\n\n\nclass ProgrammingChallengeList(generic.base.TemplateView):\n \"\"\"View for listing all programming challenges for a lesson.\"\"\"\n\n template_name = \"topics/programming-challenge-lesson-list.html\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the programming challenge list view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n context = super(ProgrammingChallengeList, self).get_context_data(**kwargs)\n lesson = get_object_or_404(\n Lesson.objects.select_related(),\n topic__slug=self.kwargs.get(\"topic_slug\", None),\n unit_plan__slug=self.kwargs.get(\"unit_plan_slug\", None),\n slug=self.kwargs.get(\"lesson_slug\", None),\n )\n context[\"lesson\"] = lesson\n context[\"programming_challenges\"] = lesson.retrieve_related_programming_challenges()\n context[\"unit_plan\"] = lesson.unit_plan\n context[\"topic\"] = lesson.topic\n return context\n\n\nclass ProgrammingChallengeView(generic.DetailView):\n \"\"\"View for a specific programming challenge.\"\"\"\n\n model = ProgrammingChallenge\n template_name = \"topics/programming-challenge.html\"\n context_object_name = \"programming_challenge\"\n\n def get_object(self, **kwargs):\n \"\"\"Retrieve object for the programming challenge view.\n\n Returns:\n ProgrammingChallenge object, or raises 404 error if not found.\n \"\"\"\n return get_object_or_404(\n self.model.objects.select_related(),\n topic__slug=self.kwargs.get(\"topic_slug\", None),\n slug=self.kwargs.get(\"programming_challenge_slug\", None)\n )\n\n 
def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the programming challenge view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n # Call the base implementation first to get a context\n context = super(ProgrammingChallengeView, self).get_context_data(**kwargs)\n context[\"lessons\"] = self.object.lessons.all()\n for lesson in context[\"lessons\"]:\n challenge_numbers = ProgrammingChallengeNumber.objects.get(\n lesson=lesson,\n programming_challenge=self.object\n )\n lesson.challenge_set_number = challenge_numbers.challenge_set_number\n lesson.challenge_number = challenge_numbers.challenge_number\n context[\"topic\"] = self.object.topic\n # Add all the connected learning outcomes\n context[\"learning_outcomes\"] = self.object.learning_outcomes.all()\n context[\"implementations\"] = self.object.ordered_implementations()\n return context\n\n\nclass ProgrammingChallengeLanguageSolutionView(generic.DetailView):\n \"\"\"View for a language implementation for a programming challenge.\"\"\"\n\n model = ProgrammingChallengeImplementation\n template_name = \"topics/programming-challenge-language-solution.html\"\n context_object_name = \"implementation\"\n\n def get_object(self, **kwargs):\n \"\"\"Retrieve object for the language implementation view.\n\n Returns:\n ProgrammingChallengeImplementation object, or raises 404\n error if not found.\n \"\"\"\n return get_object_or_404(\n self.model.objects.select_related(),\n topic__slug=self.kwargs.get(\"topic_slug\", None),\n challenge__slug=self.kwargs.get(\"programming_challenge_slug\", None),\n language__slug=self.kwargs.get(\"programming_language_slug\", None)\n )\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the language implementation view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n # Call the base implementation first to get a context\n context = super(ProgrammingChallengeLanguageSolutionView, self).get_context_data(**kwargs)\n # Loading object under consistent context names for breadcrumbs\n context[\"topic\"] = self.object.topic\n context[\"programming_challenge\"] = self.object.challenge\n return context\n\n\nclass AllCurriculumIntegrationList(generic.ListView):\n \"\"\"View for listing all curriculum integrations.\"\"\"\n\n model = CurriculumIntegration\n template_name = \"topics/all-curriculum-integration-list.html\"\n context_object_name = \"curriculum_integrations\"\n\n def get_queryset(self, **kwargs):\n \"\"\"Retrieve all curriculum integrations.\n\n Returns:\n Queryset of CurriculumIntegration objects.\n \"\"\"\n return CurriculumIntegration.objects.select_related().order_by(\"topic__name\", \"number\")\n\n\nclass CurriculumIntegrationView(generic.DetailView):\n \"\"\"View for a specific curriculum integration.\"\"\"\n\n model = CurriculumIntegration\n queryset = CurriculumIntegration.objects.all()\n template_name = \"topics/curriculum-integration.html\"\n context_object_name = \"integration\"\n\n def get_object(self, **kwargs):\n \"\"\"Retrieve object for the curriculum integration view.\n\n Returns:\n CurriculumIntegration object, or raises 404 error if not found.\n \"\"\"\n return get_object_or_404(\n self.model.objects.select_related(),\n topic__slug=self.kwargs.get(\"topic_slug\", None),\n slug=self.kwargs.get(\"integration_slug\", None)\n )\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the curriculum integration view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n # Call the base implementation first to get a context\n 
context = super(CurriculumIntegrationView, self).get_context_data(**kwargs)\n # Loading objects under consistent context names for breadcrumbs\n context[\"topic\"] = self.object.topic\n # Add in a QuerySet of all the connected curriculum areas\n context[\"integration_curriculum_areas\"] = self.object.curriculum_areas.order_by(\"name\")\n # Add in a QuerySet of all the prerequisite lessons\n context[\"prerequisite_lessons\"] = self.object.prerequisite_lessons.select_related().order_by(\n \"unit_plan__name\",\n )\n return context\n\n\nclass OtherResourcesView(generic.DetailView):\n \"\"\"View for detailing other resources for a specific topic.\"\"\"\n\n model = Topic\n template_name = \"topics/topic-other-resources.html\"\n slug_url_kwarg = \"topic_slug\"\n\n\nclass GlossaryList(generic.ListView):\n \"\"\"Provide glossary view of all terms.\"\"\"\n\n template_name = \"topics/glossary.html\"\n context_object_name = \"glossary_terms\"\n\n def get_queryset(self):\n \"\"\"Get queryset of all glossary terms.\n\n Returns:\n Queryset of GlossaryTerm objects ordered by term.\n \"\"\"\n return GlossaryTerm.objects.order_by(\"term\")\n\n\ndef glossary_json(request, **kwargs):\n \"\"\"Provide JSON data for glossary term.\n\n Args:\n request: The HTTP request.\n\n Returns:\n JSON response is sent containing data for the requested term.\n\n Raises:\n 404 error if term not found.\n \"\"\"\n # If term parameter, then return JSON\n if \"term\" in request.GET:\n glossary_slug = request.GET.get(\"term\")\n glossary_item = get_object_or_404(\n GlossaryTerm,\n slug=glossary_slug\n )\n data = {\n \"slug\": glossary_slug,\n \"term\": glossary_item.term,\n \"definition\": render_html_with_static(glossary_item.definition)\n }\n return JsonResponse(data)\n else:\n raise Http404(\"Term parameter not specified.\")\n", "path": "csunplugged/topics/views.py" } ]
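Comparing the `before_files` and `after_files` versions of `csunplugged/topics/views.py` above, the only functional change is in `IndexView.get_queryset`, which gains a `prefetch_related("unit_plans")` call so the index template (see the template diff below) can list each topic's unit plans without an extra query per topic. A hedged sketch of that change in isolation, assuming the Django app context and the relative import used in the real file:

```python
# Sketch only -- assumes Django is configured and that Topic has a reverse
# relation named "unit_plans", as the index template's loop suggests.
from django.views import generic

from .models import Topic  # relative import, as in the real views.py


class IndexView(generic.ListView):
    """Topics homepage; unchanged apart from the queryset below."""

    template_name = "topics/index.html"
    context_object_name = "all_topics"

    def get_queryset(self):
        # prefetch_related batches all unit_plans lookups into one extra
        # query, instead of one query per topic (the N+1 problem) when the
        # template iterates over topic.unit_plans.all().
        return Topic.objects.order_by("name").prefetch_related("unit_plans")
```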
diff --git a/csunplugged/static/img/topics/kids-parity-trick.png b/csunplugged/static/img/topics/kids-parity-trick.jpg similarity index 100% rename from csunplugged/static/img/topics/kids-parity-trick.png rename to csunplugged/static/img/topics/kids-parity-trick.jpg diff --git a/csunplugged/static/img/topics/sorting-network-variation-alphabet.png b/csunplugged/static/img/topics/sorting-network-variation-alphabet.jpg similarity index 100% rename from csunplugged/static/img/topics/sorting-network-variation-alphabet.png rename to csunplugged/static/img/topics/sorting-network-variation-alphabet.jpg diff --git a/csunplugged/static/img/topics/sorting-network-variation-aural.png b/csunplugged/static/img/topics/sorting-network-variation-aural.jpg similarity index 100% rename from csunplugged/static/img/topics/sorting-network-variation-aural.png rename to csunplugged/static/img/topics/sorting-network-variation-aural.jpg diff --git a/csunplugged/static/img/topics/sorting-network-variation-music.png b/csunplugged/static/img/topics/sorting-network-variation-music.jpg similarity index 100% rename from csunplugged/static/img/topics/sorting-network-variation-music.png rename to csunplugged/static/img/topics/sorting-network-variation-music.jpg diff --git a/csunplugged/static/img/topics/sorting-network-variation-words-2.png b/csunplugged/static/img/topics/sorting-network-variation-words-2.jpg similarity index 100% rename from csunplugged/static/img/topics/sorting-network-variation-words-2.png rename to csunplugged/static/img/topics/sorting-network-variation-words-2.jpg diff --git a/csunplugged/static/img/topics/sorting-network-variation-words.png b/csunplugged/static/img/topics/sorting-network-variation-words.jpg similarity index 100% rename from csunplugged/static/img/topics/sorting-network-variation-words.png rename to csunplugged/static/img/topics/sorting-network-variation-words.jpg diff --git a/csunplugged/static/scss/website.scss b/csunplugged/static/scss/website.scss index 550cc9a50..51e35a64d 100644 --- a/csunplugged/static/scss/website.scss +++ b/csunplugged/static/scss/website.scss @@ -13,7 +13,7 @@ $ct-pattern: #82358C; img { &.content-image { - max-height: 10em; + max-height: 18em; } &.inline-image { max-height: 3rem; @@ -100,6 +100,7 @@ $rounded-corner-radius: 0.5rem; transition: 0.1s; display: flex; flex-direction: column; + text-align: center; align-items: center; justify-content: space-around; &.link-box-md-3 { @@ -122,8 +123,6 @@ $rounded-corner-radius: 0.5rem; } h2, h3 { - margin-bottom: 0; - text-align: center; color: $gray; } &:hover { diff --git a/csunplugged/templates/base.html b/csunplugged/templates/base.html index ed92f438a..5c5d19023 100644 --- a/csunplugged/templates/base.html +++ b/csunplugged/templates/base.html @@ -35,7 +35,7 @@ 2.0 sneak peek </a> <div class="collapse navbar-collapse" id="navbarNav"> - <div class="navbar-nav ml-auto"> + <div class="navbar-nav ml-auto text-center"> <a class="nav-item nav-link" href="{% url 'topics:index' %}">Topics</a> <a class="nav-item nav-link" href="{% url 'resources:index' %}">Resources</a> <a class="nav-item nav-link" href="{% url 'general:about' %}">About</a> diff --git a/csunplugged/templates/topics/index.html b/csunplugged/templates/topics/index.html index 9d795ff65..c7e160a0d 100644 --- a/csunplugged/templates/topics/index.html +++ b/csunplugged/templates/topics/index.html @@ -23,8 +23,15 @@ <h1>{% trans "Topics" %}</h1> <div class="link-box-container"> {% for topic in all_topics %} <a class="link-box link-box-md-3 link-box-lg-4" 
href="{% url 'topics:topic' topic.slug %}"> - <img class="img-fluid" src="{% if topic.icon %}{% static topic.icon %}{% else %}http://placehold.it/238x200{% endif %}"> + <img class="img-fluid" src="{% static topic.icon %}"> <h3 class="link-box-title">{{ topic.name }}</h3> + {% if topic.unit_plans.count > 1 %} + <h5 class="text-muted"> + {% for unit_plan in topic.unit_plans.all %} + {{ unit_plan.name }}{% if not forloop.last %}, {% endif %} + {% endfor %} + </h5> + {% endif %} </a> {% endfor %} </div> diff --git a/csunplugged/templates/topics/lesson.html b/csunplugged/templates/topics/lesson.html index 2b4802454..af8add8ae 100644 --- a/csunplugged/templates/topics/lesson.html +++ b/csunplugged/templates/topics/lesson.html @@ -34,7 +34,7 @@ <h2 class="heading-underline">Learning outcomes</h2> <ul> {% for learning_outcome in learning_outcomes %} <li> - {{ learning_outcome.text }} + {{ learning_outcome.text }}<br> {% for area in learning_outcome.curriculum_areas.all %} {% include "topics/curriculum-area-badge.html" %} {% endfor %} diff --git a/csunplugged/templates/topics/lessons-table.html b/csunplugged/templates/topics/lessons-table.html index c239ab5a9..e6932e6b6 100644 --- a/csunplugged/templates/topics/lessons-table.html +++ b/csunplugged/templates/topics/lessons-table.html @@ -18,21 +18,21 @@ {% endif %} {% for lesson in lessons %} <tr class="align-middle"> - <td class="text-center"> + <td class="text-center" style="width:10%"> {{ lesson.number }} </td> - <td> + <td style="width:60%"> <a href="{% url 'topics:lesson' topic.slug unit_plan.slug lesson.slug %}"> <strong>{{ lesson.name }}</strong> </a> </td> {% if lesson.has_programming_challenges %} - <td class="text-center table-success-cell"> + <td class="text-center table-success-cell" style="width:20%"> <a href="{% url 'topics:programming_challenges_list' topic.slug unit_plan.slug lesson.slug %}"> Yes </a> {% else %} - <td class="text-center"> + <td class="text-center" style="width:20%"> No {% endif %} </td> diff --git a/csunplugged/templates/topics/programming-challenge.html b/csunplugged/templates/topics/programming-challenge.html index c57141445..986072d85 100644 --- a/csunplugged/templates/topics/programming-challenge.html +++ b/csunplugged/templates/topics/programming-challenge.html @@ -49,7 +49,7 @@ <h2>Learning outcomes</h2> <ul> {% for learning_outcome in learning_outcomes %} <li> - {{ learning_outcome.text }} + {{ learning_outcome.text }}<br> {% for area in learning_outcome.curriculum_areas.all %} {% include "topics/curriculum-area-badge.html" %} {% endfor %} diff --git a/csunplugged/templates/topics/topic.html b/csunplugged/templates/topics/topic.html index 3ca8456e4..43663deee 100644 --- a/csunplugged/templates/topics/topic.html +++ b/csunplugged/templates/topics/topic.html @@ -24,7 +24,7 @@ <h1 id="{{ topic.slug }}" class="heading-underline">{{ topic.name }}</h1> {% for unit_plan in unit_plans %} <div class="link-box-container"> <a class="link-box link-box-md-6" href="{% url 'topics:unit_plan' topic.slug unit_plan.slug %}"> - <h3 id="{{ unit_plan.slug }}" class="link-box-title">{{ unit_plan.name }}</h3> + <h3 id="{{ unit_plan.slug }}" class="link-box-title"><span class="text-muted">Unit plan:</span> {{ unit_plan.name }}</h3> </a> </div> @@ -38,6 +38,10 @@ <h3 id="{{ unit_plan.slug }}" class="link-box-title">{{ unit_plan.name }}</h3> {% include "topics/lessons-table.html" %} {% endwith %} {% endif %} + + {% if not forloop.last %} + <hr> + {% endif %} {% endfor %} {% if curriculum_integrations %} @@ -83,12 +87,16 @@ <h2 
class="heading-underline">Table of contents</h2> <a class="nav-link" href="#{{ unit_plan.slug }}">{{ unit_plan.name }}</a> </li> {% endfor %} - <li class="nav-item"> - <a class="nav-link" href="#integrations">Curriculum integrations</a> - </li> - <li class="nav-item"> - <a class="nav-link" href="#resources">Resources</a> - </li> + {% if curriculum_integrations %} + <li class="nav-item"> + <a class="nav-link" href="#integrations">Curriculum integrations</a> + </li> + {% endif %} + {% if resources %} + <li class="nav-item"> + <a class="nav-link" href="#resources">Resources</a> + </li> + {% endif %} {% if topic.other_resources %} <li class="nav-item"> <a class="nav-link" href="#other-resources">Other resources</a> diff --git a/csunplugged/templates/topics/unit-plan.html b/csunplugged/templates/topics/unit-plan.html index 5169a934c..b8a510909 100644 --- a/csunplugged/templates/topics/unit-plan.html +++ b/csunplugged/templates/topics/unit-plan.html @@ -15,7 +15,7 @@ {% endblock breadcrumbs %} {% block page_heading %} - <h1 id="{{ unit_plan.slug }}" class="heading-underline">{{ unit_plan.name }}</h1> + <h1 id="{{ unit_plan.slug }}" class="heading-underline"><span class="text-muted">Unit plan:</span> {{ unit_plan.name }}</h1> {% endblock page_heading %} {% block left_column_content %} diff --git a/csunplugged/topics/content/en/binary-numbers/unit-plan/unit-plan.md b/csunplugged/topics/content/en/binary-numbers/unit-plan/unit-plan.md index c5f7a9e78..acc29487d 100644 --- a/csunplugged/topics/content/en/binary-numbers/unit-plan/unit-plan.md +++ b/csunplugged/topics/content/en/binary-numbers/unit-plan/unit-plan.md @@ -1,4 +1,4 @@ -# Binary Numbers Unit Plan +# Binary numbers ## See Teaching the Binary Number System in Action! diff --git a/csunplugged/topics/content/en/error-detection-and-correction/curriculum-integrations/quick-card-flip-magic.md b/csunplugged/topics/content/en/error-detection-and-correction/curriculum-integrations/quick-card-flip-magic.md index a2aa7ae7b..6379c04e0 100644 --- a/csunplugged/topics/content/en/error-detection-and-correction/curriculum-integrations/quick-card-flip-magic.md +++ b/csunplugged/topics/content/en/error-detection-and-correction/curriculum-integrations/quick-card-flip-magic.md @@ -1,18 +1,18 @@ -# Quick card flip magic +# Quick card flip magic -{image file-path="img/topics/kids-parity-trick.png"} +{image file-path="img/topics/kids-parity-trick.jpg"} *Every item of data that we store and transmit has extra bits added to it to prevent errors. Can you find the error and correct it?* - -### Equipment: + +### Equipment: At least 36, or even 100 or more cards that are black on one side and white on the other, about 20mm by 20mm (or any two colours that are easily distinguished) A clear space on the floor or a table that the students can stand around - -### Preparation: -Select a ‘magic master’. +### Preparation: + +Select a ‘magic master’. This is the person who is in control of the game. This person will change at the end of each round. @@ -22,24 +22,24 @@ This person will change at the end of each round. The grid can be any size; it should be at least 6 by 6, although it can be increased up to 10 by 10 or more to make the challenge harder. The grid doesn't have to be square (e.g. 9 by 8 is fine), but the effect is greatest when it is close to square. -2. The magic master asks everyone to close their eyes and turn away, except for the magic master and the assistant. +2. 
The magic master asks everyone to close their eyes and turn away, except for the magic master and the assistant. 3. The magic master asks the assistant to choose a card, place a counter or a mark under where the card goes and flip it over. 4. Once this has been done, the magic master calls out (quietly)…” let the magic begin” and presses the timer. - When the other students hear this they turn around and try to find the ‘error’. + When the other students hear this they turn around and try to find the ‘error’. -5. As soon as they spot the error, they put their finger on their nose. +5. As soon as they spot the error, they put their finger on their nose. -6. The magic master stops the timer and asks the first person who put their finger on their nose to show where the flipped card was. +6. The magic master stops the timer and asks the first person who put their finger on their nose to show where the flipped card was. -7. The student points to the flipped card, checks if they are correct by flipping the card over. +7. The student points to the flipped card, checks if they are correct by flipping the card over. -8. If they are correct, that person explains how they worked out which was the flipped card. +8. If they are correct, that person explains how they worked out which was the flipped card. -9. The magic master records the person's name who won that round and the time. +9. The magic master records the person's name who won that round and the time. -10. Did they beat the previous time? +10. Did they beat the previous time? 11. If they did beat the previous time, that person becomes the assistant. - The person who won that round stays the assistant until their time is beaten. + The person who won that round stays the assistant until their time is beaten. diff --git a/csunplugged/topics/content/en/error-detection-and-correction/unit-plan/unit-plan.md b/csunplugged/topics/content/en/error-detection-and-correction/unit-plan/unit-plan.md index 59c40f171..714e80654 100644 --- a/csunplugged/topics/content/en/error-detection-and-correction/unit-plan/unit-plan.md +++ b/csunplugged/topics/content/en/error-detection-and-correction/unit-plan/unit-plan.md @@ -1,4 +1,4 @@ -# Error detection and correction unit plan +# Error detection and correction {panel type="teaching" title="See teaching this in action!"} diff --git a/csunplugged/topics/content/en/sorting-networks/unit-plan/lessons/investigating-variations-using-the-sorting-network.md b/csunplugged/topics/content/en/sorting-networks/unit-plan/lessons/investigating-variations-using-the-sorting-network.md index 5e2f3aec3..ceeee6077 100644 --- a/csunplugged/topics/content/en/sorting-networks/unit-plan/lessons/investigating-variations-using-the-sorting-network.md +++ b/csunplugged/topics/content/en/sorting-networks/unit-plan/lessons/investigating-variations-using-the-sorting-network.md @@ -67,7 +67,7 @@ By reversing the left/right decision, the final result will be in the reverse or ### Variation 3: Letters of the alphabet -{image file-path="img/topics/sorting-network-variation-alphabet.png"} +{image file-path="img/topics/sorting-network-variation-alphabet.jpg"} Give the students cards with letters on them. Ask how we could compare these (students should observe that they could be in alphabetical order). 
@@ -96,7 +96,7 @@ AFFLUX, AGLOOS, ALMOST, BEGILT, BEGINS, BEGIRT, BEKNOT, BELLOW, BIJOUX, BILLOW, ### Variation 5: Sorting words in dictionary order -{image file-path="img/topics/sorting-network-variation-words.png"} +{image file-path="img/topics/sorting-network-variation-words.jpg"} Give the students cards with dictionary words on them, and ask how these might be compared. Students should observe that they could be placed in dictionary order. @@ -106,14 +106,14 @@ A variation is to give them books and have them sort them in order of the author Comparing two words or names is challenging; they will need to know to compare each character until two differ (e.g. for "crochet" and "crocodile", the "croc" prefix is the same, so it is the "h" and "o" that determine their order; this process is an algorithm in itself!) -{image file-path="img/topics/sorting-network-variation-words-2.png"} +{image file-path="img/topics/sorting-network-variation-words-2.jpg"} The words being compared could also be used to reinforce spelling or meaning; for example, the words above are the colours in Te Reo Māori, so the student with the word "kowhai" would be reinforcing that it means the colour yellow. The use of macrons and other diacritical marks also gives the opportunity to explore the order that is used in the such languages for those letters. ### Variation 6: Music notation -{image file-path="img/topics/sorting-network-variation-music.png"} +{image file-path="img/topics/sorting-network-variation-music.jpg"} Students can compare the pitch of music notation, with higher notes going to the right. If all the cards have the same clef (such as the treble clef here) then it reinforces that the height on the stave corresponds to the pitch. @@ -121,7 +121,7 @@ Advanced music students can do the comparisons with different clefs (bass, alto ### Variation 7: Music pitch - aural -{image file-path="img/topics/sorting-network-variation-aural.png"} +{image file-path="img/topics/sorting-network-variation-aural.jpg"} In this variation, students compare the pitch of simple instruments that they are carrying. The bells shown above are ideal because they are all the same size, and force students to compare them by listening. diff --git a/csunplugged/topics/content/en/sorting-networks/unit-plan/unit-plan.md b/csunplugged/topics/content/en/sorting-networks/unit-plan/unit-plan.md index 92fdf2fe7..c187386ba 100644 --- a/csunplugged/topics/content/en/sorting-networks/unit-plan/unit-plan.md +++ b/csunplugged/topics/content/en/sorting-networks/unit-plan/unit-plan.md @@ -1,4 +1,4 @@ -# Sorting networks unit plan +# Sorting networks {panel type="teaching" title="See teaching this in action!"} diff --git a/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/geometry-unit-plan-ct-links.md b/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/geometry-unit-plan-ct-links.md new file mode 100644 index 000000000..a09c70c09 --- /dev/null +++ b/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/geometry-unit-plan-ct-links.md @@ -0,0 +1,52 @@ +{panel type="ct-algorithm" title="Algorithmic thinking"} + +Throughout this unit students will be creating algorithms to ‘program’ the bot to move across grids. +The algorithmic thinking focuses on students learning to sequence a set of instructions to complete a task, to use specific types of instructions in their algorithms, and to find equivalent ways to achieve the same outcome. 
+ +{panel end} + +{panel type="ct-abstraction" title="Abstraction"} + +The ‘programming’ students will be doing in these activities is an abstract version of the type of programming we do on computers using conventional programming languages. +In this unit students will use a small set of very basic instructions as their programming language, write their programs in simple words, and give their instructions verbally to the bot. +This allows students to learn about sequencing in programming and practice creating algorithms without needing to learn a programming language first or use technical terminology and tools. + +The choice of commands for the programming language also demonstrate the use of abstraction (such as using "L" or an arrow to represent the command "Left"). + +In lesson 3 students will also be using another level of abstraction, as some instructions will be encapsulated inside a loop. + +{panel end} + +{panel type="ct-decomposition" title="Decomposition"} + +In every activity in this unit students will be practicing decomposition, as they break down the movements they want the bot to make into basic and specific instructions. +They will also be encouraged to write their programs incrementally, to write the first few steps, test them, and then add the next steps to the program, instead of trying to solve the whole problem at once. + +{panel end} + +{panel type="ct-pattern" title="Generalising and patterns"} + +As students write their algorithms and programs in this unit there are many different patterns that emerge in how the instructions are sequenced. +Groups of instructions might tell the Bot to move a specific way, such as walk in a square, and if students identify these patterns they can reuse these groups of instructions if they need to have the Bot make the same movements again. + +{panel end} + +{panel type="ct-evaluation" title="Evaluation"} + +Each time students write their programs and test them they will be evaluating them, because the most important thing they need to determine is “did the program work?”. Each time they test their programs they can identify whether it accomplished the task or not. + +There will almost always be multiple programs students could use to accomplish each task, but some will be more efficient than others as they will use fewer steps. +Students can compare these and explain why they think certain programs are better than others. + +There is also the opportunity to compare programming languages. We explore reducing the number of instructions available; in some cases the same goals can be achieved (the language has the same computing ability), but the programs might be longer. + +{panel end} + +{panel type="ct-logic" title="Logic"} + +Students will be continuously exercising their logical thinking skills throughout this unit. +As they are planning their algorithms they will need to step through what they think will happen with each instruction, e.g. “at this point they’ll be facing that way so if I then say move forward twice then they’ll end up on that square”. +They will also need a logical approach to debugging their programs when they do not work correctly. +They will need to step through each instruction and identify when something goes wrong and how they can fix the bug so the program then behaves as expected. 
+ +{panel end} diff --git a/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/geometry-unit-plan.md b/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/geometry-unit-plan.md index 65b90dac3..9d59b95cf 100644 --- a/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/geometry-unit-plan.md +++ b/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/geometry-unit-plan.md @@ -10,7 +10,7 @@ A version of this can curently be seen at 1:09:56 [here](https://educationonair. Writing a computer program involves planning what you're going to do, "coding" the instructions, testing them, tracking down any bugs, and changing the program to that it works correctly. In these activities students explore a simple programming language by taking on separate roles to make these elements of programming explicit. - + This isn't completely artificial; most substantial program is written by a team of people, and often the roles of design, coding and testing are separated out. The program is made because there is a problem to solve to help other people to improve their lives. The people who write the program using a programming language are called the **programmer** (or possibly a **developer** or **software engineer**) - they write the instructions to tell the computer what to do. To be sure that the code is working exactly as it needs to for the people it’s been written for, someone needs to test the program and feeds back to the programmer(s) what needs fixing and why. @@ -22,8 +22,8 @@ The Geometry programming activities separate the programming from the testing to ## Digital Technologies | Programming Programming or "coding" is when a person (**a programmer**) types in instructions in a programming language so that the computer knows what it needs to do (programmers do lots of other things as well). -Common programming languages that are used in the junior classroom include Scratch, ScratchJr, Python, Snap!, Blockly, and some older languages like Basic and Logo (which are still popular today). - +Common programming languages that are used in the junior classroom include Scratch, ScratchJr, Python, Snap!, Blockly, and some older languages like Basic and Logo (which are still popular today). + Being a programmer isn't so much about knowing the particular commands in a programming language, but knowing how to put together the building blocks such as loops, if statements, variables, input and output in a way that the computer will do what is intended. This involves working out the general process for achieving the goal, representing that process in the particular language, and making sure that it does what is intended. @@ -32,7 +32,7 @@ This involves working out the general process for achieving the goal, representi {panel type="math" title="Mathematical links"} There are strong connections between mathematics and programming. -Good programmers need to have a good base of mathematical knowledge, since a program is effectively a mathematical formula, and getting the structure of a program right requires good reasoning. +Good programmers need to have a good base of mathematical knowledge, since a program is effectively a mathematical formula, and getting the structure of a program right requires good reasoning. 
{panel end} @@ -42,71 +42,15 @@ Good programmers need to have a good base of mathematical knowledge, since a pro Giving sequential instructions are an important element of all programming languages, and so the Geometry programming activities lay the foundation for understanding more conventional languages. It also exercises the ability to predict what a program will do, reason about where any bugs are, and understand that there can be multiple correct ways to program a solution. - -Being able to give exact sequential instructions, work well together and understand how to break a big problem into small pieces and then tackling the small piece one at a time are all life skills that can be transferred from computer programming to other tasks that students need to do. - -## Seeing the Computational Thinking connections - - -{panel type="ct-algorithm" title="Algorithmic thinking"} - -Throughout this unit students will be creating algorithms to ‘program’ the bot to move across grids. -The algorithmic thinking focuses on students learning to sequence a set of instructions to complete a task, to use specific types of instructions in their algorithms, and to find equivalent ways to achieve the same outcome. - -{panel end} -{panel type="ct-abstraction" title="Abstraction"} +Being able to give exact sequential instructions, work well together and understand how to break a big problem into small pieces and then tackling the small piece one at a time are all life skills that can be transferred from computer programming to other tasks that students need to do. -The ‘programming’ students will be doing in these activities is an abstract version of the type of programming we do on computers using conventional programming languages. -In this unit students will use a small set of very basic instructions as their programming language, write their programs in simple words, and give their instructions verbally to the bot. -This allows students to learn about sequencing in programming and practice creating algorithms without needing to learn a programming language first or use technical terminology and tools. - -The choice of commands for the programming language also demonstrate the use of abstraction (such as using "L" or an arrow to represent the command "Left"). - -In lesson 3 students will also be using another level of abstraction, as some instructions will be encapsulated inside a loop. - -{panel end} - -{panel type="ct-decomposition" title="Decomposition"} - -In every activity in this unit students will be practicing decomposition, as they break down the movements they want the bot to make into basic and specific instructions. -They will also be encouraged to write their programs incrementally, to write the first few steps, test them, and then add the next steps to the program, instead of trying to solve the whole problem at once. - -{panel end} - -{panel type="ct-pattern" title="Generalising and patterns"} - -As students write their algorithms and programs in this unit there are many different patterns that emerge in how the instructions are sequenced. -Groups of instructions might tell the Bot to move a specific way, such as walk in a square, and if students identify these patterns they can reuse these groups of instructions if they need to have the Bot make the same movements again. 
- -{panel end} - -{panel type="ct-evaluation" title="Evaluation"} - -Each time students write their programs and test them they will be evaluating them, because the most important thing they need to determine is “did the program work?”. Each time they test their programs they can identify whether it accomplished the task or not. - -There will almost always be multiple programs students could use to accomplish each task, but some will be more efficient than others as they will use fewer steps. -Students can compare these and explain why they think certain programs are better than others. - -There is also the opportunity to compare programming languages. We explore reducing the number of instructions available; in some cases the same goals can be achieved (the language has the same computing ability), but the programs might be longer. - -{panel end} - -{panel type="ct-logic" title="Logic"} - -Students will be continuously exercising their logical thinking skills throughout this unit. -As they are planning their algorithms they will need to step through what they think will happen with each instruction, e.g. “at this point they’ll be facing that way so if I then say move forward twice then they’ll end up on that square”. -They will also need a logical approach to debugging their programs when they do not work correctly. -They will need to step through each instruction and identify when something goes wrong and how they can fix the bug so the program then behaves as expected. +## Reflection questions -{panel end} +- What was most surprising about the learning that happened from the teaching of this unit? -## Reflection questions +- Who were the students who were very systematic in their activities? -- What was most surprising about the learning that happened from the teaching of this unit? +- Who were the students who were very detailed in their activities? -- Who were the students who were very systematic in their activities? - -- Who were the students who were very detailed in their activities? - -- What would I change in my delivery of this unit? +- What would I change in my delivery of this unit? 
diff --git a/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/geometry-unit-plan.yaml b/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/geometry-unit-plan.yaml index b14e01e4f..1102a9694 100644 --- a/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/geometry-unit-plan.yaml +++ b/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/geometry-unit-plan.yaml @@ -1,4 +1,5 @@ lessons: lessons/lessons.yaml +computational-thinking-links: geometry-unit-plan-ct-links.md age-groups: 5-7: diff --git a/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/lessons/lessons.yaml b/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/lessons/lessons.yaml index 57f307a96..d55dcfdf8 100644 --- a/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/lessons/lessons.yaml +++ b/csunplugged/topics/content/en/unplugged-programming/geometry-unit-plan/lessons/lessons.yaml @@ -25,6 +25,7 @@ finding-2d-shapes: moving-in-a-shape: duration: 30 + computational-thinking-links: moving-in-a-shape-ct-links.md learning-outcomes: - unplugged-programming-give-instructions-shape - unplugged-programming-identify-bug-shape diff --git a/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/kidbots-unit-plan-ct-links.md b/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/kidbots-unit-plan-ct-links.md new file mode 100644 index 000000000..a09c70c09 --- /dev/null +++ b/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/kidbots-unit-plan-ct-links.md @@ -0,0 +1,52 @@ +{panel type="ct-algorithm" title="Algorithmic thinking"} + +Throughout this unit students will be creating algorithms to ‘program’ the bot to move across grids. +The algorithmic thinking focuses on students learning to sequence a set of instructions to complete a task, to use specific types of instructions in their algorithms, and to find equivalent ways to achieve the same outcome. + +{panel end} + +{panel type="ct-abstraction" title="Abstraction"} + +The ‘programming’ students will be doing in these activities is an abstract version of the type of programming we do on computers using conventional programming languages. +In this unit students will use a small set of very basic instructions as their programming language, write their programs in simple words, and give their instructions verbally to the bot. +This allows students to learn about sequencing in programming and practice creating algorithms without needing to learn a programming language first or use technical terminology and tools. + +The choice of commands for the programming language also demonstrate the use of abstraction (such as using "L" or an arrow to represent the command "Left"). + +In lesson 3 students will also be using another level of abstraction, as some instructions will be encapsulated inside a loop. + +{panel end} + +{panel type="ct-decomposition" title="Decomposition"} + +In every activity in this unit students will be practicing decomposition, as they break down the movements they want the bot to make into basic and specific instructions. +They will also be encouraged to write their programs incrementally, to write the first few steps, test them, and then add the next steps to the program, instead of trying to solve the whole problem at once. 
+ +{panel end} + +{panel type="ct-pattern" title="Generalising and patterns"} + +As students write their algorithms and programs in this unit there are many different patterns that emerge in how the instructions are sequenced. +Groups of instructions might tell the Bot to move a specific way, such as walk in a square, and if students identify these patterns they can reuse these groups of instructions if they need to have the Bot make the same movements again. + +{panel end} + +{panel type="ct-evaluation" title="Evaluation"} + +Each time students write their programs and test them they will be evaluating them, because the most important thing they need to determine is “did the program work?”. Each time they test their programs they can identify whether it accomplished the task or not. + +There will almost always be multiple programs students could use to accomplish each task, but some will be more efficient than others as they will use fewer steps. +Students can compare these and explain why they think certain programs are better than others. + +There is also the opportunity to compare programming languages. We explore reducing the number of instructions available; in some cases the same goals can be achieved (the language has the same computing ability), but the programs might be longer. + +{panel end} + +{panel type="ct-logic" title="Logic"} + +Students will be continuously exercising their logical thinking skills throughout this unit. +As they are planning their algorithms they will need to step through what they think will happen with each instruction, e.g. “at this point they’ll be facing that way so if I then say move forward twice then they’ll end up on that square”. +They will also need a logical approach to debugging their programs when they do not work correctly. +They will need to step through each instruction and identify when something goes wrong and how they can fix the bug so the program then behaves as expected. + +{panel end} diff --git a/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/kidbots-unit-plan.md b/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/kidbots-unit-plan.md index 9d0858237..5cc53955e 100644 --- a/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/kidbots-unit-plan.md +++ b/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/kidbots-unit-plan.md @@ -10,7 +10,7 @@ A version of this can curently be seen at 1:09:56 [here](https://educationonair. Writing a computer program involves planning what you're going to do, "coding" the instructions, testing them, tracking down any bugs, and changing the program to that it works correctly. In these activities students explore a simple programming language by taking on separate roles to make these elements of programming explicit. - + This isn't completely artificial; most substantial program is written by a team of people, and often the roles of design, coding and testing are separated out. The program is made because there is a problem to solve to help other people to improve their lives. The people who write the program using a programming language are called the **programmer** (or possibly a **developer** or **software engineer**) - they write the instructions to tell the computer what to do. To be sure that the code is working exactly as it needs to for the people it’s been written for, someone needs to test the program and feeds back to the programmer(s) what needs fixing and why. 
@@ -22,8 +22,8 @@ The Kidbots activities separates the programming from the testing to avoid the p ## Digital Technologies | Programming Programming or "coding" is when a person (**a programmer**) types in instructions in a programming language so that the computer knows what it needs to do (programmers do lots of other things as well). -Common programming languages that are used in the junior classroom include Scratch, ScratchJr, Python, Snap!, Blockly, and some older languages like Basic and Logo (which are still popular today). - +Common programming languages that are used in the junior classroom include Scratch, ScratchJr, Python, Snap!, Blockly, and some older languages like Basic and Logo (which are still popular today). + Being a programmer isn't so much about knowing the particular commands in a programming language, but knowing how to put together the building blocks such as loops, if statements, variables, input and output in a way that the computer will do what is intended. This involves working out the general process for achieving the goal, representing that process in the particular language, and making sure that it does what is intended. @@ -32,7 +32,7 @@ This involves working out the general process for achieving the goal, representi {panel type="math" title="Mathematical links"} There are strong connections between mathematics and programming. -Good programmers need to have a good base of mathematical knowledge, since a program is effectively a mathematical formula, and getting the structure of a program right requires good reasoning. +Good programmers need to have a good base of mathematical knowledge, since a program is effectively a mathematical formula, and getting the structure of a program right requires good reasoning. {panel end} @@ -42,71 +42,15 @@ Good programmers need to have a good base of mathematical knowledge, since a pro Giving sequential instructions are an important element of all programming languages, and so the Kidbots activity lays the foundation for understanding more conventional languages. It also exercises the ability to predict what a program will do, reason about where any bugs are, and understand that there can be multiple correct ways to program a solution. - -Being able to give exact sequential instructions, work well together and understand how to break a big problem into small pieces and then tackling the small piece one at a time are all life skills that can be transferred from computer programming to other tasks that students need to do. - -## Seeing the Computational Thinking connections - - -{panel type="ct-algorithm" title="Algorithmic thinking"} - -Throughout this unit students will be creating algorithms to ‘program’ the bot to move across grids. -The algorithmic thinking focuses on students learning to sequence a set of instructions to complete a task, to use specific types of instructions in their algorithms, and to find equivalent ways to achieve the same outcome. - -{panel end} -{panel type="ct-abstraction" title="Abstraction"} +Being able to give exact sequential instructions, work well together and understand how to break a big problem into small pieces and then tackling the small piece one at a time are all life skills that can be transferred from computer programming to other tasks that students need to do. -The ‘programming’ students will be doing in these activities is an abstract version of the type of programming we do on computers using conventional programming languages. 
-In this unit students will use a small set of very basic instructions as their programming language, write their programs in simple words, and give their instructions verbally to the bot. -This allows students to learn about sequencing in programming and practice creating algorithms without needing to learn a programming language first or use technical terminology and tools. - -The choice of commands for the programming language also demonstrate the use of abstraction (such as using "L" or an arrow to represent the command "Left"). - -In lesson 3 students will also be using another level of abstraction, as some instructions will be encapsulated inside a loop. - -{panel end} - -{panel type="ct-decomposition" title="Decomposition"} - -In every activity in this unit students will be practicing decomposition, as they break down the movements they want the bot to make into basic and specific instructions. -They will also be encouraged to write their programs incrementally, to write the first few steps, test them, and then add the next steps to the program, instead of trying to solve the whole problem at once. - -{panel end} - -{panel type="ct-pattern" title="Generalising and patterns"} - -As students write their algorithms and programs in this unit there are many different patterns that emerge in how the instructions are sequenced. -Groups of instructions might tell the Bot to move a specific way, such as walk in a square, and if students identify these patterns they can reuse these groups of instructions if they need to have the Bot make the same movements again. - -{panel end} - -{panel type="ct-evaluation" title="Evaluation"} - -Each time students write their programs and test them they will be evaluating them, because the most important thing they need to determine is “did the program work?”. Each time they test their programs they can identify whether it accomplished the task or not. - -There will almost always be multiple programs students could use to accomplish each task, but some will be more efficient than others as they will use fewer steps. -Students can compare these and explain why they think certain programs are better than others. - -There is also the opportunity to compare programming languages. We explore reducing the number of instructions available; in some cases the same goals can be achieved (the language has the same computing ability), but the programs might be longer. - -{panel end} - -{panel type="ct-logic" title="Logic"} - -Students will be continuously exercising their logical thinking skills throughout this unit. -As they are planning their algorithms they will need to step through what they think will happen with each instruction, e.g. “at this point they’ll be facing that way so if I then say move forward twice then they’ll end up on that square”. -They will also need a logical approach to debugging their programs when they do not work correctly. -They will need to step through each instruction and identify when something goes wrong and how they can fix the bug so the program then behaves as expected. +## Reflection questions -{panel end} +- What was most surprising about the learning that happened from the teaching of this unit? -## Reflection questions +- Who were the students who were very systematic in their activities? -- What was most surprising about the learning that happened from the teaching of this unit? +- Who were the students who were very detailed in their activities? -- Who were the students who were very systematic in their activities? 
- -- Who were the students who were very detailed in their activities? - -- What would I change in my delivery of this unit? +- What would I change in my delivery of this unit? diff --git a/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/kidbots-unit-plan.yaml b/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/kidbots-unit-plan.yaml index 3351027a2..0b3512e35 100644 --- a/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/kidbots-unit-plan.yaml +++ b/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/kidbots-unit-plan.yaml @@ -1,4 +1,5 @@ lessons: lessons/lessons.yaml +computational-thinking-links: kidbots-unit-plan-ct-links.md age-groups: 5-7: diff --git a/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/lessons/fitness-unplugged-ct-links.md b/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/lessons/fitness-unplugged-ct-links.md index 28e86356a..1cc07f816 100644 --- a/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/lessons/fitness-unplugged-ct-links.md +++ b/csunplugged/topics/content/en/unplugged-programming/kidbots-unit-plan/lessons/fitness-unplugged-ct-links.md @@ -15,7 +15,6 @@ Were they able to interpret the loop correctly? {panel type="ct-abstraction" title="Abstraction"} The symbols on the cards represented the physical actions students needed to perform; they were an abstract representation. -Abstraction was also used to simplify things when the hula hoop was used as a #### Examples of what you could look for: @@ -25,7 +24,7 @@ Were the students able to work with the symbols? {panel type="ct-decomposition" title="Decomposition"} -The hula hoop represents a compound action, as the cards inside it were a sub-list of actions that could be thought of as a single action that is composed of the sub-actions. +The hula hoop represents a compound action, as the cards inside it were a sub-list of actions that could be thought of as a single action that is composed of the sub-actions. #### Examples of what you could look for: @@ -62,7 +61,7 @@ When students encounter bugs in their programs they will need to logically step They will need to think about what they expect to happen when each instruction is followed, and if they do not get the result they expected they will need to identify what went wrong, why it went wrong, and how to fix it. This requires students to apply their logical thinking skills -#### Examples of what you could look for: +#### Examples of what you could look for: Do students go through their instructions and predict what will happen each time one is executed? When they debug their instructions do they use a logical method to do this? diff --git a/csunplugged/topics/content/en/unplugged-programming/numeracy-unit-plan/numeracy-unit-plan-ct-links.md b/csunplugged/topics/content/en/unplugged-programming/numeracy-unit-plan/numeracy-unit-plan-ct-links.md new file mode 100644 index 000000000..d41a339b1 --- /dev/null +++ b/csunplugged/topics/content/en/unplugged-programming/numeracy-unit-plan/numeracy-unit-plan-ct-links.md @@ -0,0 +1,52 @@ +{panel type="ct-algorithm" title="Algorithmic thinking"} + +Throughout this unit students will be creating algorithms to ‘program’ the Bot to move across grids. +The algorithmic thinking focuses on students learning to sequence a set of instructions to complete a task, to use specific types of instructions in their algorithms, and to find equivalent ways to achieve the same outcome. 
+ +{panel end} + +{panel type="ct-abstraction" title="Abstraction"} + +The ‘programming’ students will be doing in these activities is an abstract version of the type of programming we do on computers using conventional programming languages. +In this unit students will use a small set of very basic instructions as their programming language, write their programs in simple words, and give their instructions verbally to the bot. +This allows students to learn about sequencing in programming and practice creating algorithms without needing to learn a programming language first or use technical terminology and tools. + +The choice of commands for the programming language also demonstrate the use of abstraction (such as using "L" or an arrow to represent the command "Left"). + +In lesson 3 students will also be using another level of abstraction, as some instructions will be encapsulated inside a loop. + +{panel end} + +{panel type="ct-decomposition" title="Decomposition"} + +In every activity in this unit students will be practicing decomposition, as they break down the movements they want the bot to make into basic and specific instructions. +They will also be encouraged to write their programs incrementally, to write the first few steps, test them, and then add the next steps to the program, instead of trying to solve the whole problem at once. + +{panel end} + +{panel type="ct-pattern" title="Generalising and patterns"} + +As students write their algorithms and programs in this unit there are many different patterns that emerge in how the instructions are sequenced. +Groups of instructions might tell the Bot to move a specific way, such as walk in a square, and if students identify these patterns they can reuse these groups of instructions if they need to have the Bot make the same movements again. + +{panel end} + +{panel type="ct-evaluation" title="Evaluation"} + +Each time students write their programs and test them they will be evaluating them, because the most important thing they need to determine is “did the program work?”. Each time they test their programs they can identify whether it accomplished the task or not. + +There will almost always be multiple programs students could use to accomplish each task, but some will be more efficient than others as they will use fewer steps. +Students can compare these and explain why they think certain programs are better than others. + +There is also the opportunity to compare programming languages. We explore reducing the number of instructions available; in some cases the same goals can be achieved (the language has the same computing ability), but the programs might be longer. + +{panel end} + +{panel type="ct-logic" title="Logic"} + +Students will be continuously exercising their logical thinking skills throughout this unit. +As they are planning their algorithms they will need to step through what they think will happen with each instruction, e.g. “at this point they’ll be facing that way so if I then say move forward twice then they’ll end up on that square”. +They will also need a logical approach to debugging their programs when they do not work correctly. +They will need to step through each instruction and identify when something goes wrong and how they can fix the bug so the program then behaves as expected. 
+ +{panel end} diff --git a/csunplugged/topics/content/en/unplugged-programming/numeracy-unit-plan/numeracy-unit-plan.md b/csunplugged/topics/content/en/unplugged-programming/numeracy-unit-plan/numeracy-unit-plan.md index ec8dbb6d6..620aeb87d 100644 --- a/csunplugged/topics/content/en/unplugged-programming/numeracy-unit-plan/numeracy-unit-plan.md +++ b/csunplugged/topics/content/en/unplugged-programming/numeracy-unit-plan/numeracy-unit-plan.md @@ -6,7 +6,7 @@ Writing a computer program involves planning what you're going to do, "coding" the instructions, testing them, tracking down any bugs, and changing the program to that it works correctly. In these activities students explore a simple programming language by taking on separate roles to make these elements of programming explicit. - + This isn't completely artificial; most substantial program is written by a team of people, and often the roles of design, coding and testing are separated out. The program is made because there is a problem to solve to help other people to improve their lives. The people who write the program using a programming language are called the **programmer** (or possibly a **developer** or **software engineer**) - they write the instructions to tell the computer what to do. To be sure that the code is working exactly as it needs to for the people it’s been written for, someone needs to test the program and feeds back to the programmer(s) what needs fixing and why. @@ -18,8 +18,8 @@ The Numeracy programming activities separates the programming from the testing t ## Digital Technologies | Programming Programming or "coding" is when a person (**a programmer**) types in instructions in a programming language so that the computer knows what it needs to do (programmers do lots of other things as well). -Common programming languages that are used in the junior classroom include Scratch, ScratchJr, Python, Snap!, Blockly, and some older languages like Basic and Logo (which are still popular today). - +Common programming languages that are used in the junior classroom include Scratch, ScratchJr, Python, Snap!, Blockly, and some older languages like Basic and Logo (which are still popular today). + Being a programmer isn't so much about knowing the particular commands in a programming language, but knowing how to put together the building blocks such as loops, if statements, variables, input and output in a way that the computer will do what is intended. This involves working out the general process for achieving the goal, representing that process in the particular language, and making sure that it does what is intended. @@ -28,7 +28,7 @@ This involves working out the general process for achieving the goal, representi {panel type="math" title="Mathematical links"} There are strong connections between mathematics and programming. -Good programmers need to have a good base of mathematical knowledge, since a program is effectively a mathematical formula, and getting the structure of a program right requires good reasoning. +Good programmers need to have a good base of mathematical knowledge, since a program is effectively a mathematical formula, and getting the structure of a program right requires good reasoning. {panel end} @@ -38,71 +38,15 @@ Good programmers need to have a good base of mathematical knowledge, since a pro Giving sequential instructions are an important element of all programming languages, and so the numeracy programming activities lay the foundation for understanding more conventional languages. 
It also exercises the ability to predict what a program will do, reason about where any bugs are, and understand that there can be multiple correct ways to program a solution. - -Being able to give exact sequential instructions, work well together and understand how to break a big problem into small pieces and then tackling the small piece one at a time are all life skills that can be transferred from computer programming to other tasks that students need to do. - -## Seeing the Computational Thinking connections - - -{panel type="ct-algorithm" title="Algorithmic thinking"} - -Throughout this unit students will be creating algorithms to ‘program’ the Bot to move across grids. -The algorithmic thinking focuses on students learning to sequence a set of instructions to complete a task, to use specific types of instructions in their algorithms, and to find equivalent ways to achieve the same outcome. - -{panel end} -{panel type="ct-abstraction" title="Abstraction"} +Being able to give exact sequential instructions, work well together and understand how to break a big problem into small pieces and then tackling the small piece one at a time are all life skills that can be transferred from computer programming to other tasks that students need to do. -The ‘programming’ students will be doing in these activities is an abstract version of the type of programming we do on computers using conventional programming languages. -In this unit students will use a small set of very basic instructions as their programming language, write their programs in simple words, and give their instructions verbally to the bot. -This allows students to learn about sequencing in programming and practice creating algorithms without needing to learn a programming language first or use technical terminology and tools. - -The choice of commands for the programming language also demonstrate the use of abstraction (such as using "L" or an arrow to represent the command "Left"). - -In lesson 3 students will also be using another level of abstraction, as some instructions will be encapsulated inside a loop. - -{panel end} - -{panel type="ct-decomposition" title="Decomposition"} - -In every activity in this unit students will be practicing decomposition, as they break down the movements they want the bot to make into basic and specific instructions. -They will also be encouraged to write their programs incrementally, to write the first few steps, test them, and then add the next steps to the program, instead of trying to solve the whole problem at once. - -{panel end} - -{panel type="ct-pattern" title="Generalising and patterns"} - -As students write their algorithms and programs in this unit there are many different patterns that emerge in how the instructions are sequenced. -Groups of instructions might tell the Bot to move a specific way, such as walk in a square, and if students identify these patterns they can reuse these groups of instructions if they need to have the Bot make the same movements again. - -{panel end} - -{panel type="ct-evaluation" title="Evaluation"} - -Each time students write their programs and test them they will be evaluating them, because the most important thing they need to determine is “did the program work?”. Each time they test their programs they can identify whether it accomplished the task or not. - -There will almost always be multiple programs students could use to accomplish each task, but some will be more efficient than others as they will use fewer steps. 
-Students can compare these and explain why they think certain programs are better than others. - -There is also the opportunity to compare programming languages. We explore reducing the number of instructions available; in some cases the same goals can be achieved (the language has the same computing ability), but the programs might be longer. - -{panel end} - -{panel type="ct-logic" title="Logic"} - -Students will be continuously exercising their logical thinking skills throughout this unit. -As they are planning their algorithms they will need to step through what they think will happen with each instruction, e.g. “at this point they’ll be facing that way so if I then say move forward twice then they’ll end up on that square”. -They will also need a logical approach to debugging their programs when they do not work correctly. -They will need to step through each instruction and identify when something goes wrong and how they can fix the bug so the program then behaves as expected. +## Reflection questions -{panel end} +- What was most surprising about the learning that happened from the teaching of this unit? -## Reflection questions +- Who were the students who were very systematic in their activities? -- What was most surprising about the learning that happened from the teaching of this unit? +- Who were the students who were very detailed in their activities? -- Who were the students who were very systematic in their activities? - -- Who were the students who were very detailed in their activities? - -- What would I change in my delivery of this unit? +- What would I change in my delivery of this unit? diff --git a/csunplugged/topics/content/en/unplugged-programming/numeracy-unit-plan/numeracy-unit-plan.yaml b/csunplugged/topics/content/en/unplugged-programming/numeracy-unit-plan/numeracy-unit-plan.yaml index dcc863030..e88c11bc8 100644 --- a/csunplugged/topics/content/en/unplugged-programming/numeracy-unit-plan/numeracy-unit-plan.yaml +++ b/csunplugged/topics/content/en/unplugged-programming/numeracy-unit-plan/numeracy-unit-plan.yaml @@ -1,4 +1,5 @@ lessons: lessons/lessons.yaml +computational-thinking-links: numeracy-unit-plan-ct-links.md age-groups: 8-10: diff --git a/csunplugged/topics/views.py b/csunplugged/topics/views.py index 1272507ea..3a94d82f8 100644 --- a/csunplugged/topics/views.py +++ b/csunplugged/topics/views.py @@ -31,7 +31,7 @@ def get_queryset(self): Returns: Queryset of Topic objects ordered by name. """ - return Topic.objects.order_by("name") + return Topic.objects.order_by("name").prefetch_related("unit_plans") class TopicView(generic.DetailView):
django-json-api__django-rest-framework-json-api-715
Use extra requires to separate optional features django-filter and polymorphic The optional features (Polymorphic and Django filter) should define their dependencies as extras. Currently this is only done as test requires, but actual users won't have enforced minimum requirements. Once this is done, a user can simply add the following to their requirements to properly activate an optional feature: ``` djangorestframework-jsonapi[django-filter] == 2.8.0 ``` see https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies
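To make the request concrete, here is a minimal sketch of the kind of declaration the issue asks for; the extra names and version pins mirror the patch recorded below, while the rest of the `setup()` call is abbreviated for illustration and should not be read as the project's full packaging metadata:

```python
# setup.py (sketch): declare the optional integrations as extras so pip only
# enforces their minimum versions when a user explicitly opts in.
from setuptools import setup

setup(
    name="djangorestframework-jsonapi",
    install_requires=[
        "inflection>=0.3.0",
        "djangorestframework>=3.10",
        "django>=1.11",
    ],
    extras_require={
        "django-polymorphic": ["django-polymorphic>=2.0"],
        "django-filter": ["django-filter>=2.0"],
    },
)
```

A consumer then activates a feature with `pip install "djangorestframework-jsonapi[django-filter]"`, or by pinning `djangorestframework-jsonapi[django-filter] == 2.8.0` in a requirements file as in the issue text above.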
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths and return the contents.\n \"\"\"\n with open(os.path.join(*paths), 'r') as f:\n return f.read()\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n os.system(\"python setup.py bdist_wheel upload\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(\n get_version('rest_framework_json_api')))\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name='djangorestframework-jsonapi',\n version=get_version('rest_framework_json_api'),\n url='https://github.com/django-json-api/django-rest-framework-json-api',\n license='BSD',\n description='A Django REST framework API adapter for the JSON API spec.',\n long_description=read('README.rst'),\n author='Jerel Unruh',\n author_email='',\n packages=get_packages('rest_framework_json_api'),\n package_data=get_package_data('rest_framework_json_api'),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[\n 'inflection>=0.3.0',\n 'djangorestframework>=3.10',\n 'django>=1.11',\n ],\n python_requires=\">=3.5\",\n zip_safe=False,\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\n\ndef read(*paths):\n \"\"\"\n Build a file path from paths and return the contents.\n \"\"\"\n with open(os.path.join(*paths), 'r') as f:\n return f.read()\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n os.system(\"python setup.py bdist_wheel upload\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(\n get_version('rest_framework_json_api')))\n print(\" git push --tags\")\n sys.exit()\n\nsetup(\n name='djangorestframework-jsonapi',\n version=get_version('rest_framework_json_api'),\n url='https://github.com/django-json-api/django-rest-framework-json-api',\n license='BSD',\n description='A Django REST framework API adapter for the JSON API spec.',\n long_description=read('README.rst'),\n author='Jerel Unruh',\n author_email='',\n packages=get_packages('rest_framework_json_api'),\n package_data=get_package_data('rest_framework_json_api'),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[\n 'inflection>=0.3.0',\n 'djangorestframework>=3.10',\n 'django>=1.11',\n ],\n extras_require={\n 'django-polymorphic': ['django-polymorphic>=2.0'],\n 'django-filter': ['django-filter>=2.0']\n },\n python_requires=\">=3.5\",\n zip_safe=False,\n)\n", "path": "setup.py" } ]
diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ac200cd..3e967fc7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,15 @@ This release is not backwards compatible. For easy migration best upgrade first * Add support for Django REST framework 3.10. * Add code from ErrorDetail into the JSON:API error object +### Changed + +* Moved dependency definition for `django-polymorphic` and `django-filter` into extra requires. + Hence dependencies of each optional module can be installed with pip using + ``` + pip install djangorestframework-jsonapi['django-polymorphic'] + pip install djangorestframework-jsonapi['django-filter']` + ``` + ### Removed * Removed support for Python 2.7 and 3.4. diff --git a/README.rst b/README.rst index 4f3d8aaa..2a53723c 100644 --- a/README.rst +++ b/README.rst @@ -101,6 +101,9 @@ From PyPI :: $ pip install djangorestframework-jsonapi + $ # for optional package integrations + $ pip install djangorestframework-jsonapi['django-filter'] + $ pip install djangorestframework-jsonapi['django-polymorphic'] From Source diff --git a/docs/getting-started.md b/docs/getting-started.md index 93096d79..d89eb248 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -60,6 +60,9 @@ like the following: From PyPI pip install djangorestframework-jsonapi + # for optional package integrations + pip install djangorestframework-jsonapi['django-filter'] + pip install djangorestframework-jsonapi['django-polymorphic'] From Source diff --git a/docs/usage.md b/docs/usage.md index a655687a..fc8e2895 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -4,7 +4,7 @@ The DJA package implements a custom renderer, parser, exception handler, query filter backends, and pagination. To get started enable the pieces in `settings.py` that you want to use. -Many features of the [JSON:API](http://jsonapi.org/format) format standard have been implemented using +Many features of the [JSON:API](http://jsonapi.org/format) format standard have been implemented using Mixin classes in `serializers.py`. The easiest way to make use of those features is to import ModelSerializer variants from `rest_framework_json_api` instead of the usual `rest_framework` @@ -108,7 +108,8 @@ class MyLimitPagination(JsonApiLimitOffsetPagination): Following are descriptions of JSON:API-specific filter backends and documentation on suggested usage for a standard DRF keyword-search filter backend that makes it consistent with JSON:API. -#### `QueryParameterValidationFilter` +#### QueryParameterValidationFilter + `QueryParameterValidationFilter` validates query parameters to be one of the defined JSON:API query parameters (sort, include, filter, fields, page) and returns a `400 Bad Request` if a non-matching query parameter is used. This can help the client identify misspelled query parameters, for example. @@ -131,7 +132,8 @@ class MyQPValidator(QueryValidationFilter): If you don't care if non-JSON:API query parameters are allowed (and potentially silently ignored), simply don't use this filter backend. -#### `OrderingFilter` +#### OrderingFilter + `OrderingFilter` implements the [JSON:API `sort`](http://jsonapi.org/format/#fetching-sorting) and uses DRF's [ordering filter](http://django-rest-framework.readthedocs.io/en/latest/api-guide/filtering/#orderingfilter). @@ -155,7 +157,8 @@ field name and the other two are not valid: If you want to silently ignore bad sort fields, just use `rest_framework.filters.OrderingFilter` and set `ordering_param` to `sort`. 
-#### `DjangoFilterBackend` +#### DjangoFilterBackend + `DjangoFilterBackend` implements a Django ORM-style [JSON:API `filter`](http://jsonapi.org/format/#fetching-filtering) using the [django-filter](https://django-filter.readthedocs.io/) package. @@ -178,13 +181,6 @@ Filters can be: - A related resource path can be used: `?filter[inventory.item.partNum]=123456` (where `inventory.item` is the relationship path) -If you are also using [`SearchFilter`](#searchfilter) -(which performs single parameter searches across multiple fields) you'll want to customize the name of the query -parameter for searching to make sure it doesn't conflict with a field name defined in the filterset. -The recommended value is: `search_param="filter[search]"` but just make sure it's -`filter[_something_]` to comply with the JSON:API spec requirement to use the filter -keyword. The default is `REST_FRAMEWORK['SEARCH_PARAM']` unless overriden. - The filter returns a `400 Bad Request` error for invalid filter query parameters as in this example for `GET http://127.0.0.1:8000/nopage-entries?filter[bad]=1`: ```json @@ -201,7 +197,11 @@ for `GET http://127.0.0.1:8000/nopage-entries?filter[bad]=1`: } ``` -#### `SearchFilter` +As this feature depends on `django-filter` you need to run + + pip install djangorestframework-jsonapi['django-filter'] + +#### SearchFilter To comply with JSON:API query parameter naming standards, DRF's [SearchFilter](https://django-rest-framework.readthedocs.io/en/latest/api-guide/filtering/#searchfilter) should @@ -211,12 +211,11 @@ adding the `.search_param` attribute to a custom class derived from `SearchFilte use [`DjangoFilterBackend`](#djangofilterbackend), make sure you set the same values for both classes. - #### Configuring Filter Backends You can configure the filter backends either by setting the `REST_FRAMEWORK['DEFAULT_FILTER_BACKENDS']` as shown in the [example settings](#configuration) or individually add them as `.filter_backends` View attributes: - + ```python from rest_framework_json_api import filters from rest_framework_json_api import django_filters @@ -699,6 +698,10 @@ DJA tests its polymorphic support against [django-polymorphic](https://django-po The polymorphic feature should also work with other popular libraries like django-polymodels or django-typed-models. +As this feature depends on `django-polymorphic` you need to run + + pip install djangorestframework-jsonapi['django-polymorphic'] + #### Writing polymorphic resources A polymorphic endpoint can be set up if associated with a polymorphic serializer. diff --git a/setup.py b/setup.py index d9c1c926..7181beb0 100755 --- a/setup.py +++ b/setup.py @@ -90,6 +90,10 @@ def get_package_data(package): 'djangorestframework>=3.10', 'django>=1.11', ], + extras_require={ + 'django-polymorphic': ['django-polymorphic>=2.0'], + 'django-filter': ['django-filter>=2.0'] + }, python_requires=">=3.5", zip_safe=False, )
pydantic__pydantic-3197
<Model>.schema() method handles Enum and IntEnum default field resolution differently ### Checks * [x] I added a descriptive title to this issue * [x] I have searched (google, github) for similar issues and couldn't find anything * [x] I have read and followed [the docs](https://pydantic-docs.helpmanual.io/) and still think this is a bug <!-- Sorry to sound so draconian, but every second saved replying to issues is time spend improving pydantic :-) --> # Bug Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`: ``` pydantic version: 1.8.2 pydantic compiled: True install path: C:\Users\jmartins\.virtualenvs\pydantic_bug_report-NJE4-7fw\Lib\site-packages\pydantic python version: 3.9.6 (tags/v3.9.6:db3ff76, Jun 28 2021, 15:26:21) [MSC v.1929 64 bit (AMD64)] platform: Windows-10-10.0.19042-SP0 optional deps. installed: ['typing-extensions'] ``` <!-- or if you're using pydantic prior to v1.3, manually include: OS, python version and pydantic version --> <!-- Please read the [docs](https://pydantic-docs.helpmanual.io/) and search through issues to confirm your bug hasn't already been reported. --> <!-- Where possible please include a self-contained code snippet describing your bug: --> Generating a schema with the .schema() method works as expected when resolving default values of Enum type, while it does not resolve default values of IntEnum type the same way. A minimum example follows: ```py # std lib imports from enum import Enum, IntEnum # external imports from pydantic import BaseModel class ExampleEnum(Enum): A = "a" class ExampleIntEnum(IntEnum): A = 1 class ExampleModel(BaseModel): example_enum: ExampleEnum = ExampleEnum.A example_int_enum: ExampleIntEnum = ExampleIntEnum.A generated_schema_properties = ExampleModel.schema().get("properties", {}) example_enum_generated_default = generated_schema_properties.get("example_enum", {}).get("default", None) example_int_enum_generated_default = generated_schema_properties.get("example_int_enum", {}).get("default", None) print(example_enum_generated_default is ExampleEnum.A.value) # -> True print(example_int_enum_generated_default is ExampleIntEnum.A.value) # -> False ``` I've tracked the issue down to the `encode_default` function in `schema.py`: ```py def encode_default(dft: Any) -> Any: if isinstance(dft, (int, float, str)): return dft elif sequence_like(dft): t = dft.__class__ return t(encode_default(v) for v in dft) elif isinstance(dft, dict): return {encode_default(k): encode_default(v) for k, v in dft.items()} elif dft is None: return None else: return pydantic_encoder(dft) ``` When resolving defaults for Enum the else clause is correctly used, but since `isinstance(ExampleIntEnum.A, int)` is truthy it returns ExampleIntEnum.A when using an IntEnum. I would suggest changing the first if to a stricter direct 'primitive' type check like `if type(dft) in (int, float, str):`. I can do this myself and open a PR if there is interest and no opposition to a stricter type check.
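For concreteness, below is a minimal sketch of `encode_default` with the exact-type check suggested above. This is the reporter's proposed change, not necessarily the fix that was ultimately merged; the two helper imports are pydantic v1 internals that `pydantic/schema.py` already uses, referenced here only so the sketch runs against pydantic 1.8.2. Listing `bool` in the check is an extra precaution added here (not part of the issue text), because `bool` subclasses `int` and would otherwise no longer be caught by the first branch:

```python
from typing import Any

from pydantic.json import pydantic_encoder  # pydantic v1 internal helpers
from pydantic.utils import sequence_like


def encode_default(dft: Any) -> Any:
    # Exact type check (bool listed explicitly since it subclasses int): an
    # IntEnum member passes isinstance(..., int) but its type is not exactly
    # int, so it now falls through to pydantic_encoder and is reduced to its
    # underlying .value, matching the behaviour already seen for plain Enum.
    if type(dft) in (bool, int, float, str):
        return dft
    elif sequence_like(dft):
        t = dft.__class__
        return t(encode_default(v) for v in dft)
    elif isinstance(dft, dict):
        return {encode_default(k): encode_default(v) for k, v in dft.items()}
    elif dft is None:
        return None
    else:
        return pydantic_encoder(dft)
```

With this change, `encode_default(ExampleIntEnum.A)` from the reproduction above would return `1` (the member's `.value`) rather than the enum member itself, so the generated `default` for an `IntEnum` field would match the existing `Enum` behaviour.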
[ { "content": "import re\nimport warnings\nfrom collections import defaultdict\nfrom datetime import date, datetime, time, timedelta\nfrom decimal import Decimal\nfrom enum import Enum\nfrom ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n FrozenSet,\n Generic,\n Iterable,\n List,\n Optional,\n Pattern,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n cast,\n)\nfrom uuid import UUID\n\nfrom typing_extensions import Annotated, Literal\n\nfrom .fields import (\n MAPPING_LIKE_SHAPES,\n SHAPE_DEQUE,\n SHAPE_FROZENSET,\n SHAPE_GENERIC,\n SHAPE_ITERABLE,\n SHAPE_LIST,\n SHAPE_SEQUENCE,\n SHAPE_SET,\n SHAPE_SINGLETON,\n SHAPE_TUPLE,\n SHAPE_TUPLE_ELLIPSIS,\n FieldInfo,\n ModelField,\n)\nfrom .json import pydantic_encoder\nfrom .networks import AnyUrl, EmailStr\nfrom .types import (\n ConstrainedDecimal,\n ConstrainedFloat,\n ConstrainedInt,\n ConstrainedList,\n ConstrainedSet,\n ConstrainedStr,\n SecretBytes,\n SecretStr,\n conbytes,\n condecimal,\n confloat,\n conint,\n conlist,\n conset,\n constr,\n)\nfrom .typing import (\n ForwardRef,\n all_literal_values,\n get_args,\n get_origin,\n is_callable_type,\n is_literal_type,\n is_namedtuple,\n is_none_type,\n is_union_origin,\n)\nfrom .utils import ROOT_KEY, get_model, lenient_issubclass, sequence_like\n\nif TYPE_CHECKING:\n from .dataclasses import Dataclass\n from .main import BaseModel\n\ndefault_prefix = '#/definitions/'\ndefault_ref_template = '#/definitions/{model}'\n\nTypeModelOrEnum = Union[Type['BaseModel'], Type[Enum]]\nTypeModelSet = Set[TypeModelOrEnum]\n\n\ndef schema(\n models: Sequence[Union[Type['BaseModel'], Type['Dataclass']]],\n *,\n by_alias: bool = True,\n title: Optional[str] = None,\n description: Optional[str] = None,\n ref_prefix: Optional[str] = None,\n ref_template: str = default_ref_template,\n) -> Dict[str, Any]:\n \"\"\"\n Process a list of models and generate a single JSON Schema with all of them defined in the ``definitions``\n top-level JSON key, including their sub-models.\n\n :param models: a list of models to include in the generated JSON Schema\n :param by_alias: generate the schemas using the aliases defined, if any\n :param title: title for the generated schema that includes the definitions\n :param description: description for the generated schema\n :param ref_prefix: the JSON Pointer prefix for schema references with ``$ref``, if None, will be set to the\n default of ``#/definitions/``. Update it if you want the schemas to reference the definitions somewhere\n else, e.g. for OpenAPI use ``#/components/schemas/``. The resulting generated schemas will still be at the\n top-level key ``definitions``, so you can extract them from there. But all the references will have the set\n prefix.\n :param ref_template: Use a ``string.format()`` template for ``$ref`` instead of a prefix. This can be useful\n for references that cannot be represented by ``ref_prefix`` such as a definition stored in another file. 
For\n a sibling json file in a ``/schemas`` directory use ``\"/schemas/${model}.json#\"``.\n :return: dict with the JSON Schema with a ``definitions`` top-level key including the schema definitions for\n the models and sub-models passed in ``models``.\n \"\"\"\n clean_models = [get_model(model) for model in models]\n flat_models = get_flat_models_from_models(clean_models)\n model_name_map = get_model_name_map(flat_models)\n definitions = {}\n output_schema: Dict[str, Any] = {}\n if title:\n output_schema['title'] = title\n if description:\n output_schema['description'] = description\n for model in clean_models:\n m_schema, m_definitions, m_nested_models = model_process_schema(\n model,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n )\n definitions.update(m_definitions)\n model_name = model_name_map[model]\n definitions[model_name] = m_schema\n if definitions:\n output_schema['definitions'] = definitions\n return output_schema\n\n\ndef model_schema(\n model: Union[Type['BaseModel'], Type['Dataclass']],\n by_alias: bool = True,\n ref_prefix: Optional[str] = None,\n ref_template: str = default_ref_template,\n) -> Dict[str, Any]:\n \"\"\"\n Generate a JSON Schema for one model. With all the sub-models defined in the ``definitions`` top-level\n JSON key.\n\n :param model: a Pydantic model (a class that inherits from BaseModel)\n :param by_alias: generate the schemas using the aliases defined, if any\n :param ref_prefix: the JSON Pointer prefix for schema references with ``$ref``, if None, will be set to the\n default of ``#/definitions/``. Update it if you want the schemas to reference the definitions somewhere\n else, e.g. for OpenAPI use ``#/components/schemas/``. The resulting generated schemas will still be at the\n top-level key ``definitions``, so you can extract them from there. But all the references will have the set\n prefix.\n :param ref_template: Use a ``string.format()`` template for ``$ref`` instead of a prefix. This can be useful for\n references that cannot be represented by ``ref_prefix`` such as a definition stored in another file. 
For a\n sibling json file in a ``/schemas`` directory use ``\"/schemas/${model}.json#\"``.\n :return: dict with the JSON Schema for the passed ``model``\n \"\"\"\n model = get_model(model)\n flat_models = get_flat_models_from_model(model)\n model_name_map = get_model_name_map(flat_models)\n model_name = model_name_map[model]\n m_schema, m_definitions, nested_models = model_process_schema(\n model, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix, ref_template=ref_template\n )\n if model_name in nested_models:\n # model_name is in Nested models, it has circular references\n m_definitions[model_name] = m_schema\n m_schema = get_schema_ref(model_name, ref_prefix, ref_template, False)\n if m_definitions:\n m_schema.update({'definitions': m_definitions})\n return m_schema\n\n\ndef get_field_info_schema(field: ModelField) -> Tuple[Dict[str, Any], bool]:\n schema_overrides = False\n\n # If no title is explicitly set, we don't set title in the schema for enums.\n # The behaviour is the same as `BaseModel` reference, where the default title\n # is in the definitions part of the schema.\n schema_: Dict[str, Any] = {}\n if field.field_info.title or not lenient_issubclass(field.type_, Enum):\n schema_['title'] = field.field_info.title or field.alias.title().replace('_', ' ')\n\n if field.field_info.title:\n schema_overrides = True\n\n if field.field_info.description:\n schema_['description'] = field.field_info.description\n schema_overrides = True\n\n if (\n not field.required\n and not field.field_info.const\n and field.default is not None\n and not is_callable_type(field.outer_type_)\n ):\n schema_['default'] = encode_default(field.default)\n schema_overrides = True\n\n return schema_, schema_overrides\n\n\ndef field_schema(\n field: ModelField,\n *,\n by_alias: bool = True,\n model_name_map: Dict[TypeModelOrEnum, str],\n ref_prefix: Optional[str] = None,\n ref_template: str = default_ref_template,\n known_models: TypeModelSet = None,\n) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:\n \"\"\"\n Process a Pydantic field and return a tuple with a JSON Schema for it as the first item.\n Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field\n is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they\n will be included in the definitions and referenced in the schema instead of included recursively.\n\n :param field: a Pydantic ``ModelField``\n :param by_alias: use the defined alias (if any) in the returned schema\n :param model_name_map: used to generate the JSON Schema references to other models included in the definitions\n :param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of\n #/definitions/ will be used\n :param ref_template: Use a ``string.format()`` template for ``$ref`` instead of a prefix. This can be useful for\n references that cannot be represented by ``ref_prefix`` such as a definition stored in another file. 
For a\n sibling json file in a ``/schemas`` directory use ``\"/schemas/${model}.json#\"``.\n :param known_models: used to solve circular references\n :return: tuple of the schema for this field and additional definitions\n \"\"\"\n s, schema_overrides = get_field_info_schema(field)\n\n validation_schema = get_field_schema_validations(field)\n if validation_schema:\n s.update(validation_schema)\n schema_overrides = True\n\n f_schema, f_definitions, f_nested_models = field_type_schema(\n field,\n by_alias=by_alias,\n model_name_map=model_name_map,\n schema_overrides=schema_overrides,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models or set(),\n )\n # $ref will only be returned when there are no schema_overrides\n if '$ref' in f_schema:\n return f_schema, f_definitions, f_nested_models\n else:\n s.update(f_schema)\n return s, f_definitions, f_nested_models\n\n\nnumeric_types = (int, float, Decimal)\n_str_types_attrs: Tuple[Tuple[str, Union[type, Tuple[type, ...]], str], ...] = (\n ('max_length', numeric_types, 'maxLength'),\n ('min_length', numeric_types, 'minLength'),\n ('regex', str, 'pattern'),\n)\n\n_numeric_types_attrs: Tuple[Tuple[str, Union[type, Tuple[type, ...]], str], ...] = (\n ('gt', numeric_types, 'exclusiveMinimum'),\n ('lt', numeric_types, 'exclusiveMaximum'),\n ('ge', numeric_types, 'minimum'),\n ('le', numeric_types, 'maximum'),\n ('multiple_of', numeric_types, 'multipleOf'),\n)\n\n\ndef get_field_schema_validations(field: ModelField) -> Dict[str, Any]:\n \"\"\"\n Get the JSON Schema validation keywords for a ``field`` with an annotation of\n a Pydantic ``FieldInfo`` with validation arguments.\n \"\"\"\n f_schema: Dict[str, Any] = {}\n\n if lenient_issubclass(field.type_, Enum):\n # schema is already updated by `enum_process_schema`; just update with field extra\n if field.field_info.extra:\n f_schema.update(field.field_info.extra)\n return f_schema\n\n if lenient_issubclass(field.type_, (str, bytes)):\n for attr_name, t, keyword in _str_types_attrs:\n attr = getattr(field.field_info, attr_name, None)\n if isinstance(attr, t):\n f_schema[keyword] = attr\n if lenient_issubclass(field.type_, numeric_types) and not issubclass(field.type_, bool):\n for attr_name, t, keyword in _numeric_types_attrs:\n attr = getattr(field.field_info, attr_name, None)\n if isinstance(attr, t):\n f_schema[keyword] = attr\n if field.field_info is not None and field.field_info.const:\n f_schema['const'] = field.default\n if field.field_info.extra:\n f_schema.update(field.field_info.extra)\n modify_schema = getattr(field.outer_type_, '__modify_schema__', None)\n if modify_schema:\n modify_schema(f_schema)\n return f_schema\n\n\ndef get_model_name_map(unique_models: TypeModelSet) -> Dict[TypeModelOrEnum, str]:\n \"\"\"\n Process a set of models and generate unique names for them to be used as keys in the JSON Schema\n definitions. By default the names are the same as the class name. But if two models in different Python\n modules have the same name (e.g. 
\"users.Model\" and \"items.Model\"), the generated names will be\n based on the Python module path for those conflicting models to prevent name collisions.\n\n :param unique_models: a Python set of models\n :return: dict mapping models to names\n \"\"\"\n name_model_map = {}\n conflicting_names: Set[str] = set()\n for model in unique_models:\n model_name = normalize_name(model.__name__)\n if model_name in conflicting_names:\n model_name = get_long_model_name(model)\n name_model_map[model_name] = model\n elif model_name in name_model_map:\n conflicting_names.add(model_name)\n conflicting_model = name_model_map.pop(model_name)\n name_model_map[get_long_model_name(conflicting_model)] = conflicting_model\n name_model_map[get_long_model_name(model)] = model\n else:\n name_model_map[model_name] = model\n return {v: k for k, v in name_model_map.items()}\n\n\ndef get_flat_models_from_model(model: Type['BaseModel'], known_models: TypeModelSet = None) -> TypeModelSet:\n \"\"\"\n Take a single ``model`` and generate a set with itself and all the sub-models in the tree. I.e. if you pass\n model ``Foo`` (subclass of Pydantic ``BaseModel``) as ``model``, and it has a field of type ``Bar`` (also\n subclass of ``BaseModel``) and that model ``Bar`` has a field of type ``Baz`` (also subclass of ``BaseModel``),\n the return value will be ``set([Foo, Bar, Baz])``.\n\n :param model: a Pydantic ``BaseModel`` subclass\n :param known_models: used to solve circular references\n :return: a set with the initial model and all its sub-models\n \"\"\"\n known_models = known_models or set()\n flat_models: TypeModelSet = set()\n flat_models.add(model)\n known_models |= flat_models\n fields = cast(Sequence[ModelField], model.__fields__.values())\n flat_models |= get_flat_models_from_fields(fields, known_models=known_models)\n return flat_models\n\n\ndef get_flat_models_from_field(field: ModelField, known_models: TypeModelSet) -> TypeModelSet:\n \"\"\"\n Take a single Pydantic ``ModelField`` (from a model) that could have been declared as a sublcass of BaseModel\n (so, it could be a submodel), and generate a set with its model and all the sub-models in the tree.\n I.e. 
if you pass a field that was declared to be of type ``Foo`` (subclass of BaseModel) as ``field``, and that\n model ``Foo`` has a field of type ``Bar`` (also subclass of ``BaseModel``) and that model ``Bar`` has a field of\n type ``Baz`` (also subclass of ``BaseModel``), the return value will be ``set([Foo, Bar, Baz])``.\n\n :param field: a Pydantic ``ModelField``\n :param known_models: used to solve circular references\n :return: a set with the model used in the declaration for this field, if any, and all its sub-models\n \"\"\"\n from .dataclasses import dataclass, is_builtin_dataclass\n from .main import BaseModel\n\n flat_models: TypeModelSet = set()\n\n # Handle dataclass-based models\n if is_builtin_dataclass(field.type_):\n field.type_ = dataclass(field.type_)\n field_type = field.type_\n if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), BaseModel):\n field_type = field_type.__pydantic_model__\n if field.sub_fields and not lenient_issubclass(field_type, BaseModel):\n flat_models |= get_flat_models_from_fields(field.sub_fields, known_models=known_models)\n elif lenient_issubclass(field_type, BaseModel) and field_type not in known_models:\n flat_models |= get_flat_models_from_model(field_type, known_models=known_models)\n elif lenient_issubclass(field_type, Enum):\n flat_models.add(field_type)\n return flat_models\n\n\ndef get_flat_models_from_fields(fields: Sequence[ModelField], known_models: TypeModelSet) -> TypeModelSet:\n \"\"\"\n Take a list of Pydantic ``ModelField``s (from a model) that could have been declared as subclasses of ``BaseModel``\n (so, any of them could be a submodel), and generate a set with their models and all the sub-models in the tree.\n I.e. if you pass a the fields of a model ``Foo`` (subclass of ``BaseModel``) as ``fields``, and on of them has a\n field of type ``Bar`` (also subclass of ``BaseModel``) and that model ``Bar`` has a field of type ``Baz`` (also\n subclass of ``BaseModel``), the return value will be ``set([Foo, Bar, Baz])``.\n\n :param fields: a list of Pydantic ``ModelField``s\n :param known_models: used to solve circular references\n :return: a set with any model declared in the fields, and all their sub-models\n \"\"\"\n flat_models: TypeModelSet = set()\n for field in fields:\n flat_models |= get_flat_models_from_field(field, known_models=known_models)\n return flat_models\n\n\ndef get_flat_models_from_models(models: Sequence[Type['BaseModel']]) -> TypeModelSet:\n \"\"\"\n Take a list of ``models`` and generate a set with them and all their sub-models in their trees. I.e. 
if you pass\n a list of two models, ``Foo`` and ``Bar``, both subclasses of Pydantic ``BaseModel`` as models, and ``Bar`` has\n a field of type ``Baz`` (also subclass of ``BaseModel``), the return value will be ``set([Foo, Bar, Baz])``.\n \"\"\"\n flat_models: TypeModelSet = set()\n for model in models:\n flat_models |= get_flat_models_from_model(model)\n return flat_models\n\n\ndef get_long_model_name(model: TypeModelOrEnum) -> str:\n return f'{model.__module__}__{model.__qualname__}'.replace('.', '__')\n\n\ndef field_type_schema(\n field: ModelField,\n *,\n by_alias: bool,\n model_name_map: Dict[TypeModelOrEnum, str],\n ref_template: str,\n schema_overrides: bool = False,\n ref_prefix: Optional[str] = None,\n known_models: TypeModelSet,\n) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:\n \"\"\"\n Used by ``field_schema()``, you probably should be using that function.\n\n Take a single ``field`` and generate the schema for its type only, not including additional\n information as title, etc. Also return additional schema definitions, from sub-models.\n \"\"\"\n from .main import BaseModel # noqa: F811\n\n definitions = {}\n nested_models: Set[str] = set()\n f_schema: Dict[str, Any]\n if field.shape in {\n SHAPE_LIST,\n SHAPE_TUPLE_ELLIPSIS,\n SHAPE_SEQUENCE,\n SHAPE_SET,\n SHAPE_FROZENSET,\n SHAPE_ITERABLE,\n SHAPE_DEQUE,\n }:\n items_schema, f_definitions, f_nested_models = field_singleton_schema(\n field,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n definitions.update(f_definitions)\n nested_models.update(f_nested_models)\n f_schema = {'type': 'array', 'items': items_schema}\n if field.shape in {SHAPE_SET, SHAPE_FROZENSET}:\n f_schema['uniqueItems'] = True\n\n elif field.shape in MAPPING_LIKE_SHAPES:\n f_schema = {'type': 'object'}\n key_field = cast(ModelField, field.key_field)\n regex = getattr(key_field.type_, 'regex', None)\n items_schema, f_definitions, f_nested_models = field_singleton_schema(\n field,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n definitions.update(f_definitions)\n nested_models.update(f_nested_models)\n if regex:\n # Dict keys have a regex pattern\n # items_schema might be a schema or empty dict, add it either way\n f_schema['patternProperties'] = {regex.pattern: items_schema}\n elif items_schema:\n # The dict values are not simply Any, so they need a schema\n f_schema['additionalProperties'] = items_schema\n elif field.shape == SHAPE_TUPLE or (field.shape == SHAPE_GENERIC and not issubclass(field.type_, BaseModel)):\n sub_schema = []\n sub_fields = cast(List[ModelField], field.sub_fields)\n for sf in sub_fields:\n sf_schema, sf_definitions, sf_nested_models = field_type_schema(\n sf,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n definitions.update(sf_definitions)\n nested_models.update(sf_nested_models)\n sub_schema.append(sf_schema)\n if len(sub_schema) == 1:\n if field.shape == SHAPE_GENERIC:\n f_schema = sub_schema[0]\n else:\n f_schema = {'type': 'array', 'items': sub_schema[0]}\n else:\n f_schema = {'type': 'array', 'items': sub_schema}\n if field.shape == SHAPE_GENERIC:\n f_schema = {'allOf': [f_schema]}\n else:\n assert field.shape in {SHAPE_SINGLETON, SHAPE_GENERIC}, field.shape\n f_schema, f_definitions, f_nested_models = field_singleton_schema(\n field,\n 
by_alias=by_alias,\n model_name_map=model_name_map,\n schema_overrides=schema_overrides,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n definitions.update(f_definitions)\n nested_models.update(f_nested_models)\n\n # check field type to avoid repeated calls to the same __modify_schema__ method\n if field.type_ != field.outer_type_:\n if field.shape == SHAPE_GENERIC:\n field_type = field.type_\n else:\n field_type = field.outer_type_\n modify_schema = getattr(field_type, '__modify_schema__', None)\n if modify_schema:\n modify_schema(f_schema)\n return f_schema, definitions, nested_models\n\n\ndef model_process_schema(\n model: TypeModelOrEnum,\n *,\n by_alias: bool = True,\n model_name_map: Dict[TypeModelOrEnum, str],\n ref_prefix: Optional[str] = None,\n ref_template: str = default_ref_template,\n known_models: TypeModelSet = None,\n) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:\n \"\"\"\n Used by ``model_schema()``, you probably should be using that function.\n\n Take a single ``model`` and generate its schema. Also return additional schema definitions, from sub-models. The\n sub-models of the returned schema will be referenced, but their definitions will not be included in the schema. All\n the definitions are returned as the second value.\n \"\"\"\n from inspect import getdoc, signature\n\n known_models = known_models or set()\n if lenient_issubclass(model, Enum):\n model = cast(Type[Enum], model)\n s = enum_process_schema(model)\n return s, {}, set()\n model = cast(Type['BaseModel'], model)\n s = {'title': model.__config__.title or model.__name__}\n doc = getdoc(model)\n if doc:\n s['description'] = doc\n known_models.add(model)\n m_schema, m_definitions, nested_models = model_type_schema(\n model,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n s.update(m_schema)\n schema_extra = model.__config__.schema_extra\n if callable(schema_extra):\n if len(signature(schema_extra).parameters) == 1:\n schema_extra(s)\n else:\n schema_extra(s, model)\n else:\n s.update(schema_extra)\n return s, m_definitions, nested_models\n\n\ndef model_type_schema(\n model: Type['BaseModel'],\n *,\n by_alias: bool,\n model_name_map: Dict[TypeModelOrEnum, str],\n ref_template: str,\n ref_prefix: Optional[str] = None,\n known_models: TypeModelSet,\n) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:\n \"\"\"\n You probably should be using ``model_schema()``, this function is indirectly used by that function.\n\n Take a single ``model`` and generate the schema for its type only, not including additional\n information as title, etc. 
Also return additional schema definitions, from sub-models.\n \"\"\"\n properties = {}\n required = []\n definitions: Dict[str, Any] = {}\n nested_models: Set[str] = set()\n for k, f in model.__fields__.items():\n try:\n f_schema, f_definitions, f_nested_models = field_schema(\n f,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n except SkipField as skip:\n warnings.warn(skip.message, UserWarning)\n continue\n definitions.update(f_definitions)\n nested_models.update(f_nested_models)\n if by_alias:\n properties[f.alias] = f_schema\n if f.required:\n required.append(f.alias)\n else:\n properties[k] = f_schema\n if f.required:\n required.append(k)\n if ROOT_KEY in properties:\n out_schema = properties[ROOT_KEY]\n out_schema['title'] = model.__config__.title or model.__name__\n else:\n out_schema = {'type': 'object', 'properties': properties}\n if required:\n out_schema['required'] = required\n if model.__config__.extra == 'forbid':\n out_schema['additionalProperties'] = False\n return out_schema, definitions, nested_models\n\n\ndef enum_process_schema(enum: Type[Enum]) -> Dict[str, Any]:\n \"\"\"\n Take a single `enum` and generate its schema.\n\n This is similar to the `model_process_schema` function, but applies to ``Enum`` objects.\n \"\"\"\n from inspect import getdoc\n\n schema_: Dict[str, Any] = {\n 'title': enum.__name__,\n # Python assigns all enums a default docstring value of 'An enumeration', so\n # all enums will have a description field even if not explicitly provided.\n 'description': getdoc(enum),\n # Add enum values and the enum field type to the schema.\n 'enum': [item.value for item in cast(Iterable[Enum], enum)],\n }\n\n add_field_type_to_schema(enum, schema_)\n\n modify_schema = getattr(enum, '__modify_schema__', None)\n if modify_schema:\n modify_schema(schema_)\n\n return schema_\n\n\ndef field_singleton_sub_fields_schema(\n sub_fields: Sequence[ModelField],\n *,\n by_alias: bool,\n model_name_map: Dict[TypeModelOrEnum, str],\n ref_template: str,\n schema_overrides: bool = False,\n ref_prefix: Optional[str] = None,\n known_models: TypeModelSet,\n) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:\n \"\"\"\n This function is indirectly used by ``field_schema()``, you probably should be using that function.\n\n Take a list of Pydantic ``ModelField`` from the declaration of a type with parameters, and generate their\n schema. I.e., fields used as \"type parameters\", like ``str`` and ``int`` in ``Tuple[str, int]``.\n \"\"\"\n definitions = {}\n nested_models: Set[str] = set()\n if len(sub_fields) == 1:\n return field_type_schema(\n sub_fields[0],\n by_alias=by_alias,\n model_name_map=model_name_map,\n schema_overrides=schema_overrides,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n else:\n sub_field_schemas = []\n for sf in sub_fields:\n sub_schema, sub_definitions, sub_nested_models = field_type_schema(\n sf,\n by_alias=by_alias,\n model_name_map=model_name_map,\n schema_overrides=schema_overrides,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n definitions.update(sub_definitions)\n if schema_overrides and 'allOf' in sub_schema:\n # if the sub_field is a referenced schema we only need the referenced\n # object. 
Otherwise we will end up with several allOf inside anyOf.\n # See https://github.com/samuelcolvin/pydantic/issues/1209\n sub_schema = sub_schema['allOf'][0]\n sub_field_schemas.append(sub_schema)\n nested_models.update(sub_nested_models)\n return {'anyOf': sub_field_schemas}, definitions, nested_models\n\n\n# Order is important, e.g. subclasses of str must go before str\n# this is used only for standard library types, custom types should use __modify_schema__ instead\nfield_class_to_schema: Tuple[Tuple[Any, Dict[str, Any]], ...] = (\n (Path, {'type': 'string', 'format': 'path'}),\n (datetime, {'type': 'string', 'format': 'date-time'}),\n (date, {'type': 'string', 'format': 'date'}),\n (time, {'type': 'string', 'format': 'time'}),\n (timedelta, {'type': 'number', 'format': 'time-delta'}),\n (IPv4Network, {'type': 'string', 'format': 'ipv4network'}),\n (IPv6Network, {'type': 'string', 'format': 'ipv6network'}),\n (IPv4Interface, {'type': 'string', 'format': 'ipv4interface'}),\n (IPv6Interface, {'type': 'string', 'format': 'ipv6interface'}),\n (IPv4Address, {'type': 'string', 'format': 'ipv4'}),\n (IPv6Address, {'type': 'string', 'format': 'ipv6'}),\n (Pattern, {'type': 'string', 'format': 'regex'}),\n (str, {'type': 'string'}),\n (bytes, {'type': 'string', 'format': 'binary'}),\n (bool, {'type': 'boolean'}),\n (int, {'type': 'integer'}),\n (float, {'type': 'number'}),\n (Decimal, {'type': 'number'}),\n (UUID, {'type': 'string', 'format': 'uuid'}),\n (dict, {'type': 'object'}),\n (list, {'type': 'array', 'items': {}}),\n (tuple, {'type': 'array', 'items': {}}),\n (set, {'type': 'array', 'items': {}, 'uniqueItems': True}),\n (frozenset, {'type': 'array', 'items': {}, 'uniqueItems': True}),\n)\n\njson_scheme = {'type': 'string', 'format': 'json-string'}\n\n\ndef add_field_type_to_schema(field_type: Any, schema_: Dict[str, Any]) -> None:\n \"\"\"\n Update the given `schema` with the type-specific metadata for the given `field_type`.\n\n This function looks through `field_class_to_schema` for a class that matches the given `field_type`,\n and then modifies the given `schema` with the information from that type.\n \"\"\"\n for type_, t_schema in field_class_to_schema:\n # Fallback for `typing.Pattern` as it is not a valid class\n if lenient_issubclass(field_type, type_) or field_type is type_ is Pattern:\n schema_.update(t_schema)\n break\n\n\ndef get_schema_ref(name: str, ref_prefix: Optional[str], ref_template: str, schema_overrides: bool) -> Dict[str, Any]:\n if ref_prefix:\n schema_ref = {'$ref': ref_prefix + name}\n else:\n schema_ref = {'$ref': ref_template.format(model=name)}\n return {'allOf': [schema_ref]} if schema_overrides else schema_ref\n\n\ndef field_singleton_schema( # noqa: C901 (ignore complexity)\n field: ModelField,\n *,\n by_alias: bool,\n model_name_map: Dict[TypeModelOrEnum, str],\n ref_template: str,\n schema_overrides: bool = False,\n ref_prefix: Optional[str] = None,\n known_models: TypeModelSet,\n) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:\n \"\"\"\n This function is indirectly used by ``field_schema()``, you should probably be using that function.\n\n Take a single Pydantic ``ModelField``, and return its schema and any additional definitions from sub-models.\n \"\"\"\n from .main import BaseModel\n\n definitions: Dict[str, Any] = {}\n nested_models: Set[str] = set()\n field_type = field.type_\n\n # Recurse into this field if it contains sub_fields and is NOT a\n # BaseModel OR that BaseModel is a const\n if field.sub_fields and (\n (field.field_info and 
field.field_info.const) or not lenient_issubclass(field_type, BaseModel)\n ):\n return field_singleton_sub_fields_schema(\n field.sub_fields,\n by_alias=by_alias,\n model_name_map=model_name_map,\n schema_overrides=schema_overrides,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n if field_type is Any or field_type is object or field_type.__class__ == TypeVar:\n return {}, definitions, nested_models # no restrictions\n if is_none_type(field_type):\n return {'type': 'null'}, definitions, nested_models\n if is_callable_type(field_type):\n raise SkipField(f'Callable {field.name} was excluded from schema since JSON schema has no equivalent type.')\n f_schema: Dict[str, Any] = {}\n if field.field_info is not None and field.field_info.const:\n f_schema['const'] = field.default\n\n if is_literal_type(field_type):\n values = all_literal_values(field_type)\n\n if len({v.__class__ for v in values}) > 1:\n return field_schema(\n multitypes_literal_field_for_schema(values, field),\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n\n # All values have the same type\n field_type = values[0].__class__\n f_schema['enum'] = list(values)\n add_field_type_to_schema(field_type, f_schema)\n elif lenient_issubclass(field_type, Enum):\n enum_name = model_name_map[field_type]\n f_schema, schema_overrides = get_field_info_schema(field)\n f_schema.update(get_schema_ref(enum_name, ref_prefix, ref_template, schema_overrides))\n definitions[enum_name] = enum_process_schema(field_type)\n elif is_namedtuple(field_type):\n sub_schema, *_ = model_process_schema(\n field_type.__pydantic_model__,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n f_schema.update({'type': 'array', 'items': list(sub_schema['properties'].values())})\n elif not hasattr(field_type, '__pydantic_model__'):\n add_field_type_to_schema(field_type, f_schema)\n\n modify_schema = getattr(field_type, '__modify_schema__', None)\n if modify_schema:\n modify_schema(f_schema)\n\n if f_schema:\n return f_schema, definitions, nested_models\n\n # Handle dataclass-based models\n if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), BaseModel):\n field_type = field_type.__pydantic_model__\n\n if issubclass(field_type, BaseModel):\n model_name = model_name_map[field_type]\n if field_type not in known_models:\n sub_schema, sub_definitions, sub_nested_models = model_process_schema(\n field_type,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n definitions.update(sub_definitions)\n definitions[model_name] = sub_schema\n nested_models.update(sub_nested_models)\n else:\n nested_models.add(model_name)\n schema_ref = get_schema_ref(model_name, ref_prefix, ref_template, schema_overrides)\n return schema_ref, definitions, nested_models\n\n # For generics with no args\n args = get_args(field_type)\n if args is not None and not args and Generic in field_type.__bases__:\n return f_schema, definitions, nested_models\n\n raise ValueError(f'Value not declarable with JSON Schema, field: {field}')\n\n\ndef multitypes_literal_field_for_schema(values: Tuple[Any, ...], field: ModelField) -> ModelField:\n \"\"\"\n To support `Literal` with values of different types, we split it into multiple `Literal` with same type\n e.g. 
`Literal['qwe', 'asd', 1, 2]` becomes `Union[Literal['qwe', 'asd'], Literal[1, 2]]`\n \"\"\"\n literal_distinct_types = defaultdict(list)\n for v in values:\n literal_distinct_types[v.__class__].append(v)\n distinct_literals = (Literal[tuple(same_type_values)] for same_type_values in literal_distinct_types.values())\n\n return ModelField(\n name=field.name,\n type_=Union[tuple(distinct_literals)], # type: ignore\n class_validators=field.class_validators,\n model_config=field.model_config,\n default=field.default,\n required=field.required,\n alias=field.alias,\n field_info=field.field_info,\n )\n\n\ndef encode_default(dft: Any) -> Any:\n if isinstance(dft, (int, float, str)):\n return dft\n elif sequence_like(dft):\n t = dft.__class__\n seq_args = (encode_default(v) for v in dft)\n return t(*seq_args) if is_namedtuple(t) else t(seq_args)\n elif isinstance(dft, dict):\n return {encode_default(k): encode_default(v) for k, v in dft.items()}\n elif dft is None:\n return None\n else:\n return pydantic_encoder(dft)\n\n\n_map_types_constraint: Dict[Any, Callable[..., type]] = {int: conint, float: confloat, Decimal: condecimal}\n\n\ndef get_annotation_from_field_info(\n annotation: Any, field_info: FieldInfo, field_name: str, validate_assignment: bool = False\n) -> Type[Any]:\n \"\"\"\n Get an annotation with validation implemented for numbers and strings based on the field_info.\n :param annotation: an annotation from a field specification, as ``str``, ``ConstrainedStr``\n :param field_info: an instance of FieldInfo, possibly with declarations for validations and JSON Schema\n :param field_name: name of the field for use in error messages\n :param validate_assignment: default False, flag for BaseModel Config value of validate_assignment\n :return: the same ``annotation`` if unmodified or a new annotation with validation in place\n \"\"\"\n constraints = field_info.get_constraints()\n\n used_constraints: Set[str] = set()\n if constraints:\n annotation, used_constraints = get_annotation_with_constraints(annotation, field_info)\n\n if validate_assignment:\n used_constraints.add('allow_mutation')\n\n unused_constraints = constraints - used_constraints\n if unused_constraints:\n raise ValueError(\n f'On field \"{field_name}\" the following field constraints are set but not enforced: '\n f'{\", \".join(unused_constraints)}. '\n f'\\nFor more details see https://pydantic-docs.helpmanual.io/usage/schema/#unenforced-field-constraints'\n )\n\n return annotation\n\n\ndef get_annotation_with_constraints(annotation: Any, field_info: FieldInfo) -> Tuple[Type[Any], Set[str]]: # noqa: C901\n \"\"\"\n Get an annotation with used constraints implemented for numbers and strings based on the field_info.\n\n :param annotation: an annotation from a field specification, as ``str``, ``ConstrainedStr``\n :param field_info: an instance of FieldInfo, possibly with declarations for validations and JSON Schema\n :return: the same ``annotation`` if unmodified or a new annotation along with the used constraints.\n \"\"\"\n used_constraints: Set[str] = set()\n\n def go(type_: Any) -> Type[Any]:\n if (\n is_literal_type(type_)\n or isinstance(type_, ForwardRef)\n or lenient_issubclass(type_, (ConstrainedList, ConstrainedSet))\n ):\n return type_\n origin = get_origin(type_)\n if origin is not None:\n args: Tuple[Any, ...] 
= get_args(type_)\n if any(isinstance(a, ForwardRef) for a in args):\n # forward refs cause infinite recursion below\n return type_\n\n if origin is Annotated:\n return go(args[0])\n if is_union_origin(origin):\n return Union[tuple(go(a) for a in args)] # type: ignore\n\n if issubclass(origin, List) and (field_info.min_items is not None or field_info.max_items is not None):\n used_constraints.update({'min_items', 'max_items'})\n return conlist(go(args[0]), min_items=field_info.min_items, max_items=field_info.max_items)\n\n if issubclass(origin, Set) and (field_info.min_items is not None or field_info.max_items is not None):\n used_constraints.update({'min_items', 'max_items'})\n return conset(go(args[0]), min_items=field_info.min_items, max_items=field_info.max_items)\n\n for t in (Tuple, List, Set, FrozenSet, Sequence):\n if issubclass(origin, t): # type: ignore\n return t[tuple(go(a) for a in args)] # type: ignore\n\n if issubclass(origin, Dict):\n return Dict[args[0], go(args[1])] # type: ignore\n\n attrs: Optional[Tuple[str, ...]] = None\n constraint_func: Optional[Callable[..., type]] = None\n if isinstance(type_, type):\n if issubclass(type_, (SecretStr, SecretBytes)):\n attrs = ('max_length', 'min_length')\n\n def constraint_func(**kw: Any) -> Type[Any]:\n return type(type_.__name__, (type_,), kw)\n\n elif issubclass(type_, str) and not issubclass(type_, (EmailStr, AnyUrl, ConstrainedStr)):\n attrs = ('max_length', 'min_length', 'regex')\n constraint_func = constr\n elif issubclass(type_, bytes):\n attrs = ('max_length', 'min_length', 'regex')\n constraint_func = conbytes\n elif issubclass(type_, numeric_types) and not issubclass(\n type_, (ConstrainedInt, ConstrainedFloat, ConstrainedDecimal, ConstrainedList, ConstrainedSet, bool)\n ):\n # Is numeric type\n attrs = ('gt', 'lt', 'ge', 'le', 'multiple_of')\n numeric_type = next(t for t in numeric_types if issubclass(type_, t)) # pragma: no branch\n constraint_func = _map_types_constraint[numeric_type]\n\n if attrs:\n used_constraints.update(set(attrs))\n kwargs = {\n attr_name: attr\n for attr_name, attr in ((attr_name, getattr(field_info, attr_name)) for attr_name in attrs)\n if attr is not None\n }\n if kwargs:\n constraint_func = cast(Callable[..., type], constraint_func)\n return constraint_func(**kwargs)\n return type_\n\n return go(annotation), used_constraints\n\n\ndef normalize_name(name: str) -> str:\n \"\"\"\n Normalizes the given name. This can be applied to either a model *or* enum.\n \"\"\"\n return re.sub(r'[^a-zA-Z0-9.\\-_]', '_', name)\n\n\nclass SkipField(Exception):\n \"\"\"\n Utility exception used to exclude fields from schema.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n self.message = message\n", "path": "pydantic/schema.py" } ]
[ { "content": "import re\nimport warnings\nfrom collections import defaultdict\nfrom datetime import date, datetime, time, timedelta\nfrom decimal import Decimal\nfrom enum import Enum\nfrom ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n FrozenSet,\n Generic,\n Iterable,\n List,\n Optional,\n Pattern,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n cast,\n)\nfrom uuid import UUID\n\nfrom typing_extensions import Annotated, Literal\n\nfrom .fields import (\n MAPPING_LIKE_SHAPES,\n SHAPE_DEQUE,\n SHAPE_FROZENSET,\n SHAPE_GENERIC,\n SHAPE_ITERABLE,\n SHAPE_LIST,\n SHAPE_SEQUENCE,\n SHAPE_SET,\n SHAPE_SINGLETON,\n SHAPE_TUPLE,\n SHAPE_TUPLE_ELLIPSIS,\n FieldInfo,\n ModelField,\n)\nfrom .json import pydantic_encoder\nfrom .networks import AnyUrl, EmailStr\nfrom .types import (\n ConstrainedDecimal,\n ConstrainedFloat,\n ConstrainedInt,\n ConstrainedList,\n ConstrainedSet,\n ConstrainedStr,\n SecretBytes,\n SecretStr,\n conbytes,\n condecimal,\n confloat,\n conint,\n conlist,\n conset,\n constr,\n)\nfrom .typing import (\n ForwardRef,\n all_literal_values,\n get_args,\n get_origin,\n is_callable_type,\n is_literal_type,\n is_namedtuple,\n is_none_type,\n is_union_origin,\n)\nfrom .utils import ROOT_KEY, get_model, lenient_issubclass, sequence_like\n\nif TYPE_CHECKING:\n from .dataclasses import Dataclass\n from .main import BaseModel\n\ndefault_prefix = '#/definitions/'\ndefault_ref_template = '#/definitions/{model}'\n\nTypeModelOrEnum = Union[Type['BaseModel'], Type[Enum]]\nTypeModelSet = Set[TypeModelOrEnum]\n\n\ndef schema(\n models: Sequence[Union[Type['BaseModel'], Type['Dataclass']]],\n *,\n by_alias: bool = True,\n title: Optional[str] = None,\n description: Optional[str] = None,\n ref_prefix: Optional[str] = None,\n ref_template: str = default_ref_template,\n) -> Dict[str, Any]:\n \"\"\"\n Process a list of models and generate a single JSON Schema with all of them defined in the ``definitions``\n top-level JSON key, including their sub-models.\n\n :param models: a list of models to include in the generated JSON Schema\n :param by_alias: generate the schemas using the aliases defined, if any\n :param title: title for the generated schema that includes the definitions\n :param description: description for the generated schema\n :param ref_prefix: the JSON Pointer prefix for schema references with ``$ref``, if None, will be set to the\n default of ``#/definitions/``. Update it if you want the schemas to reference the definitions somewhere\n else, e.g. for OpenAPI use ``#/components/schemas/``. The resulting generated schemas will still be at the\n top-level key ``definitions``, so you can extract them from there. But all the references will have the set\n prefix.\n :param ref_template: Use a ``string.format()`` template for ``$ref`` instead of a prefix. This can be useful\n for references that cannot be represented by ``ref_prefix`` such as a definition stored in another file. 
For\n a sibling json file in a ``/schemas`` directory use ``\"/schemas/${model}.json#\"``.\n :return: dict with the JSON Schema with a ``definitions`` top-level key including the schema definitions for\n the models and sub-models passed in ``models``.\n \"\"\"\n clean_models = [get_model(model) for model in models]\n flat_models = get_flat_models_from_models(clean_models)\n model_name_map = get_model_name_map(flat_models)\n definitions = {}\n output_schema: Dict[str, Any] = {}\n if title:\n output_schema['title'] = title\n if description:\n output_schema['description'] = description\n for model in clean_models:\n m_schema, m_definitions, m_nested_models = model_process_schema(\n model,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n )\n definitions.update(m_definitions)\n model_name = model_name_map[model]\n definitions[model_name] = m_schema\n if definitions:\n output_schema['definitions'] = definitions\n return output_schema\n\n\ndef model_schema(\n model: Union[Type['BaseModel'], Type['Dataclass']],\n by_alias: bool = True,\n ref_prefix: Optional[str] = None,\n ref_template: str = default_ref_template,\n) -> Dict[str, Any]:\n \"\"\"\n Generate a JSON Schema for one model. With all the sub-models defined in the ``definitions`` top-level\n JSON key.\n\n :param model: a Pydantic model (a class that inherits from BaseModel)\n :param by_alias: generate the schemas using the aliases defined, if any\n :param ref_prefix: the JSON Pointer prefix for schema references with ``$ref``, if None, will be set to the\n default of ``#/definitions/``. Update it if you want the schemas to reference the definitions somewhere\n else, e.g. for OpenAPI use ``#/components/schemas/``. The resulting generated schemas will still be at the\n top-level key ``definitions``, so you can extract them from there. But all the references will have the set\n prefix.\n :param ref_template: Use a ``string.format()`` template for ``$ref`` instead of a prefix. This can be useful for\n references that cannot be represented by ``ref_prefix`` such as a definition stored in another file. 
For a\n sibling json file in a ``/schemas`` directory use ``\"/schemas/${model}.json#\"``.\n :return: dict with the JSON Schema for the passed ``model``\n \"\"\"\n model = get_model(model)\n flat_models = get_flat_models_from_model(model)\n model_name_map = get_model_name_map(flat_models)\n model_name = model_name_map[model]\n m_schema, m_definitions, nested_models = model_process_schema(\n model, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix, ref_template=ref_template\n )\n if model_name in nested_models:\n # model_name is in Nested models, it has circular references\n m_definitions[model_name] = m_schema\n m_schema = get_schema_ref(model_name, ref_prefix, ref_template, False)\n if m_definitions:\n m_schema.update({'definitions': m_definitions})\n return m_schema\n\n\ndef get_field_info_schema(field: ModelField) -> Tuple[Dict[str, Any], bool]:\n schema_overrides = False\n\n # If no title is explicitly set, we don't set title in the schema for enums.\n # The behaviour is the same as `BaseModel` reference, where the default title\n # is in the definitions part of the schema.\n schema_: Dict[str, Any] = {}\n if field.field_info.title or not lenient_issubclass(field.type_, Enum):\n schema_['title'] = field.field_info.title or field.alias.title().replace('_', ' ')\n\n if field.field_info.title:\n schema_overrides = True\n\n if field.field_info.description:\n schema_['description'] = field.field_info.description\n schema_overrides = True\n\n if (\n not field.required\n and not field.field_info.const\n and field.default is not None\n and not is_callable_type(field.outer_type_)\n ):\n schema_['default'] = encode_default(field.default)\n schema_overrides = True\n\n return schema_, schema_overrides\n\n\ndef field_schema(\n field: ModelField,\n *,\n by_alias: bool = True,\n model_name_map: Dict[TypeModelOrEnum, str],\n ref_prefix: Optional[str] = None,\n ref_template: str = default_ref_template,\n known_models: TypeModelSet = None,\n) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:\n \"\"\"\n Process a Pydantic field and return a tuple with a JSON Schema for it as the first item.\n Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field\n is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they\n will be included in the definitions and referenced in the schema instead of included recursively.\n\n :param field: a Pydantic ``ModelField``\n :param by_alias: use the defined alias (if any) in the returned schema\n :param model_name_map: used to generate the JSON Schema references to other models included in the definitions\n :param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of\n #/definitions/ will be used\n :param ref_template: Use a ``string.format()`` template for ``$ref`` instead of a prefix. This can be useful for\n references that cannot be represented by ``ref_prefix`` such as a definition stored in another file. 
For a\n sibling json file in a ``/schemas`` directory use ``\"/schemas/${model}.json#\"``.\n :param known_models: used to solve circular references\n :return: tuple of the schema for this field and additional definitions\n \"\"\"\n s, schema_overrides = get_field_info_schema(field)\n\n validation_schema = get_field_schema_validations(field)\n if validation_schema:\n s.update(validation_schema)\n schema_overrides = True\n\n f_schema, f_definitions, f_nested_models = field_type_schema(\n field,\n by_alias=by_alias,\n model_name_map=model_name_map,\n schema_overrides=schema_overrides,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models or set(),\n )\n # $ref will only be returned when there are no schema_overrides\n if '$ref' in f_schema:\n return f_schema, f_definitions, f_nested_models\n else:\n s.update(f_schema)\n return s, f_definitions, f_nested_models\n\n\nnumeric_types = (int, float, Decimal)\n_str_types_attrs: Tuple[Tuple[str, Union[type, Tuple[type, ...]], str], ...] = (\n ('max_length', numeric_types, 'maxLength'),\n ('min_length', numeric_types, 'minLength'),\n ('regex', str, 'pattern'),\n)\n\n_numeric_types_attrs: Tuple[Tuple[str, Union[type, Tuple[type, ...]], str], ...] = (\n ('gt', numeric_types, 'exclusiveMinimum'),\n ('lt', numeric_types, 'exclusiveMaximum'),\n ('ge', numeric_types, 'minimum'),\n ('le', numeric_types, 'maximum'),\n ('multiple_of', numeric_types, 'multipleOf'),\n)\n\n\ndef get_field_schema_validations(field: ModelField) -> Dict[str, Any]:\n \"\"\"\n Get the JSON Schema validation keywords for a ``field`` with an annotation of\n a Pydantic ``FieldInfo`` with validation arguments.\n \"\"\"\n f_schema: Dict[str, Any] = {}\n\n if lenient_issubclass(field.type_, Enum):\n # schema is already updated by `enum_process_schema`; just update with field extra\n if field.field_info.extra:\n f_schema.update(field.field_info.extra)\n return f_schema\n\n if lenient_issubclass(field.type_, (str, bytes)):\n for attr_name, t, keyword in _str_types_attrs:\n attr = getattr(field.field_info, attr_name, None)\n if isinstance(attr, t):\n f_schema[keyword] = attr\n if lenient_issubclass(field.type_, numeric_types) and not issubclass(field.type_, bool):\n for attr_name, t, keyword in _numeric_types_attrs:\n attr = getattr(field.field_info, attr_name, None)\n if isinstance(attr, t):\n f_schema[keyword] = attr\n if field.field_info is not None and field.field_info.const:\n f_schema['const'] = field.default\n if field.field_info.extra:\n f_schema.update(field.field_info.extra)\n modify_schema = getattr(field.outer_type_, '__modify_schema__', None)\n if modify_schema:\n modify_schema(f_schema)\n return f_schema\n\n\ndef get_model_name_map(unique_models: TypeModelSet) -> Dict[TypeModelOrEnum, str]:\n \"\"\"\n Process a set of models and generate unique names for them to be used as keys in the JSON Schema\n definitions. By default the names are the same as the class name. But if two models in different Python\n modules have the same name (e.g. 
\"users.Model\" and \"items.Model\"), the generated names will be\n based on the Python module path for those conflicting models to prevent name collisions.\n\n :param unique_models: a Python set of models\n :return: dict mapping models to names\n \"\"\"\n name_model_map = {}\n conflicting_names: Set[str] = set()\n for model in unique_models:\n model_name = normalize_name(model.__name__)\n if model_name in conflicting_names:\n model_name = get_long_model_name(model)\n name_model_map[model_name] = model\n elif model_name in name_model_map:\n conflicting_names.add(model_name)\n conflicting_model = name_model_map.pop(model_name)\n name_model_map[get_long_model_name(conflicting_model)] = conflicting_model\n name_model_map[get_long_model_name(model)] = model\n else:\n name_model_map[model_name] = model\n return {v: k for k, v in name_model_map.items()}\n\n\ndef get_flat_models_from_model(model: Type['BaseModel'], known_models: TypeModelSet = None) -> TypeModelSet:\n \"\"\"\n Take a single ``model`` and generate a set with itself and all the sub-models in the tree. I.e. if you pass\n model ``Foo`` (subclass of Pydantic ``BaseModel``) as ``model``, and it has a field of type ``Bar`` (also\n subclass of ``BaseModel``) and that model ``Bar`` has a field of type ``Baz`` (also subclass of ``BaseModel``),\n the return value will be ``set([Foo, Bar, Baz])``.\n\n :param model: a Pydantic ``BaseModel`` subclass\n :param known_models: used to solve circular references\n :return: a set with the initial model and all its sub-models\n \"\"\"\n known_models = known_models or set()\n flat_models: TypeModelSet = set()\n flat_models.add(model)\n known_models |= flat_models\n fields = cast(Sequence[ModelField], model.__fields__.values())\n flat_models |= get_flat_models_from_fields(fields, known_models=known_models)\n return flat_models\n\n\ndef get_flat_models_from_field(field: ModelField, known_models: TypeModelSet) -> TypeModelSet:\n \"\"\"\n Take a single Pydantic ``ModelField`` (from a model) that could have been declared as a sublcass of BaseModel\n (so, it could be a submodel), and generate a set with its model and all the sub-models in the tree.\n I.e. 
if you pass a field that was declared to be of type ``Foo`` (subclass of BaseModel) as ``field``, and that\n model ``Foo`` has a field of type ``Bar`` (also subclass of ``BaseModel``) and that model ``Bar`` has a field of\n type ``Baz`` (also subclass of ``BaseModel``), the return value will be ``set([Foo, Bar, Baz])``.\n\n :param field: a Pydantic ``ModelField``\n :param known_models: used to solve circular references\n :return: a set with the model used in the declaration for this field, if any, and all its sub-models\n \"\"\"\n from .dataclasses import dataclass, is_builtin_dataclass\n from .main import BaseModel\n\n flat_models: TypeModelSet = set()\n\n # Handle dataclass-based models\n if is_builtin_dataclass(field.type_):\n field.type_ = dataclass(field.type_)\n field_type = field.type_\n if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), BaseModel):\n field_type = field_type.__pydantic_model__\n if field.sub_fields and not lenient_issubclass(field_type, BaseModel):\n flat_models |= get_flat_models_from_fields(field.sub_fields, known_models=known_models)\n elif lenient_issubclass(field_type, BaseModel) and field_type not in known_models:\n flat_models |= get_flat_models_from_model(field_type, known_models=known_models)\n elif lenient_issubclass(field_type, Enum):\n flat_models.add(field_type)\n return flat_models\n\n\ndef get_flat_models_from_fields(fields: Sequence[ModelField], known_models: TypeModelSet) -> TypeModelSet:\n \"\"\"\n Take a list of Pydantic ``ModelField``s (from a model) that could have been declared as subclasses of ``BaseModel``\n (so, any of them could be a submodel), and generate a set with their models and all the sub-models in the tree.\n I.e. if you pass a the fields of a model ``Foo`` (subclass of ``BaseModel``) as ``fields``, and on of them has a\n field of type ``Bar`` (also subclass of ``BaseModel``) and that model ``Bar`` has a field of type ``Baz`` (also\n subclass of ``BaseModel``), the return value will be ``set([Foo, Bar, Baz])``.\n\n :param fields: a list of Pydantic ``ModelField``s\n :param known_models: used to solve circular references\n :return: a set with any model declared in the fields, and all their sub-models\n \"\"\"\n flat_models: TypeModelSet = set()\n for field in fields:\n flat_models |= get_flat_models_from_field(field, known_models=known_models)\n return flat_models\n\n\ndef get_flat_models_from_models(models: Sequence[Type['BaseModel']]) -> TypeModelSet:\n \"\"\"\n Take a list of ``models`` and generate a set with them and all their sub-models in their trees. I.e. 
if you pass\n a list of two models, ``Foo`` and ``Bar``, both subclasses of Pydantic ``BaseModel`` as models, and ``Bar`` has\n a field of type ``Baz`` (also subclass of ``BaseModel``), the return value will be ``set([Foo, Bar, Baz])``.\n \"\"\"\n flat_models: TypeModelSet = set()\n for model in models:\n flat_models |= get_flat_models_from_model(model)\n return flat_models\n\n\ndef get_long_model_name(model: TypeModelOrEnum) -> str:\n return f'{model.__module__}__{model.__qualname__}'.replace('.', '__')\n\n\ndef field_type_schema(\n field: ModelField,\n *,\n by_alias: bool,\n model_name_map: Dict[TypeModelOrEnum, str],\n ref_template: str,\n schema_overrides: bool = False,\n ref_prefix: Optional[str] = None,\n known_models: TypeModelSet,\n) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:\n \"\"\"\n Used by ``field_schema()``, you probably should be using that function.\n\n Take a single ``field`` and generate the schema for its type only, not including additional\n information as title, etc. Also return additional schema definitions, from sub-models.\n \"\"\"\n from .main import BaseModel # noqa: F811\n\n definitions = {}\n nested_models: Set[str] = set()\n f_schema: Dict[str, Any]\n if field.shape in {\n SHAPE_LIST,\n SHAPE_TUPLE_ELLIPSIS,\n SHAPE_SEQUENCE,\n SHAPE_SET,\n SHAPE_FROZENSET,\n SHAPE_ITERABLE,\n SHAPE_DEQUE,\n }:\n items_schema, f_definitions, f_nested_models = field_singleton_schema(\n field,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n definitions.update(f_definitions)\n nested_models.update(f_nested_models)\n f_schema = {'type': 'array', 'items': items_schema}\n if field.shape in {SHAPE_SET, SHAPE_FROZENSET}:\n f_schema['uniqueItems'] = True\n\n elif field.shape in MAPPING_LIKE_SHAPES:\n f_schema = {'type': 'object'}\n key_field = cast(ModelField, field.key_field)\n regex = getattr(key_field.type_, 'regex', None)\n items_schema, f_definitions, f_nested_models = field_singleton_schema(\n field,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n definitions.update(f_definitions)\n nested_models.update(f_nested_models)\n if regex:\n # Dict keys have a regex pattern\n # items_schema might be a schema or empty dict, add it either way\n f_schema['patternProperties'] = {regex.pattern: items_schema}\n elif items_schema:\n # The dict values are not simply Any, so they need a schema\n f_schema['additionalProperties'] = items_schema\n elif field.shape == SHAPE_TUPLE or (field.shape == SHAPE_GENERIC and not issubclass(field.type_, BaseModel)):\n sub_schema = []\n sub_fields = cast(List[ModelField], field.sub_fields)\n for sf in sub_fields:\n sf_schema, sf_definitions, sf_nested_models = field_type_schema(\n sf,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n definitions.update(sf_definitions)\n nested_models.update(sf_nested_models)\n sub_schema.append(sf_schema)\n if len(sub_schema) == 1:\n if field.shape == SHAPE_GENERIC:\n f_schema = sub_schema[0]\n else:\n f_schema = {'type': 'array', 'items': sub_schema[0]}\n else:\n f_schema = {'type': 'array', 'items': sub_schema}\n if field.shape == SHAPE_GENERIC:\n f_schema = {'allOf': [f_schema]}\n else:\n assert field.shape in {SHAPE_SINGLETON, SHAPE_GENERIC}, field.shape\n f_schema, f_definitions, f_nested_models = field_singleton_schema(\n field,\n 
by_alias=by_alias,\n model_name_map=model_name_map,\n schema_overrides=schema_overrides,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n definitions.update(f_definitions)\n nested_models.update(f_nested_models)\n\n # check field type to avoid repeated calls to the same __modify_schema__ method\n if field.type_ != field.outer_type_:\n if field.shape == SHAPE_GENERIC:\n field_type = field.type_\n else:\n field_type = field.outer_type_\n modify_schema = getattr(field_type, '__modify_schema__', None)\n if modify_schema:\n modify_schema(f_schema)\n return f_schema, definitions, nested_models\n\n\ndef model_process_schema(\n model: TypeModelOrEnum,\n *,\n by_alias: bool = True,\n model_name_map: Dict[TypeModelOrEnum, str],\n ref_prefix: Optional[str] = None,\n ref_template: str = default_ref_template,\n known_models: TypeModelSet = None,\n) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:\n \"\"\"\n Used by ``model_schema()``, you probably should be using that function.\n\n Take a single ``model`` and generate its schema. Also return additional schema definitions, from sub-models. The\n sub-models of the returned schema will be referenced, but their definitions will not be included in the schema. All\n the definitions are returned as the second value.\n \"\"\"\n from inspect import getdoc, signature\n\n known_models = known_models or set()\n if lenient_issubclass(model, Enum):\n model = cast(Type[Enum], model)\n s = enum_process_schema(model)\n return s, {}, set()\n model = cast(Type['BaseModel'], model)\n s = {'title': model.__config__.title or model.__name__}\n doc = getdoc(model)\n if doc:\n s['description'] = doc\n known_models.add(model)\n m_schema, m_definitions, nested_models = model_type_schema(\n model,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n s.update(m_schema)\n schema_extra = model.__config__.schema_extra\n if callable(schema_extra):\n if len(signature(schema_extra).parameters) == 1:\n schema_extra(s)\n else:\n schema_extra(s, model)\n else:\n s.update(schema_extra)\n return s, m_definitions, nested_models\n\n\ndef model_type_schema(\n model: Type['BaseModel'],\n *,\n by_alias: bool,\n model_name_map: Dict[TypeModelOrEnum, str],\n ref_template: str,\n ref_prefix: Optional[str] = None,\n known_models: TypeModelSet,\n) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:\n \"\"\"\n You probably should be using ``model_schema()``, this function is indirectly used by that function.\n\n Take a single ``model`` and generate the schema for its type only, not including additional\n information as title, etc. 
Also return additional schema definitions, from sub-models.\n \"\"\"\n properties = {}\n required = []\n definitions: Dict[str, Any] = {}\n nested_models: Set[str] = set()\n for k, f in model.__fields__.items():\n try:\n f_schema, f_definitions, f_nested_models = field_schema(\n f,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n except SkipField as skip:\n warnings.warn(skip.message, UserWarning)\n continue\n definitions.update(f_definitions)\n nested_models.update(f_nested_models)\n if by_alias:\n properties[f.alias] = f_schema\n if f.required:\n required.append(f.alias)\n else:\n properties[k] = f_schema\n if f.required:\n required.append(k)\n if ROOT_KEY in properties:\n out_schema = properties[ROOT_KEY]\n out_schema['title'] = model.__config__.title or model.__name__\n else:\n out_schema = {'type': 'object', 'properties': properties}\n if required:\n out_schema['required'] = required\n if model.__config__.extra == 'forbid':\n out_schema['additionalProperties'] = False\n return out_schema, definitions, nested_models\n\n\ndef enum_process_schema(enum: Type[Enum]) -> Dict[str, Any]:\n \"\"\"\n Take a single `enum` and generate its schema.\n\n This is similar to the `model_process_schema` function, but applies to ``Enum`` objects.\n \"\"\"\n from inspect import getdoc\n\n schema_: Dict[str, Any] = {\n 'title': enum.__name__,\n # Python assigns all enums a default docstring value of 'An enumeration', so\n # all enums will have a description field even if not explicitly provided.\n 'description': getdoc(enum),\n # Add enum values and the enum field type to the schema.\n 'enum': [item.value for item in cast(Iterable[Enum], enum)],\n }\n\n add_field_type_to_schema(enum, schema_)\n\n modify_schema = getattr(enum, '__modify_schema__', None)\n if modify_schema:\n modify_schema(schema_)\n\n return schema_\n\n\ndef field_singleton_sub_fields_schema(\n sub_fields: Sequence[ModelField],\n *,\n by_alias: bool,\n model_name_map: Dict[TypeModelOrEnum, str],\n ref_template: str,\n schema_overrides: bool = False,\n ref_prefix: Optional[str] = None,\n known_models: TypeModelSet,\n) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:\n \"\"\"\n This function is indirectly used by ``field_schema()``, you probably should be using that function.\n\n Take a list of Pydantic ``ModelField`` from the declaration of a type with parameters, and generate their\n schema. I.e., fields used as \"type parameters\", like ``str`` and ``int`` in ``Tuple[str, int]``.\n \"\"\"\n definitions = {}\n nested_models: Set[str] = set()\n if len(sub_fields) == 1:\n return field_type_schema(\n sub_fields[0],\n by_alias=by_alias,\n model_name_map=model_name_map,\n schema_overrides=schema_overrides,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n else:\n sub_field_schemas = []\n for sf in sub_fields:\n sub_schema, sub_definitions, sub_nested_models = field_type_schema(\n sf,\n by_alias=by_alias,\n model_name_map=model_name_map,\n schema_overrides=schema_overrides,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n definitions.update(sub_definitions)\n if schema_overrides and 'allOf' in sub_schema:\n # if the sub_field is a referenced schema we only need the referenced\n # object. 
Otherwise we will end up with several allOf inside anyOf.\n # See https://github.com/samuelcolvin/pydantic/issues/1209\n sub_schema = sub_schema['allOf'][0]\n sub_field_schemas.append(sub_schema)\n nested_models.update(sub_nested_models)\n return {'anyOf': sub_field_schemas}, definitions, nested_models\n\n\n# Order is important, e.g. subclasses of str must go before str\n# this is used only for standard library types, custom types should use __modify_schema__ instead\nfield_class_to_schema: Tuple[Tuple[Any, Dict[str, Any]], ...] = (\n (Path, {'type': 'string', 'format': 'path'}),\n (datetime, {'type': 'string', 'format': 'date-time'}),\n (date, {'type': 'string', 'format': 'date'}),\n (time, {'type': 'string', 'format': 'time'}),\n (timedelta, {'type': 'number', 'format': 'time-delta'}),\n (IPv4Network, {'type': 'string', 'format': 'ipv4network'}),\n (IPv6Network, {'type': 'string', 'format': 'ipv6network'}),\n (IPv4Interface, {'type': 'string', 'format': 'ipv4interface'}),\n (IPv6Interface, {'type': 'string', 'format': 'ipv6interface'}),\n (IPv4Address, {'type': 'string', 'format': 'ipv4'}),\n (IPv6Address, {'type': 'string', 'format': 'ipv6'}),\n (Pattern, {'type': 'string', 'format': 'regex'}),\n (str, {'type': 'string'}),\n (bytes, {'type': 'string', 'format': 'binary'}),\n (bool, {'type': 'boolean'}),\n (int, {'type': 'integer'}),\n (float, {'type': 'number'}),\n (Decimal, {'type': 'number'}),\n (UUID, {'type': 'string', 'format': 'uuid'}),\n (dict, {'type': 'object'}),\n (list, {'type': 'array', 'items': {}}),\n (tuple, {'type': 'array', 'items': {}}),\n (set, {'type': 'array', 'items': {}, 'uniqueItems': True}),\n (frozenset, {'type': 'array', 'items': {}, 'uniqueItems': True}),\n)\n\njson_scheme = {'type': 'string', 'format': 'json-string'}\n\n\ndef add_field_type_to_schema(field_type: Any, schema_: Dict[str, Any]) -> None:\n \"\"\"\n Update the given `schema` with the type-specific metadata for the given `field_type`.\n\n This function looks through `field_class_to_schema` for a class that matches the given `field_type`,\n and then modifies the given `schema` with the information from that type.\n \"\"\"\n for type_, t_schema in field_class_to_schema:\n # Fallback for `typing.Pattern` as it is not a valid class\n if lenient_issubclass(field_type, type_) or field_type is type_ is Pattern:\n schema_.update(t_schema)\n break\n\n\ndef get_schema_ref(name: str, ref_prefix: Optional[str], ref_template: str, schema_overrides: bool) -> Dict[str, Any]:\n if ref_prefix:\n schema_ref = {'$ref': ref_prefix + name}\n else:\n schema_ref = {'$ref': ref_template.format(model=name)}\n return {'allOf': [schema_ref]} if schema_overrides else schema_ref\n\n\ndef field_singleton_schema( # noqa: C901 (ignore complexity)\n field: ModelField,\n *,\n by_alias: bool,\n model_name_map: Dict[TypeModelOrEnum, str],\n ref_template: str,\n schema_overrides: bool = False,\n ref_prefix: Optional[str] = None,\n known_models: TypeModelSet,\n) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:\n \"\"\"\n This function is indirectly used by ``field_schema()``, you should probably be using that function.\n\n Take a single Pydantic ``ModelField``, and return its schema and any additional definitions from sub-models.\n \"\"\"\n from .main import BaseModel\n\n definitions: Dict[str, Any] = {}\n nested_models: Set[str] = set()\n field_type = field.type_\n\n # Recurse into this field if it contains sub_fields and is NOT a\n # BaseModel OR that BaseModel is a const\n if field.sub_fields and (\n (field.field_info and 
field.field_info.const) or not lenient_issubclass(field_type, BaseModel)\n ):\n return field_singleton_sub_fields_schema(\n field.sub_fields,\n by_alias=by_alias,\n model_name_map=model_name_map,\n schema_overrides=schema_overrides,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n if field_type is Any or field_type is object or field_type.__class__ == TypeVar:\n return {}, definitions, nested_models # no restrictions\n if is_none_type(field_type):\n return {'type': 'null'}, definitions, nested_models\n if is_callable_type(field_type):\n raise SkipField(f'Callable {field.name} was excluded from schema since JSON schema has no equivalent type.')\n f_schema: Dict[str, Any] = {}\n if field.field_info is not None and field.field_info.const:\n f_schema['const'] = field.default\n\n if is_literal_type(field_type):\n values = all_literal_values(field_type)\n\n if len({v.__class__ for v in values}) > 1:\n return field_schema(\n multitypes_literal_field_for_schema(values, field),\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n\n # All values have the same type\n field_type = values[0].__class__\n f_schema['enum'] = list(values)\n add_field_type_to_schema(field_type, f_schema)\n elif lenient_issubclass(field_type, Enum):\n enum_name = model_name_map[field_type]\n f_schema, schema_overrides = get_field_info_schema(field)\n f_schema.update(get_schema_ref(enum_name, ref_prefix, ref_template, schema_overrides))\n definitions[enum_name] = enum_process_schema(field_type)\n elif is_namedtuple(field_type):\n sub_schema, *_ = model_process_schema(\n field_type.__pydantic_model__,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n f_schema.update({'type': 'array', 'items': list(sub_schema['properties'].values())})\n elif not hasattr(field_type, '__pydantic_model__'):\n add_field_type_to_schema(field_type, f_schema)\n\n modify_schema = getattr(field_type, '__modify_schema__', None)\n if modify_schema:\n modify_schema(f_schema)\n\n if f_schema:\n return f_schema, definitions, nested_models\n\n # Handle dataclass-based models\n if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), BaseModel):\n field_type = field_type.__pydantic_model__\n\n if issubclass(field_type, BaseModel):\n model_name = model_name_map[field_type]\n if field_type not in known_models:\n sub_schema, sub_definitions, sub_nested_models = model_process_schema(\n field_type,\n by_alias=by_alias,\n model_name_map=model_name_map,\n ref_prefix=ref_prefix,\n ref_template=ref_template,\n known_models=known_models,\n )\n definitions.update(sub_definitions)\n definitions[model_name] = sub_schema\n nested_models.update(sub_nested_models)\n else:\n nested_models.add(model_name)\n schema_ref = get_schema_ref(model_name, ref_prefix, ref_template, schema_overrides)\n return schema_ref, definitions, nested_models\n\n # For generics with no args\n args = get_args(field_type)\n if args is not None and not args and Generic in field_type.__bases__:\n return f_schema, definitions, nested_models\n\n raise ValueError(f'Value not declarable with JSON Schema, field: {field}')\n\n\ndef multitypes_literal_field_for_schema(values: Tuple[Any, ...], field: ModelField) -> ModelField:\n \"\"\"\n To support `Literal` with values of different types, we split it into multiple `Literal` with same type\n e.g. 
`Literal['qwe', 'asd', 1, 2]` becomes `Union[Literal['qwe', 'asd'], Literal[1, 2]]`\n \"\"\"\n literal_distinct_types = defaultdict(list)\n for v in values:\n literal_distinct_types[v.__class__].append(v)\n distinct_literals = (Literal[tuple(same_type_values)] for same_type_values in literal_distinct_types.values())\n\n return ModelField(\n name=field.name,\n type_=Union[tuple(distinct_literals)], # type: ignore\n class_validators=field.class_validators,\n model_config=field.model_config,\n default=field.default,\n required=field.required,\n alias=field.alias,\n field_info=field.field_info,\n )\n\n\ndef encode_default(dft: Any) -> Any:\n if isinstance(dft, Enum):\n return dft.value\n elif isinstance(dft, (int, float, str)):\n return dft\n elif sequence_like(dft):\n t = dft.__class__\n seq_args = (encode_default(v) for v in dft)\n return t(*seq_args) if is_namedtuple(t) else t(seq_args)\n elif isinstance(dft, dict):\n return {encode_default(k): encode_default(v) for k, v in dft.items()}\n elif dft is None:\n return None\n else:\n return pydantic_encoder(dft)\n\n\n_map_types_constraint: Dict[Any, Callable[..., type]] = {int: conint, float: confloat, Decimal: condecimal}\n\n\ndef get_annotation_from_field_info(\n annotation: Any, field_info: FieldInfo, field_name: str, validate_assignment: bool = False\n) -> Type[Any]:\n \"\"\"\n Get an annotation with validation implemented for numbers and strings based on the field_info.\n :param annotation: an annotation from a field specification, as ``str``, ``ConstrainedStr``\n :param field_info: an instance of FieldInfo, possibly with declarations for validations and JSON Schema\n :param field_name: name of the field for use in error messages\n :param validate_assignment: default False, flag for BaseModel Config value of validate_assignment\n :return: the same ``annotation`` if unmodified or a new annotation with validation in place\n \"\"\"\n constraints = field_info.get_constraints()\n\n used_constraints: Set[str] = set()\n if constraints:\n annotation, used_constraints = get_annotation_with_constraints(annotation, field_info)\n\n if validate_assignment:\n used_constraints.add('allow_mutation')\n\n unused_constraints = constraints - used_constraints\n if unused_constraints:\n raise ValueError(\n f'On field \"{field_name}\" the following field constraints are set but not enforced: '\n f'{\", \".join(unused_constraints)}. '\n f'\\nFor more details see https://pydantic-docs.helpmanual.io/usage/schema/#unenforced-field-constraints'\n )\n\n return annotation\n\n\ndef get_annotation_with_constraints(annotation: Any, field_info: FieldInfo) -> Tuple[Type[Any], Set[str]]: # noqa: C901\n \"\"\"\n Get an annotation with used constraints implemented for numbers and strings based on the field_info.\n\n :param annotation: an annotation from a field specification, as ``str``, ``ConstrainedStr``\n :param field_info: an instance of FieldInfo, possibly with declarations for validations and JSON Schema\n :return: the same ``annotation`` if unmodified or a new annotation along with the used constraints.\n \"\"\"\n used_constraints: Set[str] = set()\n\n def go(type_: Any) -> Type[Any]:\n if (\n is_literal_type(type_)\n or isinstance(type_, ForwardRef)\n or lenient_issubclass(type_, (ConstrainedList, ConstrainedSet))\n ):\n return type_\n origin = get_origin(type_)\n if origin is not None:\n args: Tuple[Any, ...] 
= get_args(type_)\n if any(isinstance(a, ForwardRef) for a in args):\n # forward refs cause infinite recursion below\n return type_\n\n if origin is Annotated:\n return go(args[0])\n if is_union_origin(origin):\n return Union[tuple(go(a) for a in args)] # type: ignore\n\n if issubclass(origin, List) and (field_info.min_items is not None or field_info.max_items is not None):\n used_constraints.update({'min_items', 'max_items'})\n return conlist(go(args[0]), min_items=field_info.min_items, max_items=field_info.max_items)\n\n if issubclass(origin, Set) and (field_info.min_items is not None or field_info.max_items is not None):\n used_constraints.update({'min_items', 'max_items'})\n return conset(go(args[0]), min_items=field_info.min_items, max_items=field_info.max_items)\n\n for t in (Tuple, List, Set, FrozenSet, Sequence):\n if issubclass(origin, t): # type: ignore\n return t[tuple(go(a) for a in args)] # type: ignore\n\n if issubclass(origin, Dict):\n return Dict[args[0], go(args[1])] # type: ignore\n\n attrs: Optional[Tuple[str, ...]] = None\n constraint_func: Optional[Callable[..., type]] = None\n if isinstance(type_, type):\n if issubclass(type_, (SecretStr, SecretBytes)):\n attrs = ('max_length', 'min_length')\n\n def constraint_func(**kw: Any) -> Type[Any]:\n return type(type_.__name__, (type_,), kw)\n\n elif issubclass(type_, str) and not issubclass(type_, (EmailStr, AnyUrl, ConstrainedStr)):\n attrs = ('max_length', 'min_length', 'regex')\n constraint_func = constr\n elif issubclass(type_, bytes):\n attrs = ('max_length', 'min_length', 'regex')\n constraint_func = conbytes\n elif issubclass(type_, numeric_types) and not issubclass(\n type_, (ConstrainedInt, ConstrainedFloat, ConstrainedDecimal, ConstrainedList, ConstrainedSet, bool)\n ):\n # Is numeric type\n attrs = ('gt', 'lt', 'ge', 'le', 'multiple_of')\n numeric_type = next(t for t in numeric_types if issubclass(type_, t)) # pragma: no branch\n constraint_func = _map_types_constraint[numeric_type]\n\n if attrs:\n used_constraints.update(set(attrs))\n kwargs = {\n attr_name: attr\n for attr_name, attr in ((attr_name, getattr(field_info, attr_name)) for attr_name in attrs)\n if attr is not None\n }\n if kwargs:\n constraint_func = cast(Callable[..., type], constraint_func)\n return constraint_func(**kwargs)\n return type_\n\n return go(annotation), used_constraints\n\n\ndef normalize_name(name: str) -> str:\n \"\"\"\n Normalizes the given name. This can be applied to either a model *or* enum.\n \"\"\"\n return re.sub(r'[^a-zA-Z0-9.\\-_]', '_', name)\n\n\nclass SkipField(Exception):\n \"\"\"\n Utility exception used to exclude fields from schema.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n self.message = message\n", "path": "pydantic/schema.py" } ]
diff --git a/changes/3190-joaommartins.md b/changes/3190-joaommartins.md new file mode 100644 index 00000000000..74af4f13a4e --- /dev/null +++ b/changes/3190-joaommartins.md @@ -0,0 +1 @@ +Always use `Enum` value as default in generated JSON schema. \ No newline at end of file diff --git a/pydantic/schema.py b/pydantic/schema.py index 581b248d370..7f44be924a1 100644 --- a/pydantic/schema.py +++ b/pydantic/schema.py @@ -909,7 +909,9 @@ def multitypes_literal_field_for_schema(values: Tuple[Any, ...], field: ModelFie def encode_default(dft: Any) -> Any: - if isinstance(dft, (int, float, str)): + if isinstance(dft, Enum): + return dft.value + elif isinstance(dft, (int, float, str)): return dft elif sequence_like(dft): t = dft.__class__ diff --git a/tests/test_schema.py b/tests/test_schema.py index cd4e2e4f8d7..7bdfcfebb60 100644 --- a/tests/test_schema.py +++ b/tests/test_schema.py @@ -1404,6 +1404,26 @@ class UserModel(BaseModel): } +def test_enum_str_default(): + class MyEnum(str, Enum): + FOO = 'foo' + + class UserModel(BaseModel): + friends: MyEnum = MyEnum.FOO + + assert UserModel.schema()['properties']['friends']['default'] is MyEnum.FOO.value + + +def test_enum_int_default(): + class MyEnum(IntEnum): + FOO = 1 + + class UserModel(BaseModel): + friends: MyEnum = MyEnum.FOO + + assert UserModel.schema()['properties']['friends']['default'] is MyEnum.FOO.value + + def test_dict_default(): class UserModel(BaseModel): friends: Dict[str, float] = {'a': 1.1, 'b': 2.2}
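As a quick hedged illustration of what the patch above changes (a sketch mirroring the new tests, not part of the dataset row): before the fix, a `str`- or `int`-based `Enum` default fell into the plain `isinstance(dft, (int, float, str))` branch and the enum member itself leaked into the schema; with the `Enum` branch checked first, the plain value is emitted.

```python
from enum import Enum

from pydantic import BaseModel


class MyEnum(str, Enum):
    FOO = 'foo'


class UserModel(BaseModel):
    friends: MyEnum = MyEnum.FOO


schema_default = UserModel.schema()['properties']['friends']['default']
# With the patched encode_default(), the schema carries the plain value
# 'foo'; before the patch it carried the MyEnum.FOO member itself.
assert schema_default is MyEnum.FOO.value
assert not isinstance(schema_default, Enum)
```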
instadeepai__Mava-626
[TEST] Jax Datasets ### What do you want to test? Jax dataset components ### Outline of test structure * Unit tests * Test components and hooks ### Definition of done Passing checks, cover all hooks, edge cases considered ### Mandatory checklist before making a PR * [ ] The success criteria laid down in “Definition of done” are met. * [ ] Test code is documented - docstrings for methods and classes, static types for arguments. * [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.
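A hedged sketch of the kind of unit test this issue asks for, limited to configuration so it needs no running reverb server (the PR's full test, in the diff further below, goes further and stands up a real `reverb.Server`); it assumes `mava` is installed and only exercises attributes visible in the component source:

```python
from mava.components.jax.building.datasets import (
    TransitionDataset,
    TransitionDatasetConfig,
)


def test_transition_dataset_config() -> None:
    """Component stores the config it was built with and exposes its name."""
    config = TransitionDatasetConfig(sample_batch_size=128)
    component = TransitionDataset(config=config)

    assert component.name() == "trainer_dataset"
    assert component.config.sample_batch_size == 128
    assert component.config.prefetch_size is None
```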
[ { "content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Commonly used dataset components for system builders\"\"\"\nimport abc\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Optional\n\nimport reverb\nfrom acme import datasets\n\nfrom mava.components.jax import Component\nfrom mava.core_jax import SystemBuilder\n\nTransform = Callable[[reverb.ReplaySample], reverb.ReplaySample]\n\n\nclass TrainerDataset(Component):\n @abc.abstractmethod\n def __init__(\n self,\n config: Any,\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n @abc.abstractmethod\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n pass\n\n @staticmethod\n def name() -> str:\n \"\"\"_summary_\n\n Returns:\n _description_\n \"\"\"\n return \"trainer_dataset\"\n\n\n@dataclass\nclass TransitionDatasetConfig:\n sample_batch_size: int = 256\n prefetch_size: Optional[int] = None\n num_parallel_calls: int = 12\n max_in_flight_samples_per_worker: Optional[int] = None\n postprocess: Optional[Transform] = None\n # dataset_name: str = \"transition_dataset\"\n\n\nclass TransitionDataset(TrainerDataset):\n def __init__(\n self,\n config: TransitionDatasetConfig = TransitionDatasetConfig(),\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n max_in_flight_samples_per_worker = self.config.max_in_flight_samples_per_worker\n dataset = datasets.make_reverb_dataset(\n table=builder.store.trainer_id,\n server_address=builder.store.data_server_client.server_address,\n batch_size=self.config.sample_batch_size,\n prefetch_size=self.config.prefetch_size,\n num_parallel_calls=self.config.num_parallel_calls,\n max_in_flight_samples_per_worker=max_in_flight_samples_per_worker,\n postprocess=self.config.postprocess,\n )\n\n builder.store.dataset = iter(dataset)\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return TransitionDatasetConfig\n\n\n@dataclass\nclass TrajectoryDatasetConfig:\n sample_batch_size: int = 256\n max_in_flight_samples_per_worker: int = 512\n num_workers_per_iterator: int = -1\n max_samples_per_stream: int = -1\n rate_limiter_timeout_ms: int = -1\n get_signature_timeout_secs: Optional[int] = None\n # max_samples: int = -1\n # dataset_name: str = \"trajectory_dataset\"\n\n\nclass TrajectoryDataset(TrainerDataset):\n def __init__(\n self,\n config: TrajectoryDatasetConfig = TrajectoryDatasetConfig(),\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : 
_description_\n \"\"\"\n dataset = reverb.TrajectoryDataset.from_table_signature(\n server_address=builder.store.data_server_client.server_address,\n table=builder.store.trainer_id,\n max_in_flight_samples_per_worker=2 * self.config.sample_batch_size,\n num_workers_per_iterator=self.config.num_workers_per_iterator,\n max_samples_per_stream=self.config.max_samples_per_stream,\n rate_limiter_timeout_ms=self.config.rate_limiter_timeout_ms,\n get_signature_timeout_secs=self.config.get_signature_timeout_secs,\n # max_samples=self.config.max_samples,\n )\n\n # Add batch dimension.\n dataset = dataset.batch(self.config.sample_batch_size, drop_remainder=True)\n builder.store.sample_batch_size = self.config.sample_batch_size\n\n builder.store.dataset_iterator = dataset.as_numpy_iterator()\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return TrajectoryDatasetConfig\n", "path": "mava/components/jax/building/datasets.py" } ]
[ { "content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Commonly used dataset components for system builders\"\"\"\nimport abc\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Optional\n\nimport reverb\nfrom acme import datasets\n\nfrom mava.components.jax import Component\nfrom mava.core_jax import SystemBuilder\n\nTransform = Callable[[reverb.ReplaySample], reverb.ReplaySample]\n\n\nclass TrainerDataset(Component):\n @abc.abstractmethod\n def __init__(\n self,\n config: Any,\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n @abc.abstractmethod\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n pass\n\n @staticmethod\n def name() -> str:\n \"\"\"_summary_\n\n Returns:\n _description_\n \"\"\"\n return \"trainer_dataset\"\n\n\n@dataclass\nclass TransitionDatasetConfig:\n sample_batch_size: int = 256\n prefetch_size: Optional[int] = None\n num_parallel_calls: int = 12\n max_in_flight_samples_per_worker: Optional[int] = None\n postprocess: Optional[Transform] = None\n # dataset_name: str = \"transition_dataset\"\n\n\nclass TransitionDataset(TrainerDataset):\n def __init__(\n self,\n config: TransitionDatasetConfig = TransitionDatasetConfig(),\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n max_in_flight_samples_per_worker = self.config.max_in_flight_samples_per_worker\n dataset = datasets.make_reverb_dataset(\n table=builder.store.trainer_id,\n server_address=builder.store.data_server_client.server_address,\n batch_size=self.config.sample_batch_size,\n prefetch_size=self.config.prefetch_size,\n num_parallel_calls=self.config.num_parallel_calls,\n max_in_flight_samples_per_worker=max_in_flight_samples_per_worker,\n postprocess=self.config.postprocess,\n )\n\n builder.store.dataset_iterator = iter(dataset)\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return TransitionDatasetConfig\n\n\n@dataclass\nclass TrajectoryDatasetConfig:\n sample_batch_size: int = 256\n max_in_flight_samples_per_worker: int = 512\n num_workers_per_iterator: int = -1\n max_samples_per_stream: int = -1\n rate_limiter_timeout_ms: int = -1\n get_signature_timeout_secs: Optional[int] = None\n # max_samples: int = -1\n # dataset_name: str = \"trajectory_dataset\"\n\n\nclass TrajectoryDataset(TrainerDataset):\n def __init__(\n self,\n config: TrajectoryDatasetConfig = TrajectoryDatasetConfig(),\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : 
_description_\n \"\"\"\n dataset = reverb.TrajectoryDataset.from_table_signature(\n server_address=builder.store.data_server_client.server_address,\n table=builder.store.trainer_id,\n max_in_flight_samples_per_worker=2 * self.config.sample_batch_size,\n num_workers_per_iterator=self.config.num_workers_per_iterator,\n max_samples_per_stream=self.config.max_samples_per_stream,\n rate_limiter_timeout_ms=self.config.rate_limiter_timeout_ms,\n get_signature_timeout_secs=self.config.get_signature_timeout_secs,\n # max_samples=self.config.max_samples,\n )\n\n # Add batch dimension.\n dataset = dataset.batch(self.config.sample_batch_size, drop_remainder=True)\n builder.store.sample_batch_size = self.config.sample_batch_size\n\n builder.store.dataset_iterator = dataset.as_numpy_iterator()\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return TrajectoryDatasetConfig\n", "path": "mava/components/jax/building/datasets.py" } ]
diff --git a/mava/components/jax/building/datasets.py b/mava/components/jax/building/datasets.py index e60804f18..f5d038a84 100644 --- a/mava/components/jax/building/datasets.py +++ b/mava/components/jax/building/datasets.py @@ -98,7 +98,7 @@ def on_building_trainer_dataset(self, builder: SystemBuilder) -> None: postprocess=self.config.postprocess, ) - builder.store.dataset = iter(dataset) + builder.store.dataset_iterator = iter(dataset) @staticmethod def config_class() -> Optional[Callable]: diff --git a/tests/jax/components/building/datasets_test.py b/tests/jax/components/building/datasets_test.py new file mode 100644 index 000000000..aef6f4206 --- /dev/null +++ b/tests/jax/components/building/datasets_test.py @@ -0,0 +1,263 @@ +# python3 +# Copyright 2021 InstaDeep Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for TransitionDataset and TrajectoryDataset classes for Jax-based Mava""" + +from types import SimpleNamespace +from typing import Any, Callable, Dict + +import pytest +import reverb +from tensorflow.python.framework import dtypes, ops + +from mava import specs +from mava.adders import reverb as reverb_adders +from mava.components.jax.building.datasets import ( + TrajectoryDataset, + TrajectoryDatasetConfig, + TransitionDataset, + TransitionDatasetConfig, +) +from mava.systems.jax.builder import Builder +from tests.jax.mocks import make_fake_env_specs + +env_spec = make_fake_env_specs() +Transform = Callable[[reverb.ReplaySample], reverb.ReplaySample] + + +def adder_signature_fn( + ma_environment_spec: specs.MAEnvironmentSpec, + extras_specs: Dict[str, Any], +) -> Any: + """Signature function that helps in building a simple server""" + return reverb_adders.ParallelNStepTransitionAdder.signature( + ma_environment_spec=ma_environment_spec, extras_specs=extras_specs + ) + + +class MockBuilder(Builder): + def __init__(self) -> None: + """Creates a mock builder for testing""" + self.simple_server = reverb.Server( + [ + reverb.Table.queue( + name="table_0", + max_size=100, + signature=adder_signature_fn(env_spec, {}), + ) + ] + ) + data_server_client = SimpleNamespace( + server_address=f"localhost:{self.simple_server.port}" + ) + trainer_id = "table_0" + self.store = SimpleNamespace( + data_server_client=data_server_client, trainer_id=trainer_id + ) + + [email protected] +def mock_builder() -> MockBuilder: + """Create builder mock""" + return MockBuilder() + + [email protected] +def transition_dataset() -> TransitionDataset: + config = TransitionDatasetConfig() + config.sample_batch_size = 512 + config.prefetch_size = None + config.num_parallel_calls = 24 + config.max_in_flight_samples_per_worker = None + config.postprocess = None + + transition_dataset = TransitionDataset(config=config) + return transition_dataset + + [email protected] +def trajectory_dataset() -> TrajectoryDataset: + config = TrajectoryDatasetConfig() + config.sample_batch_size = 512 + config.max_in_flight_samples_per_worker = 1024 + config.num_workers_per_iterator = -2 + 
config.max_samples_per_stream = -2 + config.rate_limiter_timeout_ms = -2 + config.get_signature_timeout_secs = None + + trajectory_dataset = TrajectoryDataset(config=config) + return trajectory_dataset + + +def test_init_transition_dataset(transition_dataset: TransitionDataset) -> None: + """Test initiator of TransitionDataset component""" + assert transition_dataset.config.sample_batch_size == 512 + assert transition_dataset.config.prefetch_size is None + assert transition_dataset.config.num_parallel_calls == 24 + assert transition_dataset.config.max_in_flight_samples_per_worker is None + assert transition_dataset.config.postprocess is None + + +def test_on_building_trainer_dataset_transition_dataset_non_max_in_flight( + mock_builder: MockBuilder, +) -> None: + """Test on_building_trainer_dataset of TransitionDataset Component + Case max_in_flight_samples_per_worker is None and sample_batch_size not None + Args: + mock_builder: Builder + """ + transition_dataset = TransitionDataset() + transition_dataset.on_building_trainer_dataset(builder=mock_builder) + + # mock_builder.store.dataset_iterator._dataset._map_func._func(1)._dataset \ + # is needed to check the parameters i.e. obtain the dataset from the \ + # tf.data.Dataset dataset iterator + + dataset = mock_builder.store.dataset_iterator._dataset._map_func._func(1)._dataset + assert ( + dataset._input_dataset._server_address + == mock_builder.store.data_server_client.server_address + ) + assert dataset._input_dataset._table == mock_builder.store.trainer_id + assert dataset._batch_size == transition_dataset.config.sample_batch_size + assert ( + dataset._input_dataset._max_in_flight_samples_per_worker + == 2 * transition_dataset.config.sample_batch_size + ) + assert ( + mock_builder.store.dataset_iterator._dataset._num_parallel_calls + == ops.convert_to_tensor( + transition_dataset.config.num_parallel_calls, + dtype=dtypes.int64, + name="num_parallel_calls", + ) + ) + + +def test_on_building_trainer_dataset_transition_dataset_non_max_in_flight_non_batch( + mock_builder: MockBuilder, +) -> None: + """Test on_building_trainer_dataset of TransitionDataset Component + Case max_in_flight_samples_per_worker is None and sample_batch_size is None + Args: + mock_builder: Builder + """ + transition_dataset = TransitionDataset() + transition_dataset.config.sample_batch_size = None + transition_dataset.on_building_trainer_dataset(builder=mock_builder) + + dataset = mock_builder.store.dataset_iterator._dataset._map_func._func(1) + assert ( + dataset._server_address == mock_builder.store.data_server_client.server_address + ) + assert dataset._table == mock_builder.store.trainer_id + assert dataset._max_in_flight_samples_per_worker == 100 + assert ( + mock_builder.store.dataset_iterator._dataset._num_parallel_calls + == ops.convert_to_tensor( + transition_dataset.config.num_parallel_calls, + dtype=dtypes.int64, + name="num_parallel_calls", + ) + ) + + +def test_on_building_trainer_dataset_transition_dataset( + mock_builder: MockBuilder, +) -> None: + """Test on_building_trainer_dataset of TransitionDataset Component + With max_in_flight_samples_per_worker and with sample_batch_size + Args: + mock_builder: Builder + """ + transition_dataset = TransitionDataset() + transition_dataset.config.sample_batch_size = 512 + transition_dataset.config.max_in_flight_samples_per_worker = 120 + transition_dataset.on_building_trainer_dataset(builder=mock_builder) + + # mock_builder.store.dataset_iterator._dataset._map_func._func(1)._dataset \ + # is needed to check the 
parameters i.e. obtain the dataset from the \ + # tf.data.Dataset dataset iterator + + dataset = mock_builder.store.dataset_iterator._dataset._map_func._func(1)._dataset + assert ( + dataset._input_dataset._server_address + == mock_builder.store.data_server_client.server_address + ) + assert dataset._input_dataset._table == mock_builder.store.trainer_id + assert dataset._batch_size == transition_dataset.config.sample_batch_size + assert dataset._input_dataset._max_in_flight_samples_per_worker == 120 + assert ( + mock_builder.store.dataset_iterator._dataset._num_parallel_calls + == ops.convert_to_tensor( + transition_dataset.config.num_parallel_calls, + dtype=dtypes.int64, + name="num_parallel_calls", + ) + ) + + +def test_init_trajectory_dataset(trajectory_dataset: TrajectoryDataset) -> None: + """Test initiator of TrajectoryDataset component""" + assert trajectory_dataset.config.sample_batch_size == 512 + assert trajectory_dataset.config.max_in_flight_samples_per_worker == 1024 + assert trajectory_dataset.config.num_workers_per_iterator == -2 + assert trajectory_dataset.config.max_samples_per_stream == -2 + assert trajectory_dataset.config.rate_limiter_timeout_ms == -2 + assert trajectory_dataset.config.get_signature_timeout_secs is None + + +def test_on_building_trainer_dataset_trajectory_dataset( + mock_builder: MockBuilder, +) -> None: + """Test on_building_trainer_dataset of TrajectoryDataset Component + + Args: + mock_builder: Builder + """ + trajectory_dataset = TrajectoryDataset() + trajectory_dataset.on_building_trainer_dataset(builder=mock_builder) + + assert ( + mock_builder.store.sample_batch_size + == trajectory_dataset.config.sample_batch_size + ) + + # mock_builder.store.dataset_iterator._iterator._dataset is needed \ + # to check the parameters i.e. obtain the dataset from the numpy \ + # dataset iterator + + dataset = mock_builder.store.dataset_iterator._iterator._dataset + assert ( + dataset._input_dataset._server_address + == mock_builder.store.data_server_client.server_address + ) + assert dataset._input_dataset._table == mock_builder.store.trainer_id + assert ( + dataset._input_dataset._max_in_flight_samples_per_worker + == 2 * trajectory_dataset.config.sample_batch_size + ) + assert ( + dataset._input_dataset._num_workers_per_iterator + == trajectory_dataset.config.num_workers_per_iterator + ) + assert ( + dataset._input_dataset._max_samples_per_stream + == trajectory_dataset.config.max_samples_per_stream + ) + assert ( + dataset._input_dataset._rate_limiter_timeout_ms + == trajectory_dataset.config.rate_limiter_timeout_ms + )
conan-io__conan-4324
tools.environment_append raises if it tries to unset a variable which was never set
After #4224, I may use the following code, for instance, to ensure a variable is not set:
```
with environment_append({'CONAN_BASH_PATH': None}):
    pass
```
However, it raises if `CONAN_BASH_PATH` is not set prior to the environment_append invocation:
```
Traceback (most recent call last):
  File "C:\bincrafters\conan\conans\test\unittests\client\tools\os_info\osinfo_test.py", line 39, in test_windows
    with environment_append(new_env):
  File "c:\users\sse4\appdata\local\programs\python\python36\lib\contextlib.py", line 81, in __enter__
    return next(self.gen)
  File "C:\bincrafters\conan\conans\client\tools\env.py", line 57, in environment_append
    os.environ.pop(var)
  File "c:\users\sse4\appdata\local\programs\python\python36\lib\_collections_abc.py", line 795, in pop
    value = self[key]
  File "c:\users\sse4\appdata\local\programs\python\python36\lib\os.py", line 669, in __getitem__
    raise KeyError(key) from None
KeyError: 'CONAN_BASH_PATH'
```
I would expect `tools.environment_append` to be a no-op in such a case; otherwise, it requires additional logic to work around this behavior.

To help us debug your issue please explain:

- [ ] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).
- [ ] I've specified the Conan version, operating system version and any tool that can be relevant.
- [ ] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
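A small hedged sketch of the behavior being requested (an illustration only, not Conan's implementation): unsetting becomes harmless when the absent key is popped with a default.

```python
import os
from contextlib import contextmanager


@contextmanager
def tolerant_unset(*names):
    """Temporarily unset variables, silently skipping ones that are not set."""
    saved = {name: os.environ.pop(name, None) for name in names}
    try:
        yield
    finally:
        # Restore only the variables that actually existed beforehand.
        for name, value in saved.items():
            if value is not None:
                os.environ[name] = value


# Does not raise even though CONAN_BASH_PATH was never set.
with tolerant_unset("CONAN_BASH_PATH"):
    assert "CONAN_BASH_PATH" not in os.environ
```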
[ { "content": "import os\nimport sys\nfrom contextlib import contextmanager\n\nfrom conans.client.run_environment import RunEnvironment\nfrom conans.client.tools.files import _path_equals, which\nfrom conans.errors import ConanException\n\n\n@contextmanager\ndef pythonpath(conanfile):\n python_path = conanfile.env.get(\"PYTHONPATH\", None)\n if python_path:\n old_path = sys.path[:]\n if isinstance(python_path, list):\n sys.path.extend(python_path)\n else:\n sys.path.append(python_path)\n\n yield\n sys.path = old_path\n else:\n yield\n\n\n@contextmanager\ndef run_environment(conanfile):\n with environment_append(RunEnvironment(conanfile).vars):\n yield\n\n\n@contextmanager\ndef environment_append(env_vars):\n \"\"\"\n :param env_vars: List (dict) of simple environment vars. {name: value, name2: value2} => e.g.: MYVAR=1\n The values can also be lists of appendable environment vars. {name: [value, value2]}\n => e.g. PATH=/path/1:/path/2\n If the value is set to None, then that environment variable is unset.\n :return: None\n \"\"\"\n unset_vars = []\n for key in env_vars.keys():\n if env_vars[key] is None:\n unset_vars.append(key)\n for var in unset_vars:\n env_vars.pop(var, None)\n for name, value in env_vars.items():\n if isinstance(value, list):\n env_vars[name] = os.pathsep.join(value)\n old = os.environ.get(name)\n if old:\n env_vars[name] += os.pathsep + old\n if env_vars or unset_vars:\n old_env = dict(os.environ)\n os.environ.update(env_vars)\n for var in unset_vars:\n os.environ.pop(var)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n else:\n yield\n\n\n@contextmanager\ndef no_op():\n yield\n\n\n@contextmanager\ndef remove_from_path(command):\n curpath = os.getenv(\"PATH\")\n first_it = True\n for _ in range(30):\n if not first_it:\n with environment_append({\"PATH\": curpath}):\n the_command = which(command)\n else:\n the_command = which(command)\n first_it = False\n\n if not the_command:\n break\n new_path = []\n for entry in curpath.split(os.pathsep):\n if not _path_equals(entry, os.path.dirname(the_command)):\n new_path.append(entry)\n\n curpath = os.pathsep.join(new_path)\n else:\n raise ConanException(\"Error in tools.remove_from_path!! couldn't remove the tool '%s' \"\n \"from the path after 30 attempts, still found in '%s' this is a \"\n \"Conan client bug, please open an issue at: \"\n \"https://github.com/conan-io/conan\\n\\nPATH=%s\"\n % (command, the_command, os.getenv(\"PATH\")))\n\n with environment_append({\"PATH\": curpath}):\n yield\n", "path": "conans/client/tools/env.py" } ]
[ { "content": "import os\nimport sys\nfrom contextlib import contextmanager\n\nfrom conans.client.run_environment import RunEnvironment\nfrom conans.client.tools.files import _path_equals, which\nfrom conans.errors import ConanException\n\n\n@contextmanager\ndef pythonpath(conanfile):\n python_path = conanfile.env.get(\"PYTHONPATH\", None)\n if python_path:\n old_path = sys.path[:]\n if isinstance(python_path, list):\n sys.path.extend(python_path)\n else:\n sys.path.append(python_path)\n\n yield\n sys.path = old_path\n else:\n yield\n\n\n@contextmanager\ndef run_environment(conanfile):\n with environment_append(RunEnvironment(conanfile).vars):\n yield\n\n\n@contextmanager\ndef environment_append(env_vars):\n \"\"\"\n :param env_vars: List (dict) of simple environment vars. {name: value, name2: value2} => e.g.: MYVAR=1\n The values can also be lists of appendable environment vars. {name: [value, value2]}\n => e.g. PATH=/path/1:/path/2\n If the value is set to None, then that environment variable is unset.\n :return: None\n \"\"\"\n unset_vars = []\n for key in env_vars.keys():\n if env_vars[key] is None:\n unset_vars.append(key)\n for var in unset_vars:\n env_vars.pop(var, None)\n for name, value in env_vars.items():\n if isinstance(value, list):\n env_vars[name] = os.pathsep.join(value)\n old = os.environ.get(name)\n if old:\n env_vars[name] += os.pathsep + old\n if env_vars or unset_vars:\n old_env = dict(os.environ)\n os.environ.update(env_vars)\n for var in unset_vars:\n os.environ.pop(var, None)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n else:\n yield\n\n\n@contextmanager\ndef no_op():\n yield\n\n\n@contextmanager\ndef remove_from_path(command):\n curpath = os.getenv(\"PATH\")\n first_it = True\n for _ in range(30):\n if not first_it:\n with environment_append({\"PATH\": curpath}):\n the_command = which(command)\n else:\n the_command = which(command)\n first_it = False\n\n if not the_command:\n break\n new_path = []\n for entry in curpath.split(os.pathsep):\n if not _path_equals(entry, os.path.dirname(the_command)):\n new_path.append(entry)\n\n curpath = os.pathsep.join(new_path)\n else:\n raise ConanException(\"Error in tools.remove_from_path!! couldn't remove the tool '%s' \"\n \"from the path after 30 attempts, still found in '%s' this is a \"\n \"Conan client bug, please open an issue at: \"\n \"https://github.com/conan-io/conan\\n\\nPATH=%s\"\n % (command, the_command, os.getenv(\"PATH\")))\n\n with environment_append({\"PATH\": curpath}):\n yield\n", "path": "conans/client/tools/env.py" } ]
diff --git a/conans/client/tools/env.py b/conans/client/tools/env.py index 079f549fd12..2584065ddbb 100644 --- a/conans/client/tools/env.py +++ b/conans/client/tools/env.py @@ -54,7 +54,7 @@ def environment_append(env_vars): old_env = dict(os.environ) os.environ.update(env_vars) for var in unset_vars: - os.environ.pop(var) + os.environ.pop(var, None) try: yield finally: diff --git a/conans/test/unittests/client/tools/test_env.py b/conans/test/unittests/client/tools/test_env.py index 2fcc1c1c82e..556139e306e 100644 --- a/conans/test/unittests/client/tools/test_env.py +++ b/conans/test/unittests/client/tools/test_env.py @@ -46,3 +46,9 @@ def test_environment_append_unsetting_all_variables(self): 'env_var2': 'value2'}),\ env.environment_append({'env_var1': None}): self.assertNotIn('env_var1', os.environ) + + def test_environment_append_unsetting_non_existing_variables(self): + with mock.patch.dict('os.environ', + {'env_var2': 'value2'}),\ + env.environment_append({'env_var1': None}): + self.assertNotIn('env_var1', os.environ)
translate__pootle-4882
Make `pootle webpack` not require system checks
`pootle webpack` fails if, e.g., the db is not set up or is misconfigured. It would be helpful if it didn't.
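For reference, a Django management command opts out of the system check framework (and hence the DB-backed checks) with a single class attribute; a minimal hedged sketch for the Django versions Pootle targets, not the actual `webpack` command:

```python
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    help = "Build static assets without touching the database."
    # Skip Django's system checks (including the DB-backed ones),
    # so the command still runs when the database is misconfigured.
    requires_system_checks = False

    def handle(self, **options):
        self.stdout.write("building assets...")
```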
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\nimport subprocess\nimport sys\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom pootle_misc.baseurl import l\n\n\nclass Command(BaseCommand):\n help = 'Builds and bundles static assets using webpack'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--dev',\n action='store_true',\n dest='dev',\n default=False,\n help='Enable development builds and watch for changes.',\n )\n parser.add_argument(\n '--nowatch',\n action='store_false',\n dest='watch',\n default=True,\n help='Disable watching for changes.',\n )\n parser.add_argument(\n '--progress',\n action='store_true',\n default=False,\n help='Show progress (implied if --dev is present).',\n )\n parser.add_argument(\n '--extra',\n action='append',\n default=[],\n help='Additional options to pass to the JavaScript webpack tool.',\n )\n\n def handle(self, **options):\n default_static_dir = os.path.join(settings.WORKING_DIR, 'static')\n custom_static_dirs = filter(lambda x: x != default_static_dir,\n settings.STATICFILES_DIRS)\n default_js_dir = os.path.join(default_static_dir, 'js')\n\n webpack_config_file = os.path.join(default_js_dir, 'webpack.config.js')\n\n webpack_bin = os.path.join(default_js_dir, 'node_modules/.bin/webpack')\n if os.name == 'nt':\n webpack_bin = '%s.cmd' % webpack_bin\n\n webpack_progress = (\n '--progress' if options['progress'] or options['dev'] else ''\n )\n webpack_colors = '--colors' if not options['no_color'] else ''\n\n webpack_args = [webpack_bin, '--config=%s' % webpack_config_file]\n if webpack_progress:\n webpack_args.append(webpack_progress)\n if webpack_colors:\n webpack_args.append(webpack_colors)\n\n if options['dev']:\n watch = '--watch' if options['watch'] else ''\n webpack_args.extend([watch, '--display-error-details'])\n else:\n os.environ['NODE_ENV'] = 'production'\n webpack_args.append(\"--bail\")\n\n webpack_args.extend(options['extra'])\n\n static_base = l(settings.STATIC_URL)\n suffix = 'js/' if static_base.endswith('/') else '/js/'\n os.environ['WEBPACK_PUBLIC_PATH'] = static_base + suffix\n\n if custom_static_dirs:\n # XXX: review this for css\n # Append `js/` so that it's not necessary to reference it from the\n # `webpack.config.js` file\n custom_static_dirs = map(lambda x: os.path.join(x, 'js/'),\n custom_static_dirs)\n os.environ['WEBPACK_ROOT'] = ':'.join(custom_static_dirs)\n\n try:\n subprocess.call(webpack_args)\n except OSError:\n raise CommandError(\n 'webpack executable not found.\\n'\n 'Make sure to install it by running '\n '`cd %s && npm install`' % default_js_dir\n )\n sys.exit(0)\n", "path": "pootle/apps/pootle_app/management/commands/webpack.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\nimport subprocess\nimport sys\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom pootle_misc.baseurl import l\n\n\nclass Command(BaseCommand):\n help = 'Builds and bundles static assets using webpack'\n requires_system_checks = False\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--dev',\n action='store_true',\n dest='dev',\n default=False,\n help='Enable development builds and watch for changes.',\n )\n parser.add_argument(\n '--nowatch',\n action='store_false',\n dest='watch',\n default=True,\n help='Disable watching for changes.',\n )\n parser.add_argument(\n '--progress',\n action='store_true',\n default=False,\n help='Show progress (implied if --dev is present).',\n )\n parser.add_argument(\n '--extra',\n action='append',\n default=[],\n help='Additional options to pass to the JavaScript webpack tool.',\n )\n\n def handle(self, **options):\n default_static_dir = os.path.join(settings.WORKING_DIR, 'static')\n custom_static_dirs = filter(lambda x: x != default_static_dir,\n settings.STATICFILES_DIRS)\n default_js_dir = os.path.join(default_static_dir, 'js')\n\n webpack_config_file = os.path.join(default_js_dir, 'webpack.config.js')\n\n webpack_bin = os.path.join(default_js_dir, 'node_modules/.bin/webpack')\n if os.name == 'nt':\n webpack_bin = '%s.cmd' % webpack_bin\n\n webpack_progress = (\n '--progress' if options['progress'] or options['dev'] else ''\n )\n webpack_colors = '--colors' if not options['no_color'] else ''\n\n webpack_args = [webpack_bin, '--config=%s' % webpack_config_file]\n if webpack_progress:\n webpack_args.append(webpack_progress)\n if webpack_colors:\n webpack_args.append(webpack_colors)\n\n if options['dev']:\n watch = '--watch' if options['watch'] else ''\n webpack_args.extend([watch, '--display-error-details'])\n else:\n os.environ['NODE_ENV'] = 'production'\n webpack_args.append(\"--bail\")\n\n webpack_args.extend(options['extra'])\n\n static_base = l(settings.STATIC_URL)\n suffix = 'js/' if static_base.endswith('/') else '/js/'\n os.environ['WEBPACK_PUBLIC_PATH'] = static_base + suffix\n\n if custom_static_dirs:\n # XXX: review this for css\n # Append `js/` so that it's not necessary to reference it from the\n # `webpack.config.js` file\n custom_static_dirs = map(lambda x: os.path.join(x, 'js/'),\n custom_static_dirs)\n os.environ['WEBPACK_ROOT'] = ':'.join(custom_static_dirs)\n\n try:\n subprocess.call(webpack_args)\n except OSError:\n raise CommandError(\n 'webpack executable not found.\\n'\n 'Make sure to install it by running '\n '`cd %s && npm install`' % default_js_dir\n )\n sys.exit(0)\n", "path": "pootle/apps/pootle_app/management/commands/webpack.py" } ]
diff --git a/pootle/apps/pootle_app/management/commands/webpack.py b/pootle/apps/pootle_app/management/commands/webpack.py index fc5d602f8f8..1f6ce4b774c 100644 --- a/pootle/apps/pootle_app/management/commands/webpack.py +++ b/pootle/apps/pootle_app/management/commands/webpack.py @@ -19,6 +19,7 @@ class Command(BaseCommand): help = 'Builds and bundles static assets using webpack' + requires_system_checks = False def add_arguments(self, parser): parser.add_argument(
encode__django-rest-framework-1836
Adding the `DjangoFilterBackend` filter changes queryset ordering. Tried posting on StackOverflow with no reply (http://stackoverflow.com/questions/21848095/adding-filtering-changes-ordering) => decided to open a bug here.

I have a ModelViewSet that I want to add filtering to. My simple model looks like:

```
class Article(models.Model):
    date = models.DateField()
    language = models.CharField(max_length=10)

    class Meta:
        ordering = ['-date']
```

And the ModelViewSet (read only):

```
class ArticleViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = Article.objects.all()
    serializer_class = ArticleSerializer
```

Articles on the API are now ordered by date descending, as I would expect.

Now I wish to allow filtering on language. I've set the filter backend to `DjangoFilterBackend` in settings.py. My updated ModelViewSet now looks like:

```
class ArticleViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = Article.objects.all()
    serializer_class = ArticleSerializer
    filter_fields = ['language']
```

This changes the ordering to language ASC. Adding `order_by('-date')` to the queryset does not change anything. Adding `ordering = ('-date', )` does not change anything.

=> How do I specify both filtering and ordering (or simply use default ordering while allowing filtering)?

**EDIT:** Current functionality seems to come from the AutoFilterSet created in Rest Framework by default:
https://github.com/tomchristie/django-rest-framework/blob/822eb39599b248c68573c3095639a831ab6df99a/rest_framework/filters.py#L53
... where `order_by=True`, and the handling of this in django-filter's `get_ordering_field` here:
https://github.com/alex/django-filter/blob/d88b98dd2b70551deb9c128b209fcf783b325acc/django_filters/filterset.py#L325

=> It seems I have to create a FilterSet class:

```
class LanguageFilter(django_filters.FilterSet):
    class Meta:
        model = Article
        fields = ['language']
        order_by = model()._meta.ordering


class ArticleViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = Article.objects.all()
    serializer_class = ArticleSerializer
    filter_class = LanguageFilter
```

Does this look correct? Seems a bit "much"/verbose/counter-intuitive to retain default ordering.
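A hedged alternative to the `FilterSet` workaround sketched above (module paths are hypothetical and this is not an official resolution of the ticket): chain `OrderingFilter` after `DjangoFilterBackend` so the view-level `ordering` is re-applied once filtering is done; this leans on `OrderingFilter.get_default_ordering()`, visible in the filters.py source included below.

```python
from rest_framework import filters, viewsets

from myapp.models import Article  # hypothetical module path
from myapp.serializers import ArticleSerializer  # hypothetical module path


class ArticleViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = Article.objects.all()
    serializer_class = ArticleSerializer

    # DjangoFilterBackend answers ?language=..., then OrderingFilter falls
    # back to the view's `ordering` attribute when no ?ordering= parameter
    # is given, restoring the '-date' default after filtering.
    filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter)
    filter_fields = ('language',)
    ordering = ('-date',)
```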
[ { "content": "\"\"\"\nProvides generic filtering backends that can be used to filter the results\nreturned by list views.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models\nfrom django.utils import six\nfrom rest_framework.compat import django_filters, guardian, get_model_name\nfrom rest_framework.settings import api_settings\nfrom functools import reduce\nimport operator\n\nFilterSet = django_filters and django_filters.FilterSet or None\n\n\nclass BaseFilterBackend(object):\n \"\"\"\n A base class from which all filter backend classes should inherit.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n Return a filtered queryset.\n \"\"\"\n raise NotImplementedError(\".filter_queryset() must be overridden.\")\n\n\nclass DjangoFilterBackend(BaseFilterBackend):\n \"\"\"\n A filter backend that uses django-filter.\n \"\"\"\n default_filter_set = FilterSet\n\n def __init__(self):\n assert django_filters, 'Using DjangoFilterBackend, but django-filter is not installed'\n\n def get_filter_class(self, view, queryset=None):\n \"\"\"\n Return the django-filters `FilterSet` used to filter the queryset.\n \"\"\"\n filter_class = getattr(view, 'filter_class', None)\n filter_fields = getattr(view, 'filter_fields', None)\n\n if filter_class:\n filter_model = filter_class.Meta.model\n\n assert issubclass(queryset.model, filter_model), \\\n 'FilterSet model %s does not match queryset model %s' % \\\n (filter_model, queryset.model)\n\n return filter_class\n\n if filter_fields:\n class AutoFilterSet(self.default_filter_set):\n class Meta:\n model = queryset.model\n fields = filter_fields\n order_by = True\n return AutoFilterSet\n\n return None\n\n def filter_queryset(self, request, queryset, view):\n filter_class = self.get_filter_class(view, queryset)\n\n if filter_class:\n return filter_class(request.QUERY_PARAMS, queryset=queryset).qs\n\n return queryset\n\n\nclass SearchFilter(BaseFilterBackend):\n # The URL query parameter used for the search.\n search_param = api_settings.SEARCH_PARAM\n\n def get_search_terms(self, request):\n \"\"\"\n Search terms are set by a ?search=... query parameter,\n and may be comma and/or whitespace delimited.\n \"\"\"\n params = request.QUERY_PARAMS.get(self.search_param, '')\n return params.replace(',', ' ').split()\n\n def construct_search(self, field_name):\n if field_name.startswith('^'):\n return \"%s__istartswith\" % field_name[1:]\n elif field_name.startswith('='):\n return \"%s__iexact\" % field_name[1:]\n elif field_name.startswith('@'):\n return \"%s__search\" % field_name[1:]\n else:\n return \"%s__icontains\" % field_name\n\n def filter_queryset(self, request, queryset, view):\n search_fields = getattr(view, 'search_fields', None)\n\n if not search_fields:\n return queryset\n\n orm_lookups = [self.construct_search(str(search_field))\n for search_field in search_fields]\n\n for search_term in self.get_search_terms(request):\n or_queries = [models.Q(**{orm_lookup: search_term})\n for orm_lookup in orm_lookups]\n queryset = queryset.filter(reduce(operator.or_, or_queries))\n\n return queryset\n\n\nclass OrderingFilter(BaseFilterBackend):\n # The URL query parameter used for the ordering.\n ordering_param = api_settings.ORDERING_PARAM\n ordering_fields = None\n\n def get_ordering(self, request):\n \"\"\"\n Ordering is set by a comma delimited ?ordering=... 
query parameter.\n\n The `ordering` query parameter can be overridden by setting\n the `ordering_param` value on the OrderingFilter or by\n specifying an `ORDERING_PARAM` value in the API settings.\n \"\"\"\n params = request.QUERY_PARAMS.get(self.ordering_param)\n if params:\n return [param.strip() for param in params.split(',')]\n\n def get_default_ordering(self, view):\n ordering = getattr(view, 'ordering', None)\n if isinstance(ordering, six.string_types):\n return (ordering,)\n return ordering\n\n def remove_invalid_fields(self, queryset, ordering, view):\n valid_fields = getattr(view, 'ordering_fields', self.ordering_fields)\n\n if valid_fields is None:\n # Default to allowing filtering on serializer fields\n serializer_class = getattr(view, 'serializer_class')\n if serializer_class is None:\n msg = (\"Cannot use %s on a view which does not have either a \"\n \"'serializer_class' or 'ordering_fields' attribute.\")\n raise ImproperlyConfigured(msg % self.__class__.__name__)\n valid_fields = [\n field.source or field_name\n for field_name, field in serializer_class().fields.items()\n if not getattr(field, 'write_only', False)\n ]\n elif valid_fields == '__all__':\n # View explictly allows filtering on any model field\n valid_fields = [field.name for field in queryset.model._meta.fields]\n valid_fields += queryset.query.aggregates.keys()\n\n return [term for term in ordering if term.lstrip('-') in valid_fields]\n\n def filter_queryset(self, request, queryset, view):\n ordering = self.get_ordering(request)\n\n if ordering:\n # Skip any incorrect parameters\n ordering = self.remove_invalid_fields(queryset, ordering, view)\n\n if not ordering:\n # Use 'ordering' attribute by default\n ordering = self.get_default_ordering(view)\n\n if ordering:\n return queryset.order_by(*ordering)\n\n return queryset\n\n\nclass DjangoObjectPermissionsFilter(BaseFilterBackend):\n \"\"\"\n A filter backend that limits results to those where the requesting user\n has read object level permissions.\n \"\"\"\n def __init__(self):\n assert guardian, 'Using DjangoObjectPermissionsFilter, but django-guardian is not installed'\n\n perm_format = '%(app_label)s.view_%(model_name)s'\n\n def filter_queryset(self, request, queryset, view):\n user = request.user\n model_cls = queryset.model\n kwargs = {\n 'app_label': model_cls._meta.app_label,\n 'model_name': get_model_name(model_cls)\n }\n permission = self.perm_format % kwargs\n return guardian.shortcuts.get_objects_for_user(user, permission, queryset)\n", "path": "rest_framework/filters.py" } ]
[ { "content": "\"\"\"\nProvides generic filtering backends that can be used to filter the results\nreturned by list views.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models\nfrom django.utils import six\nfrom rest_framework.compat import django_filters, guardian, get_model_name\nfrom rest_framework.settings import api_settings\nfrom functools import reduce\nimport operator\n\nFilterSet = django_filters and django_filters.FilterSet or None\n\n\nclass BaseFilterBackend(object):\n \"\"\"\n A base class from which all filter backend classes should inherit.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n Return a filtered queryset.\n \"\"\"\n raise NotImplementedError(\".filter_queryset() must be overridden.\")\n\n\nclass DjangoFilterBackend(BaseFilterBackend):\n \"\"\"\n A filter backend that uses django-filter.\n \"\"\"\n default_filter_set = FilterSet\n\n def __init__(self):\n assert django_filters, 'Using DjangoFilterBackend, but django-filter is not installed'\n\n def get_filter_class(self, view, queryset=None):\n \"\"\"\n Return the django-filters `FilterSet` used to filter the queryset.\n \"\"\"\n filter_class = getattr(view, 'filter_class', None)\n filter_fields = getattr(view, 'filter_fields', None)\n\n if filter_class:\n filter_model = filter_class.Meta.model\n\n assert issubclass(queryset.model, filter_model), \\\n 'FilterSet model %s does not match queryset model %s' % \\\n (filter_model, queryset.model)\n\n return filter_class\n\n if filter_fields:\n class AutoFilterSet(self.default_filter_set):\n class Meta:\n model = queryset.model\n fields = filter_fields\n return AutoFilterSet\n\n return None\n\n def filter_queryset(self, request, queryset, view):\n filter_class = self.get_filter_class(view, queryset)\n\n if filter_class:\n return filter_class(request.QUERY_PARAMS, queryset=queryset).qs\n\n return queryset\n\n\nclass SearchFilter(BaseFilterBackend):\n # The URL query parameter used for the search.\n search_param = api_settings.SEARCH_PARAM\n\n def get_search_terms(self, request):\n \"\"\"\n Search terms are set by a ?search=... query parameter,\n and may be comma and/or whitespace delimited.\n \"\"\"\n params = request.QUERY_PARAMS.get(self.search_param, '')\n return params.replace(',', ' ').split()\n\n def construct_search(self, field_name):\n if field_name.startswith('^'):\n return \"%s__istartswith\" % field_name[1:]\n elif field_name.startswith('='):\n return \"%s__iexact\" % field_name[1:]\n elif field_name.startswith('@'):\n return \"%s__search\" % field_name[1:]\n else:\n return \"%s__icontains\" % field_name\n\n def filter_queryset(self, request, queryset, view):\n search_fields = getattr(view, 'search_fields', None)\n\n if not search_fields:\n return queryset\n\n orm_lookups = [self.construct_search(str(search_field))\n for search_field in search_fields]\n\n for search_term in self.get_search_terms(request):\n or_queries = [models.Q(**{orm_lookup: search_term})\n for orm_lookup in orm_lookups]\n queryset = queryset.filter(reduce(operator.or_, or_queries))\n\n return queryset\n\n\nclass OrderingFilter(BaseFilterBackend):\n # The URL query parameter used for the ordering.\n ordering_param = api_settings.ORDERING_PARAM\n ordering_fields = None\n\n def get_ordering(self, request):\n \"\"\"\n Ordering is set by a comma delimited ?ordering=... 
query parameter.\n\n The `ordering` query parameter can be overridden by setting\n the `ordering_param` value on the OrderingFilter or by\n specifying an `ORDERING_PARAM` value in the API settings.\n \"\"\"\n params = request.QUERY_PARAMS.get(self.ordering_param)\n if params:\n return [param.strip() for param in params.split(',')]\n\n def get_default_ordering(self, view):\n ordering = getattr(view, 'ordering', None)\n if isinstance(ordering, six.string_types):\n return (ordering,)\n return ordering\n\n def remove_invalid_fields(self, queryset, ordering, view):\n valid_fields = getattr(view, 'ordering_fields', self.ordering_fields)\n\n if valid_fields is None:\n # Default to allowing filtering on serializer fields\n serializer_class = getattr(view, 'serializer_class')\n if serializer_class is None:\n msg = (\"Cannot use %s on a view which does not have either a \"\n \"'serializer_class' or 'ordering_fields' attribute.\")\n raise ImproperlyConfigured(msg % self.__class__.__name__)\n valid_fields = [\n field.source or field_name\n for field_name, field in serializer_class().fields.items()\n if not getattr(field, 'write_only', False)\n ]\n elif valid_fields == '__all__':\n # View explictly allows filtering on any model field\n valid_fields = [field.name for field in queryset.model._meta.fields]\n valid_fields += queryset.query.aggregates.keys()\n\n return [term for term in ordering if term.lstrip('-') in valid_fields]\n\n def filter_queryset(self, request, queryset, view):\n ordering = self.get_ordering(request)\n\n if ordering:\n # Skip any incorrect parameters\n ordering = self.remove_invalid_fields(queryset, ordering, view)\n\n if not ordering:\n # Use 'ordering' attribute by default\n ordering = self.get_default_ordering(view)\n\n if ordering:\n return queryset.order_by(*ordering)\n\n return queryset\n\n\nclass DjangoObjectPermissionsFilter(BaseFilterBackend):\n \"\"\"\n A filter backend that limits results to those where the requesting user\n has read object level permissions.\n \"\"\"\n def __init__(self):\n assert guardian, 'Using DjangoObjectPermissionsFilter, but django-guardian is not installed'\n\n perm_format = '%(app_label)s.view_%(model_name)s'\n\n def filter_queryset(self, request, queryset, view):\n user = request.user\n model_cls = queryset.model\n kwargs = {\n 'app_label': model_cls._meta.app_label,\n 'model_name': get_model_name(model_cls)\n }\n permission = self.perm_format % kwargs\n return guardian.shortcuts.get_objects_for_user(user, permission, queryset)\n", "path": "rest_framework/filters.py" } ]
diff --git a/rest_framework/filters.py b/rest_framework/filters.py index e20800130d..c580f9351b 100644 --- a/rest_framework/filters.py +++ b/rest_framework/filters.py @@ -56,7 +56,6 @@ class AutoFilterSet(self.default_filter_set): class Meta: model = queryset.model fields = filter_fields - order_by = True return AutoFilterSet return None diff --git a/tests/test_filters.py b/tests/test_filters.py index 47bffd4366..5722fd7c5f 100644 --- a/tests/test_filters.py +++ b/tests/test_filters.py @@ -408,16 +408,61 @@ class SearchListView(generics.ListAPIView): ) -class OrdringFilterModel(models.Model): +class OrderingFilterModel(models.Model): title = models.CharField(max_length=20) text = models.CharField(max_length=100) class OrderingFilterRelatedModel(models.Model): - related_object = models.ForeignKey(OrdringFilterModel, + related_object = models.ForeignKey(OrderingFilterModel, related_name="relateds") +class DjangoFilterOrderingModel(models.Model): + date = models.DateField() + text = models.CharField(max_length=10) + + class Meta: + ordering = ['-date'] + + +class DjangoFilterOrderingTests(TestCase): + def setUp(self): + data = [{ + 'date': datetime.date(2012, 10, 8), + 'text': 'abc' + }, { + 'date': datetime.date(2013, 10, 8), + 'text': 'bcd' + }, { + 'date': datetime.date(2014, 10, 8), + 'text': 'cde' + }] + + for d in data: + DjangoFilterOrderingModel.objects.create(**d) + + def test_default_ordering(self): + class DjangoFilterOrderingView(generics.ListAPIView): + model = DjangoFilterOrderingModel + filter_backends = (filters.DjangoFilterBackend,) + filter_fields = ['text'] + ordering = ('-date',) + + view = DjangoFilterOrderingView.as_view() + request = factory.get('/') + response = view(request) + + self.assertEqual( + response.data, + [ + {'id': 3, 'date': datetime.date(2014, 10, 8), 'text': 'cde'}, + {'id': 2, 'date': datetime.date(2013, 10, 8), 'text': 'bcd'}, + {'id': 1, 'date': datetime.date(2012, 10, 8), 'text': 'abc'} + ] + ) + + class OrderingFilterTests(TestCase): def setUp(self): # Sequence of title/text is: @@ -436,11 +481,11 @@ def setUp(self): chr(idx + ord('b')) + chr(idx + ord('c')) ) - OrdringFilterModel(title=title, text=text).save() + OrderingFilterModel(title=title, text=text).save() def test_ordering(self): class OrderingListView(generics.ListAPIView): - model = OrdringFilterModel + model = OrderingFilterModel filter_backends = (filters.OrderingFilter,) ordering = ('title',) ordering_fields = ('text',) @@ -459,7 +504,7 @@ class OrderingListView(generics.ListAPIView): def test_reverse_ordering(self): class OrderingListView(generics.ListAPIView): - model = OrdringFilterModel + model = OrderingFilterModel filter_backends = (filters.OrderingFilter,) ordering = ('title',) ordering_fields = ('text',) @@ -478,7 +523,7 @@ class OrderingListView(generics.ListAPIView): def test_incorrectfield_ordering(self): class OrderingListView(generics.ListAPIView): - model = OrdringFilterModel + model = OrderingFilterModel filter_backends = (filters.OrderingFilter,) ordering = ('title',) ordering_fields = ('text',) @@ -497,7 +542,7 @@ class OrderingListView(generics.ListAPIView): def test_default_ordering(self): class OrderingListView(generics.ListAPIView): - model = OrdringFilterModel + model = OrderingFilterModel filter_backends = (filters.OrderingFilter,) ordering = ('title',) oredering_fields = ('text',) @@ -516,7 +561,7 @@ class OrderingListView(generics.ListAPIView): def test_default_ordering_using_string(self): class OrderingListView(generics.ListAPIView): - model = 
OrdringFilterModel + model = OrderingFilterModel filter_backends = (filters.OrderingFilter,) ordering = 'title' ordering_fields = ('text',) @@ -536,7 +581,7 @@ class OrderingListView(generics.ListAPIView): def test_ordering_by_aggregate_field(self): # create some related models to aggregate order by num_objs = [2, 5, 3] - for obj, num_relateds in zip(OrdringFilterModel.objects.all(), + for obj, num_relateds in zip(OrderingFilterModel.objects.all(), num_objs): for _ in range(num_relateds): new_related = OrderingFilterRelatedModel( @@ -545,11 +590,11 @@ def test_ordering_by_aggregate_field(self): new_related.save() class OrderingListView(generics.ListAPIView): - model = OrdringFilterModel + model = OrderingFilterModel filter_backends = (filters.OrderingFilter,) ordering = 'title' ordering_fields = '__all__' - queryset = OrdringFilterModel.objects.all().annotate( + queryset = OrderingFilterModel.objects.all().annotate( models.Count("relateds")) view = OrderingListView.as_view() @@ -567,7 +612,7 @@ class OrderingListView(generics.ListAPIView): def test_ordering_with_nonstandard_ordering_param(self): with temporary_setting('ORDERING_PARAM', 'order', filters): class OrderingListView(generics.ListAPIView): - model = OrdringFilterModel + model = OrderingFilterModel filter_backends = (filters.OrderingFilter,) ordering = ('title',) ordering_fields = ('text',)
codespell-project__codespell-3015
ruff is causing PR checks to fail

PR checks are failing on "Run make check" due to this error:

```
ruff .
Error: codespell_lib/_codespell.py:194:17: PLE1300 Unsupported format character '}'
make: *** [Makefile:42: ruff] Error 1
Error: Process completed with exit code 2.
```

Recently, the ruff version increased from `ruff-0.0.282` to `ruff-0.0.283`. Either fix the Python code, or downgrade ruff.
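For context, the flagged line appears to be (per the `before` version of `codespell_lib/_codespell.py` below) a `str.format` call whose format spec contains a nested replacement field, `{width}`; that is valid Python, but ruff 0.0.283 reports it as PLE1300, and the accompanying change sidesteps the warning by rewriting the call as an f-string with the width computed inline. A standalone sketch of the pattern and the rewrite (the sample values are illustrative, not taken from codespell):

```python
# Standalone reproduction of the construct that trips PLE1300 in ruff 0.0.283.
key = "wrongword"   # illustrative values, not codespell data
count = 3

# Nested {width} inside the format spec -- valid Python, but flagged by ruff.
flagged = "{0}{1:{width}}".format(key, count, width=15 - len(key))

# Equivalent f-string with the width expression inlined; this is the form the
# fix switches to, and ruff accepts it.
accepted = f"{key}{count:{15 - len(key)}}"

assert flagged == accepted  # both right-align the count in the same field width
print(accepted)
```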
[ { "content": "#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see\n# https://www.gnu.org/licenses/old-licenses/gpl-2.0.html.\n\"\"\"\nCopyright (C) 2010-2011 Lucas De Marchi <[email protected]>\nCopyright (C) 2011 ProFUSION embedded systems\n\"\"\"\n\nimport argparse\nimport configparser\nimport fnmatch\nimport os\nimport re\nimport sys\nimport textwrap\nfrom typing import Dict, List, Match, Optional, Pattern, Sequence, Set, Tuple\n\n# autogenerated by setuptools_scm\nfrom ._version import __version__ as VERSION # type: ignore # noqa: N812\n\nword_regex_def = r\"[\\w\\-'’]+\"\n# While we want to treat characters like ( or \" as okay for a starting break,\n# these may occur unescaped in URIs, and so we are more restrictive on the\n# endpoint. Emails are more restrictive, so the endpoint remains flexible.\nuri_regex_def = (\n \"(\\\\b(?:https?|[ts]?ftp|file|git|smb)://[^\\\\s]+(?=$|\\\\s)|\"\n \"\\\\b[\\\\w.%+-]+@[\\\\w.-]+\\\\b)\"\n)\n# Pass all misspellings through this translation table to generate\n# alternative misspellings and fixes.\nalt_chars = ((\"'\", \"’\"),)\nencodings = (\"utf-8\", \"iso-8859-1\")\nUSAGE = \"\"\"\n\\t%prog [OPTIONS] [file1 file2 ... fileN]\n\"\"\"\n\nsupported_languages_en = (\"en\", \"en_GB\", \"en_US\", \"en_CA\", \"en_AU\")\nsupported_languages = supported_languages_en\n\n# Users might want to link this file into /usr/local/bin, so we resolve the\n# symbolic link path to the real path if necessary.\n_data_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"data\")\n_builtin_dictionaries = (\n # name, desc, name, err in aspell, correction in aspell, \\\n # err dictionary array, rep dictionary array\n # The arrays must contain the names of aspell dictionaries\n # The aspell tests here aren't the ideal state, but the None's are\n # realistic for obscure words\n (\"clear\", \"for unambiguous errors\", \"\", False, None, supported_languages_en, None),\n (\n \"rare\",\n \"for rare (but valid) words that are likely to be errors\",\n \"_rare\", # noqa: E501\n None,\n None,\n None,\n None,\n ),\n (\n \"informal\",\n \"for making informal words more formal\",\n \"_informal\",\n True,\n True,\n supported_languages_en,\n supported_languages_en,\n ),\n (\n \"usage\",\n \"for replacing phrasing with recommended terms\",\n \"_usage\",\n None,\n None,\n None,\n None,\n ),\n (\n \"code\",\n \"for words from code and/or mathematics that are likely to be typos in other contexts (such as uint)\", # noqa: E501\n \"_code\",\n None,\n None,\n None,\n None,\n ),\n (\n \"names\",\n \"for valid proper names that might be typos\",\n \"_names\",\n None,\n None,\n None,\n None,\n ),\n (\n \"en-GB_to_en-US\",\n \"for corrections from en-GB to en-US\",\n \"_en-GB_to_en-US\", # noqa: E501\n True,\n True,\n (\"en_GB\",),\n (\"en_US\",),\n ),\n)\n_builtin_default = \"clear,rare\"\n\n# docs say os.EX_USAGE et al. 
are only available on Unix systems, so to be safe\n# we protect and just use the values they are on macOS and Linux\nEX_OK = 0\nEX_USAGE = 64\nEX_DATAERR = 65\n\n# OPTIONS:\n#\n# ARGUMENTS:\n# dict_filename The file containing the dictionary of misspellings.\n# If set to '-', it will be read from stdin\n# file1 .. fileN Files to check spelling\n\n\nclass QuietLevels:\n NONE = 0\n ENCODING = 1\n BINARY_FILE = 2\n DISABLED_FIXES = 4\n NON_AUTOMATIC_FIXES = 8\n FIXES = 16\n CONFIG_FILES = 32\n\n\nclass GlobMatch:\n def __init__(self, pattern: Optional[str]) -> None:\n self.pattern_list: Optional[List[str]]\n if pattern:\n # Pattern might be a list of comma-delimited strings\n self.pattern_list = \",\".join(pattern).split(\",\")\n else:\n self.pattern_list = None\n\n def match(self, filename: str) -> bool:\n if self.pattern_list is None:\n return False\n\n return any(fnmatch.fnmatch(filename, p) for p in self.pattern_list)\n\n\nclass Misspelling:\n def __init__(self, data: str, fix: bool, reason: str) -> None:\n self.data = data\n self.fix = fix\n self.reason = reason\n\n\nclass TermColors:\n def __init__(self) -> None:\n self.FILE = \"\\033[33m\"\n self.WWORD = \"\\033[31m\"\n self.FWORD = \"\\033[32m\"\n self.DISABLE = \"\\033[0m\"\n\n def disable(self) -> None:\n self.FILE = \"\"\n self.WWORD = \"\"\n self.FWORD = \"\"\n self.DISABLE = \"\"\n\n\nclass Summary:\n def __init__(self) -> None:\n self.summary: Dict[str, int] = {}\n\n def update(self, wrongword: str) -> None:\n if wrongword in self.summary:\n self.summary[wrongword] += 1\n else:\n self.summary[wrongword] = 1\n\n def __str__(self) -> str:\n keys = list(self.summary.keys())\n keys.sort()\n\n return \"\\n\".join(\n [\n \"{0}{1:{width}}\".format(key, self.summary.get(key), width=15 - len(key))\n for key in keys\n ]\n )\n\n\nclass FileOpener:\n def __init__(self, use_chardet: bool, quiet_level: int) -> None:\n self.use_chardet = use_chardet\n if use_chardet:\n self.init_chardet()\n self.quiet_level = quiet_level\n\n def init_chardet(self) -> None:\n try:\n from chardet.universaldetector import UniversalDetector\n except ImportError:\n raise ImportError(\n \"There's no chardet installed to import from. 
\"\n \"Please, install it and check your PYTHONPATH \"\n \"environment variable\"\n )\n\n self.encdetector = UniversalDetector()\n\n def open(self, filename: str) -> Tuple[List[str], str]:\n if self.use_chardet:\n return self.open_with_chardet(filename)\n return self.open_with_internal(filename)\n\n def open_with_chardet(self, filename: str) -> Tuple[List[str], str]:\n self.encdetector.reset()\n with open(filename, \"rb\") as fb:\n for line in fb:\n self.encdetector.feed(line)\n if self.encdetector.done:\n break\n self.encdetector.close()\n encoding = self.encdetector.result[\"encoding\"]\n\n try:\n f = open(filename, encoding=encoding, newline=\"\")\n except UnicodeDecodeError:\n print(f\"ERROR: Could not detect encoding: {filename}\", file=sys.stderr)\n raise\n except LookupError:\n print(\n f\"ERROR: Don't know how to handle encoding {encoding}: {filename}\",\n file=sys.stderr,\n )\n raise\n else:\n lines = f.readlines()\n f.close()\n\n return lines, f.encoding\n\n def open_with_internal(self, filename: str) -> Tuple[List[str], str]:\n encoding = None\n first_try = True\n for encoding in encodings:\n if first_try:\n first_try = False\n elif not self.quiet_level & QuietLevels.ENCODING:\n print(f'WARNING: Trying next encoding \"{encoding}\"', file=sys.stderr)\n with open(filename, encoding=encoding, newline=\"\") as f:\n try:\n lines = f.readlines()\n except UnicodeDecodeError:\n if not self.quiet_level & QuietLevels.ENCODING:\n print(\n f'WARNING: Cannot decode file using encoding \"{encoding}\": '\n f\"{filename}\",\n file=sys.stderr,\n )\n else:\n break\n else:\n raise Exception(\"Unknown encoding\")\n\n return lines, encoding\n\n\n# -.-:-.-:-.-:-.:-.-:-.-:-.-:-.-:-.:-.-:-.-:-.-:-.-:-.:-.-:-\n\n\n# If someday this breaks, we can just switch to using RawTextHelpFormatter,\n# but it has the disadvantage of not wrapping our long lines.\n\n\nclass NewlineHelpFormatter(argparse.HelpFormatter):\n \"\"\"Help formatter that preserves newlines and deals with lists.\"\"\"\n\n def _split_lines(self, text: str, width: int) -> List[str]:\n parts = text.split(\"\\n\")\n out = []\n for part in parts:\n # Eventually we could allow others...\n indent_start = \"- \"\n if part.startswith(indent_start):\n offset = len(indent_start)\n else:\n offset = 0\n part = part[offset:]\n part = self._whitespace_matcher.sub(\" \", part).strip()\n parts = textwrap.wrap(part, width - offset)\n parts = [\" \" * offset + p for p in parts]\n if offset:\n parts[0] = indent_start + parts[0][offset:]\n out.extend(parts)\n return out\n\n\ndef parse_options(\n args: Sequence[str],\n) -> Tuple[argparse.Namespace, argparse.ArgumentParser, List[str]]:\n parser = argparse.ArgumentParser(formatter_class=NewlineHelpFormatter)\n\n parser.set_defaults(colors=sys.stdout.isatty())\n parser.add_argument(\"--version\", action=\"version\", version=VERSION)\n\n parser.add_argument(\n \"-d\",\n \"--disable-colors\",\n action=\"store_false\",\n dest=\"colors\",\n help=\"disable colors, even when printing to terminal \"\n \"(always set for Windows)\",\n )\n parser.add_argument(\n \"-c\",\n \"--enable-colors\",\n action=\"store_true\",\n dest=\"colors\",\n help=\"enable colors, even when not printing to terminal\",\n )\n\n parser.add_argument(\n \"-w\",\n \"--write-changes\",\n action=\"store_true\",\n default=False,\n help=\"write changes in place if possible\",\n )\n\n parser.add_argument(\n \"-D\",\n \"--dictionary\",\n action=\"append\",\n help=\"custom dictionary file that contains spelling \"\n \"corrections. 
If this flag is not specified or \"\n 'equals \"-\" then the default dictionary is used. '\n \"This option can be specified multiple times.\",\n )\n builtin_opts = \"\\n- \".join(\n [\"\"] + [f\"{d[0]!r} {d[1]}\" for d in _builtin_dictionaries]\n )\n parser.add_argument(\n \"--builtin\",\n dest=\"builtin\",\n default=_builtin_default,\n metavar=\"BUILTIN-LIST\",\n help=\"comma-separated list of builtin dictionaries \"\n 'to include (when \"-D -\" or no \"-D\" is passed). '\n \"Current options are:\" + builtin_opts + \"\\n\"\n \"The default is %(default)r.\",\n )\n parser.add_argument(\n \"--ignore-regex\",\n action=\"store\",\n type=str,\n help=\"regular expression that is used to find \"\n \"patterns to ignore by treating as whitespace. \"\n \"When writing regular expressions, consider \"\n \"ensuring there are boundary non-word chars, \"\n 'e.g., \"\\\\bmatch\\\\b\". Defaults to '\n \"empty/disabled.\",\n )\n parser.add_argument(\n \"-I\",\n \"--ignore-words\",\n action=\"append\",\n metavar=\"FILE\",\n help=\"file that contains words that will be ignored \"\n \"by codespell. File must contain 1 word per line.\"\n \" Words are case sensitive based on how they are \"\n \"written in the dictionary file\",\n )\n parser.add_argument(\n \"-L\",\n \"--ignore-words-list\",\n action=\"append\",\n metavar=\"WORDS\",\n help=\"comma separated list of words to be ignored \"\n \"by codespell. Words are case sensitive based on \"\n \"how they are written in the dictionary file\",\n )\n parser.add_argument(\n \"--uri-ignore-words-list\",\n action=\"append\",\n metavar=\"WORDS\",\n help=\"comma separated list of words to be ignored \"\n \"by codespell in URIs and emails only. Words are \"\n \"case sensitive based on how they are written in \"\n 'the dictionary file. If set to \"*\", all '\n \"misspelling in URIs and emails will be ignored.\",\n )\n parser.add_argument(\n \"-r\",\n \"--regex\",\n action=\"store\",\n type=str,\n help=\"regular expression that is used to find words. \"\n \"By default any alphanumeric character, the \"\n \"underscore, the hyphen, and the apostrophe are \"\n \"used to build words. This option cannot be \"\n \"specified together with --write-changes.\",\n )\n parser.add_argument(\n \"--uri-regex\",\n action=\"store\",\n type=str,\n help=\"regular expression that is used to find URIs \"\n \"and emails. A default expression is provided.\",\n )\n parser.add_argument(\n \"-s\",\n \"--summary\",\n action=\"store_true\",\n default=False,\n help=\"print summary of fixes\",\n )\n\n parser.add_argument(\n \"--count\",\n action=\"store_true\",\n default=False,\n help=\"print the number of errors as the last line of stderr\",\n )\n\n parser.add_argument(\n \"-S\",\n \"--skip\",\n action=\"append\",\n help=\"comma-separated list of files to skip. It \"\n \"accepts globs as well. E.g.: if you want \"\n \"codespell to skip .eps and .txt files, \"\n 'you\\'d give \"*.eps,*.txt\" to this option.',\n )\n\n parser.add_argument(\n \"-x\",\n \"--exclude-file\",\n type=str,\n metavar=\"FILE\",\n help=\"ignore whole lines that match those \"\n \"in the file FILE. 
The lines in FILE \"\n \"should match the to-be-excluded lines exactly\",\n )\n\n parser.add_argument(\n \"-i\",\n \"--interactive\",\n action=\"store\",\n type=int,\n default=0,\n help=\"set interactive mode when writing changes:\\n\"\n \"- 0: no interactivity.\\n\"\n \"- 1: ask for confirmation.\\n\"\n \"- 2: ask user to choose one fix when more than one is available.\\n\" # noqa: E501\n \"- 3: both 1 and 2\",\n )\n\n parser.add_argument(\n \"-q\",\n \"--quiet-level\",\n action=\"store\",\n type=int,\n default=34,\n help=\"bitmask that allows suppressing messages:\\n\"\n \"- 0: print all messages.\\n\"\n \"- 1: disable warnings about wrong encoding.\\n\"\n \"- 2: disable warnings about binary files.\\n\"\n \"- 4: omit warnings about automatic fixes that were disabled in the dictionary.\\n\" # noqa: E501\n \"- 8: don't print anything for non-automatic fixes.\\n\" # noqa: E501\n \"- 16: don't print the list of fixed files.\\n\"\n \"- 32: don't print configuration files.\\n\"\n \"As usual with bitmasks, these levels can be \"\n \"combined; e.g. use 3 for levels 1+2, 7 for \"\n \"1+2+4, 23 for 1+2+4+16, etc. \"\n \"The default mask is %(default)s.\",\n )\n\n parser.add_argument(\n \"-e\",\n \"--hard-encoding-detection\",\n action=\"store_true\",\n default=False,\n help=\"use chardet to detect the encoding of each \"\n \"file. This can slow down codespell, but is more \"\n \"reliable in detecting encodings other than \"\n \"utf-8, iso8859-1, and ascii.\",\n )\n\n parser.add_argument(\n \"-f\",\n \"--check-filenames\",\n action=\"store_true\",\n default=False,\n help=\"check file names as well\",\n )\n\n parser.add_argument(\n \"-H\",\n \"--check-hidden\",\n action=\"store_true\",\n default=False,\n help=\"check hidden files and directories (those \" 'starting with \".\") as well.',\n )\n parser.add_argument(\n \"-A\",\n \"--after-context\",\n type=int,\n metavar=\"LINES\",\n help=\"print LINES of trailing context\",\n )\n parser.add_argument(\n \"-B\",\n \"--before-context\",\n type=int,\n metavar=\"LINES\",\n help=\"print LINES of leading context\",\n )\n parser.add_argument(\n \"-C\",\n \"--context\",\n type=int,\n metavar=\"LINES\",\n help=\"print LINES of surrounding context\",\n )\n parser.add_argument(\"--config\", type=str, help=\"path to config file.\")\n parser.add_argument(\"--toml\", type=str, help=\"path to a pyproject.toml file.\")\n parser.add_argument(\"files\", nargs=\"*\", help=\"files or directories to check\")\n\n # Parse command line options.\n options = parser.parse_args(list(args))\n\n # Load config files and look for ``codespell`` options.\n cfg_files = [\"setup.cfg\", \".codespellrc\"]\n if options.config:\n cfg_files.append(options.config)\n config = configparser.ConfigParser(interpolation=None)\n\n # Read toml before other config files.\n toml_files = []\n tomllib_raise_error = False\n if os.path.isfile(\"pyproject.toml\"):\n toml_files.append(\"pyproject.toml\")\n if options.toml:\n toml_files.append(options.toml)\n tomllib_raise_error = True\n if toml_files:\n if sys.version_info >= (3, 11):\n import tomllib\n else:\n try:\n import tomli as tomllib # type: ignore[no-redef]\n except ImportError as e:\n if tomllib_raise_error:\n raise ImportError(\n f\"tomllib or tomli are required to read pyproject.toml \"\n f\"but could not be imported, got: {e}\"\n ) from None\n tomllib = None # type: ignore[assignment]\n if tomllib is not None:\n for toml_file in toml_files:\n with open(toml_file, \"rb\") as f:\n data = tomllib.load(f).get(\"tool\", {})\n 
config.read_dict(data)\n\n # Collect which config files are going to be used\n used_cfg_files = []\n for cfg_file in cfg_files:\n _cfg = configparser.ConfigParser()\n _cfg.read(cfg_file)\n if _cfg.has_section(\"codespell\"):\n used_cfg_files.append(cfg_file)\n\n # Use config files\n config.read(cfg_files)\n if config.has_section(\"codespell\"):\n # Build a \"fake\" argv list using option name and value.\n cfg_args = []\n for key in config[\"codespell\"]:\n # Add option as arg.\n cfg_args.append(f\"--{key}\")\n # If value is blank, skip.\n val = config[\"codespell\"][key]\n if val:\n cfg_args.append(val)\n\n # Parse config file options.\n options = parser.parse_args(cfg_args)\n\n # Re-parse command line options to override config.\n options = parser.parse_args(list(args), namespace=options)\n\n if not options.files:\n options.files.append(\".\")\n\n return options, parser, used_cfg_files\n\n\ndef parse_ignore_words_option(ignore_words_option: List[str]) -> Set[str]:\n ignore_words = set()\n if ignore_words_option:\n for comma_separated_words in ignore_words_option:\n for word in comma_separated_words.split(\",\"):\n ignore_words.add(word.strip())\n return ignore_words\n\n\ndef build_exclude_hashes(filename: str, exclude_lines: Set[str]) -> None:\n with open(filename, encoding=\"utf-8\") as f:\n for line in f:\n exclude_lines.add(line)\n\n\ndef build_ignore_words(filename: str, ignore_words: Set[str]) -> None:\n with open(filename, encoding=\"utf-8\") as f:\n for line in f:\n ignore_words.add(line.strip())\n\n\ndef add_misspelling(\n key: str,\n data: str,\n misspellings: Dict[str, Misspelling],\n) -> None:\n data = data.strip()\n\n if \",\" in data:\n fix = False\n data, reason = data.rsplit(\",\", 1)\n reason = reason.lstrip()\n else:\n fix = True\n reason = \"\"\n\n misspellings[key] = Misspelling(data, fix, reason)\n\n\ndef build_dict(\n filename: str,\n misspellings: Dict[str, Misspelling],\n ignore_words: Set[str],\n) -> None:\n with open(filename, encoding=\"utf-8\") as f:\n translate_tables = [(x, str.maketrans(x, y)) for x, y in alt_chars]\n for line in f:\n [key, data] = line.split(\"->\")\n # TODO for now, convert both to lower. 
Someday we can maybe add\n # support for fixing caps.\n key = key.lower()\n data = data.lower()\n if key not in ignore_words:\n add_misspelling(key, data, misspellings)\n # generate alternative misspellings/fixes\n for x, table in translate_tables:\n if x in key:\n alt_key = key.translate(table)\n alt_data = data.translate(table)\n if alt_key not in ignore_words:\n add_misspelling(alt_key, alt_data, misspellings)\n\n\ndef is_hidden(filename: str, check_hidden: bool) -> bool:\n bfilename = os.path.basename(filename)\n\n return bfilename not in (\"\", \".\", \"..\") and (\n not check_hidden and bfilename[0] == \".\"\n )\n\n\ndef is_text_file(filename: str) -> bool:\n with open(filename, mode=\"rb\") as f:\n s = f.read(1024)\n return b\"\\x00\" not in s\n\n\ndef fix_case(word: str, fixword: str) -> str:\n if word == word.capitalize():\n return \", \".join(w.strip().capitalize() for w in fixword.split(\",\"))\n if word == word.upper():\n return fixword.upper()\n # they are both lower case\n # or we don't have any idea\n return fixword\n\n\ndef ask_for_word_fix(\n line: str,\n match: Match[str],\n misspelling: Misspelling,\n interactivity: int,\n colors: TermColors,\n) -> Tuple[bool, str]:\n wrongword = match.group()\n if interactivity <= 0:\n return misspelling.fix, fix_case(wrongword, misspelling.data)\n\n line_ui = (\n f\"{line[:match.start()]}\"\n f\"{colors.WWORD}{wrongword}{colors.DISABLE}\"\n f\"{line[match.end():]}\"\n )\n\n if misspelling.fix and interactivity & 1:\n r = \"\"\n fixword = fix_case(wrongword, misspelling.data)\n while not r:\n print(f\"{line_ui}\\t{wrongword} ==> {fixword} (Y/n) \", end=\"\", flush=True)\n r = sys.stdin.readline().strip().upper()\n if not r:\n r = \"Y\"\n if r not in (\"Y\", \"N\"):\n print(\"Say 'y' or 'n'\")\n r = \"\"\n\n if r == \"N\":\n misspelling.fix = False\n\n elif (interactivity & 2) and not misspelling.reason:\n # if it is not disabled, i.e. 
it just has more than one possible fix,\n # we ask the user which word to use\n\n r = \"\"\n opt = [w.strip() for w in misspelling.data.split(\",\")]\n while not r:\n print(f\"{line_ui} Choose an option (blank for none): \", end=\"\")\n for i, o in enumerate(opt):\n fixword = fix_case(wrongword, o)\n print(f\" {i}) {fixword}\", end=\"\")\n print(\": \", end=\"\", flush=True)\n\n n = sys.stdin.readline().strip()\n if not n:\n break\n\n try:\n i = int(n)\n r = opt[i]\n except (ValueError, IndexError):\n print(\"Not a valid option\\n\")\n\n if r:\n misspelling.fix = True\n misspelling.data = r\n\n return misspelling.fix, fix_case(wrongword, misspelling.data)\n\n\ndef print_context(\n lines: List[str],\n index: int,\n context: Tuple[int, int],\n) -> None:\n # context = (context_before, context_after)\n for i in range(index - context[0], index + context[1] + 1):\n if 0 <= i < len(lines):\n print(f\"{'>' if i == index else ':'} {lines[i].rstrip()}\")\n\n\ndef _ignore_word_sub(\n text: str,\n ignore_word_regex: Optional[Pattern[str]],\n) -> str:\n if ignore_word_regex:\n text = ignore_word_regex.sub(\" \", text)\n return text\n\n\ndef extract_words(\n text: str,\n word_regex: Pattern[str],\n ignore_word_regex: Optional[Pattern[str]],\n) -> List[str]:\n return word_regex.findall(_ignore_word_sub(text, ignore_word_regex))\n\n\ndef extract_words_iter(\n text: str,\n word_regex: Pattern[str],\n ignore_word_regex: Optional[Pattern[str]],\n) -> List[Match[str]]:\n return list(word_regex.finditer(_ignore_word_sub(text, ignore_word_regex)))\n\n\ndef apply_uri_ignore_words(\n check_matches: List[Match[str]],\n line: str,\n word_regex: Pattern[str],\n ignore_word_regex: Optional[Pattern[str]],\n uri_regex: Pattern[str],\n uri_ignore_words: Set[str],\n) -> List[Match[str]]:\n if not uri_ignore_words:\n return check_matches\n for uri in re.findall(uri_regex, line):\n for uri_word in extract_words(uri, word_regex, ignore_word_regex):\n if uri_word in uri_ignore_words:\n # determine/remove only the first among matches\n for i, match in enumerate(check_matches):\n if match.group() == uri_word:\n check_matches = check_matches[:i] + check_matches[i + 1 :]\n break\n return check_matches\n\n\ndef parse_file(\n filename: str,\n colors: TermColors,\n summary: Optional[Summary],\n misspellings: Dict[str, Misspelling],\n exclude_lines: Set[str],\n file_opener: FileOpener,\n word_regex: Pattern[str],\n ignore_word_regex: Optional[Pattern[str]],\n uri_regex: Pattern[str],\n uri_ignore_words: Set[str],\n context: Optional[Tuple[int, int]],\n options: argparse.Namespace,\n) -> int:\n bad_count = 0\n lines = None\n changed = False\n encoding = encodings[0] # if not defined, use UTF-8\n\n if filename == \"-\":\n f = sys.stdin\n lines = f.readlines()\n else:\n if options.check_filenames:\n for word in extract_words(filename, word_regex, ignore_word_regex):\n lword = word.lower()\n if lword not in misspellings:\n continue\n fix = misspellings[lword].fix\n fixword = fix_case(word, misspellings[lword].data)\n\n if summary and fix:\n summary.update(lword)\n\n cfilename = f\"{colors.FILE}{filename}{colors.DISABLE}\"\n cwrongword = f\"{colors.WWORD}{word}{colors.DISABLE}\"\n crightword = f\"{colors.FWORD}{fixword}{colors.DISABLE}\"\n\n reason = misspellings[lword].reason\n if reason:\n if options.quiet_level & QuietLevels.DISABLED_FIXES:\n continue\n creason = f\" | {colors.FILE}{reason}{colors.DISABLE}\"\n else:\n if options.quiet_level & QuietLevels.NON_AUTOMATIC_FIXES:\n continue\n creason = \"\"\n\n bad_count += 1\n\n 
print(f\"{cfilename}: {cwrongword} ==> {crightword}{creason}\")\n\n # ignore irregular files\n if not os.path.isfile(filename):\n return bad_count\n\n try:\n text = is_text_file(filename)\n except PermissionError as e:\n print(f\"WARNING: {e.strerror}: {filename}\", file=sys.stderr)\n return bad_count\n except OSError:\n return bad_count\n\n if not text:\n if not options.quiet_level & QuietLevels.BINARY_FILE:\n print(f\"WARNING: Binary file: {filename}\", file=sys.stderr)\n return bad_count\n try:\n lines, encoding = file_opener.open(filename)\n except OSError:\n return bad_count\n\n for i, line in enumerate(lines):\n if line in exclude_lines:\n continue\n\n fixed_words = set()\n asked_for = set()\n\n # If all URI spelling errors will be ignored, erase any URI before\n # extracting words. Otherwise, apply ignores after extracting words.\n # This ensures that if a URI ignore word occurs both inside a URI and\n # outside, it will still be a spelling error.\n if \"*\" in uri_ignore_words:\n line = uri_regex.sub(\" \", line)\n check_matches = extract_words_iter(line, word_regex, ignore_word_regex)\n if \"*\" not in uri_ignore_words:\n check_matches = apply_uri_ignore_words(\n check_matches,\n line,\n word_regex,\n ignore_word_regex,\n uri_regex,\n uri_ignore_words,\n )\n for match in check_matches:\n word = match.group()\n lword = word.lower()\n if lword in misspellings:\n # Sometimes we find a 'misspelling' which is actually a valid word\n # preceded by a string escape sequence. Ignore such cases as\n # they're usually false alarms; see issue #17 among others.\n char_before_idx = match.start() - 1\n if (\n char_before_idx >= 0\n and line[char_before_idx] == \"\\\\\"\n # bell, backspace, formfeed, newline, carriage-return, tab, vtab.\n and word.startswith((\"a\", \"b\", \"f\", \"n\", \"r\", \"t\", \"v\"))\n and lword[1:] not in misspellings\n ):\n continue\n\n context_shown = False\n fix = misspellings[lword].fix\n fixword = fix_case(word, misspellings[lword].data)\n\n if options.interactive and lword not in asked_for:\n if context is not None:\n context_shown = True\n print_context(lines, i, context)\n fix, fixword = ask_for_word_fix(\n lines[i],\n match,\n misspellings[lword],\n options.interactive,\n colors=colors,\n )\n asked_for.add(lword)\n\n if summary and fix:\n summary.update(lword)\n\n if word in fixed_words: # can skip because of re.sub below\n continue\n\n if options.write_changes and fix:\n changed = True\n lines[i] = re.sub(r\"\\b%s\\b\" % word, fixword, lines[i])\n fixed_words.add(word)\n continue\n\n # otherwise warning was explicitly set by interactive mode\n if (\n options.interactive & 2\n and not fix\n and not misspellings[lword].reason\n ):\n continue\n\n cfilename = f\"{colors.FILE}{filename}{colors.DISABLE}\"\n cline = f\"{colors.FILE}{i + 1}{colors.DISABLE}\"\n cwrongword = f\"{colors.WWORD}{word}{colors.DISABLE}\"\n crightword = f\"{colors.FWORD}{fixword}{colors.DISABLE}\"\n\n reason = misspellings[lword].reason\n if reason:\n if options.quiet_level & QuietLevels.DISABLED_FIXES:\n continue\n creason = f\" | {colors.FILE}{reason}{colors.DISABLE}\"\n else:\n if options.quiet_level & QuietLevels.NON_AUTOMATIC_FIXES:\n continue\n creason = \"\"\n\n # If we get to this point (uncorrected error) we should change\n # our bad_count and thus return value\n bad_count += 1\n\n if (not context_shown) and (context is not None):\n print_context(lines, i, context)\n if filename != \"-\":\n print(\n f\"{cfilename}:{cline}: {cwrongword} \"\n f\"==> {crightword}{creason}\"\n )\n else:\n 
print(\n f\"{cline}: {line.strip()}\\n\\t{cwrongword} \"\n f\"==> {crightword}{creason}\"\n )\n\n if changed:\n if filename == \"-\":\n print(\"---\")\n for line in lines:\n print(line, end=\"\")\n else:\n if not options.quiet_level & QuietLevels.FIXES:\n print(\n f\"{colors.FWORD}FIXED:{colors.DISABLE} {filename}\",\n file=sys.stderr,\n )\n with open(filename, \"w\", encoding=encoding, newline=\"\") as f:\n f.writelines(lines)\n return bad_count\n\n\ndef _script_main() -> int:\n \"\"\"Wrap to main() for setuptools.\"\"\"\n return main(*sys.argv[1:])\n\n\ndef main(*args: str) -> int:\n \"\"\"Contains flow control\"\"\"\n options, parser, used_cfg_files = parse_options(args)\n\n # Report used config files\n if not options.quiet_level & QuietLevels.CONFIG_FILES:\n if len(used_cfg_files) > 0:\n print(\"Used config files:\")\n for ifile, cfg_file in enumerate(used_cfg_files, start=1):\n print(f\" {ifile}: {cfg_file}\")\n\n if options.regex and options.write_changes:\n print(\n \"ERROR: --write-changes cannot be used together with --regex\",\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n word_regex = options.regex or word_regex_def\n try:\n word_regex = re.compile(word_regex)\n except re.error as e:\n print(f'ERROR: invalid --regex \"{word_regex}\" ({e})', file=sys.stderr)\n parser.print_help()\n return EX_USAGE\n\n if options.ignore_regex:\n try:\n ignore_word_regex = re.compile(options.ignore_regex)\n except re.error as e:\n print(\n f'ERROR: invalid --ignore-regex \"{options.ignore_regex}\" ({e})',\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n else:\n ignore_word_regex = None\n\n ignore_words_files = options.ignore_words or []\n ignore_words = parse_ignore_words_option(options.ignore_words_list)\n for ignore_words_file in ignore_words_files:\n if not os.path.isfile(ignore_words_file):\n print(\n f\"ERROR: cannot find ignore-words file: {ignore_words_file}\",\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n build_ignore_words(ignore_words_file, ignore_words)\n\n uri_regex = options.uri_regex or uri_regex_def\n try:\n uri_regex = re.compile(uri_regex)\n except re.error as e:\n print(\n f'ERROR: invalid --uri-regex \"{uri_regex}\" ({e})',\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n uri_ignore_words = parse_ignore_words_option(options.uri_ignore_words_list)\n\n dictionaries = options.dictionary if options.dictionary else [\"-\"]\n\n use_dictionaries = []\n for dictionary in dictionaries:\n if dictionary == \"-\":\n # figure out which builtin dictionaries to use\n use = sorted(set(options.builtin.split(\",\")))\n for u in use:\n for builtin in _builtin_dictionaries:\n if builtin[0] == u:\n use_dictionaries.append(\n os.path.join(_data_root, f\"dictionary{builtin[2]}.txt\")\n )\n break\n else:\n print(\n f\"ERROR: Unknown builtin dictionary: {u}\",\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n else:\n if not os.path.isfile(dictionary):\n print(\n f\"ERROR: cannot find dictionary file: {dictionary}\",\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n use_dictionaries.append(dictionary)\n misspellings: Dict[str, Misspelling] = {}\n for dictionary in use_dictionaries:\n build_dict(dictionary, misspellings, ignore_words)\n colors = TermColors()\n if not options.colors or sys.platform == \"win32\":\n colors.disable()\n\n if options.summary:\n summary = Summary()\n else:\n summary = None\n\n context = None\n if options.context is not None:\n if (options.before_context is not None) or 
(options.after_context is not None):\n print(\n \"ERROR: --context/-C cannot be used together with \"\n \"--context-before/-B or --context-after/-A\",\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n context_both = max(0, options.context)\n context = (context_both, context_both)\n elif (options.before_context is not None) or (options.after_context is not None):\n context_before = 0\n context_after = 0\n if options.before_context is not None:\n context_before = max(0, options.before_context)\n if options.after_context is not None:\n context_after = max(0, options.after_context)\n context = (context_before, context_after)\n\n exclude_lines: Set[str] = set()\n if options.exclude_file:\n build_exclude_hashes(options.exclude_file, exclude_lines)\n\n file_opener = FileOpener(options.hard_encoding_detection, options.quiet_level)\n\n glob_match = GlobMatch(options.skip)\n try:\n glob_match.match(\"/random/path\") # does not need a real path\n except re.error:\n print(\n \"ERROR: --skip/-S has been fed an invalid glob, \"\n \"try escaping special characters\",\n file=sys.stderr,\n )\n return EX_USAGE\n\n bad_count = 0\n for filename in options.files:\n # ignore hidden files\n if is_hidden(filename, options.check_hidden):\n continue\n\n if os.path.isdir(filename):\n for root, dirs, files in os.walk(filename):\n if glob_match.match(root): # skip (absolute) directories\n del dirs[:]\n continue\n if is_hidden(root, options.check_hidden): # dir itself hidden\n continue\n for file_ in files:\n # ignore hidden files in directories\n if is_hidden(file_, options.check_hidden):\n continue\n if glob_match.match(file_): # skip files\n continue\n fname = os.path.join(root, file_)\n if glob_match.match(fname): # skip paths\n continue\n bad_count += parse_file(\n fname,\n colors,\n summary,\n misspellings,\n exclude_lines,\n file_opener,\n word_regex,\n ignore_word_regex,\n uri_regex,\n uri_ignore_words,\n context,\n options,\n )\n\n # skip (relative) directories\n dirs[:] = [\n dir_\n for dir_ in dirs\n if not glob_match.match(dir_)\n and not is_hidden(dir_, options.check_hidden)\n ]\n\n elif not glob_match.match(filename): # skip files\n bad_count += parse_file(\n filename,\n colors,\n summary,\n misspellings,\n exclude_lines,\n file_opener,\n word_regex,\n ignore_word_regex,\n uri_regex,\n uri_ignore_words,\n context,\n options,\n )\n\n if summary:\n print(\"\\n-------8<-------\\nSUMMARY:\")\n print(summary)\n if options.count:\n print(bad_count, file=sys.stderr)\n return EX_DATAERR if bad_count else EX_OK\n", "path": "codespell_lib/_codespell.py" } ]
[ { "content": "#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see\n# https://www.gnu.org/licenses/old-licenses/gpl-2.0.html.\n\"\"\"\nCopyright (C) 2010-2011 Lucas De Marchi <[email protected]>\nCopyright (C) 2011 ProFUSION embedded systems\n\"\"\"\n\nimport argparse\nimport configparser\nimport fnmatch\nimport os\nimport re\nimport sys\nimport textwrap\nfrom typing import Dict, List, Match, Optional, Pattern, Sequence, Set, Tuple\n\n# autogenerated by setuptools_scm\nfrom ._version import __version__ as VERSION # type: ignore # noqa: N812\n\nword_regex_def = r\"[\\w\\-'’]+\"\n# While we want to treat characters like ( or \" as okay for a starting break,\n# these may occur unescaped in URIs, and so we are more restrictive on the\n# endpoint. Emails are more restrictive, so the endpoint remains flexible.\nuri_regex_def = (\n \"(\\\\b(?:https?|[ts]?ftp|file|git|smb)://[^\\\\s]+(?=$|\\\\s)|\"\n \"\\\\b[\\\\w.%+-]+@[\\\\w.-]+\\\\b)\"\n)\n# Pass all misspellings through this translation table to generate\n# alternative misspellings and fixes.\nalt_chars = ((\"'\", \"’\"),)\nencodings = (\"utf-8\", \"iso-8859-1\")\nUSAGE = \"\"\"\n\\t%prog [OPTIONS] [file1 file2 ... fileN]\n\"\"\"\n\nsupported_languages_en = (\"en\", \"en_GB\", \"en_US\", \"en_CA\", \"en_AU\")\nsupported_languages = supported_languages_en\n\n# Users might want to link this file into /usr/local/bin, so we resolve the\n# symbolic link path to the real path if necessary.\n_data_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"data\")\n_builtin_dictionaries = (\n # name, desc, name, err in aspell, correction in aspell, \\\n # err dictionary array, rep dictionary array\n # The arrays must contain the names of aspell dictionaries\n # The aspell tests here aren't the ideal state, but the None's are\n # realistic for obscure words\n (\"clear\", \"for unambiguous errors\", \"\", False, None, supported_languages_en, None),\n (\n \"rare\",\n \"for rare (but valid) words that are likely to be errors\",\n \"_rare\", # noqa: E501\n None,\n None,\n None,\n None,\n ),\n (\n \"informal\",\n \"for making informal words more formal\",\n \"_informal\",\n True,\n True,\n supported_languages_en,\n supported_languages_en,\n ),\n (\n \"usage\",\n \"for replacing phrasing with recommended terms\",\n \"_usage\",\n None,\n None,\n None,\n None,\n ),\n (\n \"code\",\n \"for words from code and/or mathematics that are likely to be typos in other contexts (such as uint)\", # noqa: E501\n \"_code\",\n None,\n None,\n None,\n None,\n ),\n (\n \"names\",\n \"for valid proper names that might be typos\",\n \"_names\",\n None,\n None,\n None,\n None,\n ),\n (\n \"en-GB_to_en-US\",\n \"for corrections from en-GB to en-US\",\n \"_en-GB_to_en-US\", # noqa: E501\n True,\n True,\n (\"en_GB\",),\n (\"en_US\",),\n ),\n)\n_builtin_default = \"clear,rare\"\n\n# docs say os.EX_USAGE et al. 
are only available on Unix systems, so to be safe\n# we protect and just use the values they are on macOS and Linux\nEX_OK = 0\nEX_USAGE = 64\nEX_DATAERR = 65\n\n# OPTIONS:\n#\n# ARGUMENTS:\n# dict_filename The file containing the dictionary of misspellings.\n# If set to '-', it will be read from stdin\n# file1 .. fileN Files to check spelling\n\n\nclass QuietLevels:\n NONE = 0\n ENCODING = 1\n BINARY_FILE = 2\n DISABLED_FIXES = 4\n NON_AUTOMATIC_FIXES = 8\n FIXES = 16\n CONFIG_FILES = 32\n\n\nclass GlobMatch:\n def __init__(self, pattern: Optional[str]) -> None:\n self.pattern_list: Optional[List[str]]\n if pattern:\n # Pattern might be a list of comma-delimited strings\n self.pattern_list = \",\".join(pattern).split(\",\")\n else:\n self.pattern_list = None\n\n def match(self, filename: str) -> bool:\n if self.pattern_list is None:\n return False\n\n return any(fnmatch.fnmatch(filename, p) for p in self.pattern_list)\n\n\nclass Misspelling:\n def __init__(self, data: str, fix: bool, reason: str) -> None:\n self.data = data\n self.fix = fix\n self.reason = reason\n\n\nclass TermColors:\n def __init__(self) -> None:\n self.FILE = \"\\033[33m\"\n self.WWORD = \"\\033[31m\"\n self.FWORD = \"\\033[32m\"\n self.DISABLE = \"\\033[0m\"\n\n def disable(self) -> None:\n self.FILE = \"\"\n self.WWORD = \"\"\n self.FWORD = \"\"\n self.DISABLE = \"\"\n\n\nclass Summary:\n def __init__(self) -> None:\n self.summary: Dict[str, int] = {}\n\n def update(self, wrongword: str) -> None:\n if wrongword in self.summary:\n self.summary[wrongword] += 1\n else:\n self.summary[wrongword] = 1\n\n def __str__(self) -> str:\n keys = list(self.summary.keys())\n keys.sort()\n\n return \"\\n\".join(\n [f\"{key}{self.summary.get(key):{15 - len(key)}}\" for key in keys]\n )\n\n\nclass FileOpener:\n def __init__(self, use_chardet: bool, quiet_level: int) -> None:\n self.use_chardet = use_chardet\n if use_chardet:\n self.init_chardet()\n self.quiet_level = quiet_level\n\n def init_chardet(self) -> None:\n try:\n from chardet.universaldetector import UniversalDetector\n except ImportError:\n raise ImportError(\n \"There's no chardet installed to import from. 
\"\n \"Please, install it and check your PYTHONPATH \"\n \"environment variable\"\n )\n\n self.encdetector = UniversalDetector()\n\n def open(self, filename: str) -> Tuple[List[str], str]:\n if self.use_chardet:\n return self.open_with_chardet(filename)\n return self.open_with_internal(filename)\n\n def open_with_chardet(self, filename: str) -> Tuple[List[str], str]:\n self.encdetector.reset()\n with open(filename, \"rb\") as fb:\n for line in fb:\n self.encdetector.feed(line)\n if self.encdetector.done:\n break\n self.encdetector.close()\n encoding = self.encdetector.result[\"encoding\"]\n\n try:\n f = open(filename, encoding=encoding, newline=\"\")\n except UnicodeDecodeError:\n print(f\"ERROR: Could not detect encoding: {filename}\", file=sys.stderr)\n raise\n except LookupError:\n print(\n f\"ERROR: Don't know how to handle encoding {encoding}: {filename}\",\n file=sys.stderr,\n )\n raise\n else:\n lines = f.readlines()\n f.close()\n\n return lines, f.encoding\n\n def open_with_internal(self, filename: str) -> Tuple[List[str], str]:\n encoding = None\n first_try = True\n for encoding in encodings:\n if first_try:\n first_try = False\n elif not self.quiet_level & QuietLevels.ENCODING:\n print(f'WARNING: Trying next encoding \"{encoding}\"', file=sys.stderr)\n with open(filename, encoding=encoding, newline=\"\") as f:\n try:\n lines = f.readlines()\n except UnicodeDecodeError:\n if not self.quiet_level & QuietLevels.ENCODING:\n print(\n f'WARNING: Cannot decode file using encoding \"{encoding}\": '\n f\"{filename}\",\n file=sys.stderr,\n )\n else:\n break\n else:\n raise Exception(\"Unknown encoding\")\n\n return lines, encoding\n\n\n# -.-:-.-:-.-:-.:-.-:-.-:-.-:-.-:-.:-.-:-.-:-.-:-.-:-.:-.-:-\n\n\n# If someday this breaks, we can just switch to using RawTextHelpFormatter,\n# but it has the disadvantage of not wrapping our long lines.\n\n\nclass NewlineHelpFormatter(argparse.HelpFormatter):\n \"\"\"Help formatter that preserves newlines and deals with lists.\"\"\"\n\n def _split_lines(self, text: str, width: int) -> List[str]:\n parts = text.split(\"\\n\")\n out = []\n for part in parts:\n # Eventually we could allow others...\n indent_start = \"- \"\n if part.startswith(indent_start):\n offset = len(indent_start)\n else:\n offset = 0\n part = part[offset:]\n part = self._whitespace_matcher.sub(\" \", part).strip()\n parts = textwrap.wrap(part, width - offset)\n parts = [\" \" * offset + p for p in parts]\n if offset:\n parts[0] = indent_start + parts[0][offset:]\n out.extend(parts)\n return out\n\n\ndef parse_options(\n args: Sequence[str],\n) -> Tuple[argparse.Namespace, argparse.ArgumentParser, List[str]]:\n parser = argparse.ArgumentParser(formatter_class=NewlineHelpFormatter)\n\n parser.set_defaults(colors=sys.stdout.isatty())\n parser.add_argument(\"--version\", action=\"version\", version=VERSION)\n\n parser.add_argument(\n \"-d\",\n \"--disable-colors\",\n action=\"store_false\",\n dest=\"colors\",\n help=\"disable colors, even when printing to terminal \"\n \"(always set for Windows)\",\n )\n parser.add_argument(\n \"-c\",\n \"--enable-colors\",\n action=\"store_true\",\n dest=\"colors\",\n help=\"enable colors, even when not printing to terminal\",\n )\n\n parser.add_argument(\n \"-w\",\n \"--write-changes\",\n action=\"store_true\",\n default=False,\n help=\"write changes in place if possible\",\n )\n\n parser.add_argument(\n \"-D\",\n \"--dictionary\",\n action=\"append\",\n help=\"custom dictionary file that contains spelling \"\n \"corrections. 
If this flag is not specified or \"\n 'equals \"-\" then the default dictionary is used. '\n \"This option can be specified multiple times.\",\n )\n builtin_opts = \"\\n- \".join(\n [\"\"] + [f\"{d[0]!r} {d[1]}\" for d in _builtin_dictionaries]\n )\n parser.add_argument(\n \"--builtin\",\n dest=\"builtin\",\n default=_builtin_default,\n metavar=\"BUILTIN-LIST\",\n help=\"comma-separated list of builtin dictionaries \"\n 'to include (when \"-D -\" or no \"-D\" is passed). '\n \"Current options are:\" + builtin_opts + \"\\n\"\n \"The default is %(default)r.\",\n )\n parser.add_argument(\n \"--ignore-regex\",\n action=\"store\",\n type=str,\n help=\"regular expression that is used to find \"\n \"patterns to ignore by treating as whitespace. \"\n \"When writing regular expressions, consider \"\n \"ensuring there are boundary non-word chars, \"\n 'e.g., \"\\\\bmatch\\\\b\". Defaults to '\n \"empty/disabled.\",\n )\n parser.add_argument(\n \"-I\",\n \"--ignore-words\",\n action=\"append\",\n metavar=\"FILE\",\n help=\"file that contains words that will be ignored \"\n \"by codespell. File must contain 1 word per line.\"\n \" Words are case sensitive based on how they are \"\n \"written in the dictionary file\",\n )\n parser.add_argument(\n \"-L\",\n \"--ignore-words-list\",\n action=\"append\",\n metavar=\"WORDS\",\n help=\"comma separated list of words to be ignored \"\n \"by codespell. Words are case sensitive based on \"\n \"how they are written in the dictionary file\",\n )\n parser.add_argument(\n \"--uri-ignore-words-list\",\n action=\"append\",\n metavar=\"WORDS\",\n help=\"comma separated list of words to be ignored \"\n \"by codespell in URIs and emails only. Words are \"\n \"case sensitive based on how they are written in \"\n 'the dictionary file. If set to \"*\", all '\n \"misspelling in URIs and emails will be ignored.\",\n )\n parser.add_argument(\n \"-r\",\n \"--regex\",\n action=\"store\",\n type=str,\n help=\"regular expression that is used to find words. \"\n \"By default any alphanumeric character, the \"\n \"underscore, the hyphen, and the apostrophe are \"\n \"used to build words. This option cannot be \"\n \"specified together with --write-changes.\",\n )\n parser.add_argument(\n \"--uri-regex\",\n action=\"store\",\n type=str,\n help=\"regular expression that is used to find URIs \"\n \"and emails. A default expression is provided.\",\n )\n parser.add_argument(\n \"-s\",\n \"--summary\",\n action=\"store_true\",\n default=False,\n help=\"print summary of fixes\",\n )\n\n parser.add_argument(\n \"--count\",\n action=\"store_true\",\n default=False,\n help=\"print the number of errors as the last line of stderr\",\n )\n\n parser.add_argument(\n \"-S\",\n \"--skip\",\n action=\"append\",\n help=\"comma-separated list of files to skip. It \"\n \"accepts globs as well. E.g.: if you want \"\n \"codespell to skip .eps and .txt files, \"\n 'you\\'d give \"*.eps,*.txt\" to this option.',\n )\n\n parser.add_argument(\n \"-x\",\n \"--exclude-file\",\n type=str,\n metavar=\"FILE\",\n help=\"ignore whole lines that match those \"\n \"in the file FILE. 
The lines in FILE \"\n \"should match the to-be-excluded lines exactly\",\n )\n\n parser.add_argument(\n \"-i\",\n \"--interactive\",\n action=\"store\",\n type=int,\n default=0,\n help=\"set interactive mode when writing changes:\\n\"\n \"- 0: no interactivity.\\n\"\n \"- 1: ask for confirmation.\\n\"\n \"- 2: ask user to choose one fix when more than one is available.\\n\" # noqa: E501\n \"- 3: both 1 and 2\",\n )\n\n parser.add_argument(\n \"-q\",\n \"--quiet-level\",\n action=\"store\",\n type=int,\n default=34,\n help=\"bitmask that allows suppressing messages:\\n\"\n \"- 0: print all messages.\\n\"\n \"- 1: disable warnings about wrong encoding.\\n\"\n \"- 2: disable warnings about binary files.\\n\"\n \"- 4: omit warnings about automatic fixes that were disabled in the dictionary.\\n\" # noqa: E501\n \"- 8: don't print anything for non-automatic fixes.\\n\" # noqa: E501\n \"- 16: don't print the list of fixed files.\\n\"\n \"- 32: don't print configuration files.\\n\"\n \"As usual with bitmasks, these levels can be \"\n \"combined; e.g. use 3 for levels 1+2, 7 for \"\n \"1+2+4, 23 for 1+2+4+16, etc. \"\n \"The default mask is %(default)s.\",\n )\n\n parser.add_argument(\n \"-e\",\n \"--hard-encoding-detection\",\n action=\"store_true\",\n default=False,\n help=\"use chardet to detect the encoding of each \"\n \"file. This can slow down codespell, but is more \"\n \"reliable in detecting encodings other than \"\n \"utf-8, iso8859-1, and ascii.\",\n )\n\n parser.add_argument(\n \"-f\",\n \"--check-filenames\",\n action=\"store_true\",\n default=False,\n help=\"check file names as well\",\n )\n\n parser.add_argument(\n \"-H\",\n \"--check-hidden\",\n action=\"store_true\",\n default=False,\n help=\"check hidden files and directories (those \" 'starting with \".\") as well.',\n )\n parser.add_argument(\n \"-A\",\n \"--after-context\",\n type=int,\n metavar=\"LINES\",\n help=\"print LINES of trailing context\",\n )\n parser.add_argument(\n \"-B\",\n \"--before-context\",\n type=int,\n metavar=\"LINES\",\n help=\"print LINES of leading context\",\n )\n parser.add_argument(\n \"-C\",\n \"--context\",\n type=int,\n metavar=\"LINES\",\n help=\"print LINES of surrounding context\",\n )\n parser.add_argument(\"--config\", type=str, help=\"path to config file.\")\n parser.add_argument(\"--toml\", type=str, help=\"path to a pyproject.toml file.\")\n parser.add_argument(\"files\", nargs=\"*\", help=\"files or directories to check\")\n\n # Parse command line options.\n options = parser.parse_args(list(args))\n\n # Load config files and look for ``codespell`` options.\n cfg_files = [\"setup.cfg\", \".codespellrc\"]\n if options.config:\n cfg_files.append(options.config)\n config = configparser.ConfigParser(interpolation=None)\n\n # Read toml before other config files.\n toml_files = []\n tomllib_raise_error = False\n if os.path.isfile(\"pyproject.toml\"):\n toml_files.append(\"pyproject.toml\")\n if options.toml:\n toml_files.append(options.toml)\n tomllib_raise_error = True\n if toml_files:\n if sys.version_info >= (3, 11):\n import tomllib\n else:\n try:\n import tomli as tomllib # type: ignore[no-redef]\n except ImportError as e:\n if tomllib_raise_error:\n raise ImportError(\n f\"tomllib or tomli are required to read pyproject.toml \"\n f\"but could not be imported, got: {e}\"\n ) from None\n tomllib = None # type: ignore[assignment]\n if tomllib is not None:\n for toml_file in toml_files:\n with open(toml_file, \"rb\") as f:\n data = tomllib.load(f).get(\"tool\", {})\n 
config.read_dict(data)\n\n # Collect which config files are going to be used\n used_cfg_files = []\n for cfg_file in cfg_files:\n _cfg = configparser.ConfigParser()\n _cfg.read(cfg_file)\n if _cfg.has_section(\"codespell\"):\n used_cfg_files.append(cfg_file)\n\n # Use config files\n config.read(cfg_files)\n if config.has_section(\"codespell\"):\n # Build a \"fake\" argv list using option name and value.\n cfg_args = []\n for key in config[\"codespell\"]:\n # Add option as arg.\n cfg_args.append(f\"--{key}\")\n # If value is blank, skip.\n val = config[\"codespell\"][key]\n if val:\n cfg_args.append(val)\n\n # Parse config file options.\n options = parser.parse_args(cfg_args)\n\n # Re-parse command line options to override config.\n options = parser.parse_args(list(args), namespace=options)\n\n if not options.files:\n options.files.append(\".\")\n\n return options, parser, used_cfg_files\n\n\ndef parse_ignore_words_option(ignore_words_option: List[str]) -> Set[str]:\n ignore_words = set()\n if ignore_words_option:\n for comma_separated_words in ignore_words_option:\n for word in comma_separated_words.split(\",\"):\n ignore_words.add(word.strip())\n return ignore_words\n\n\ndef build_exclude_hashes(filename: str, exclude_lines: Set[str]) -> None:\n with open(filename, encoding=\"utf-8\") as f:\n for line in f:\n exclude_lines.add(line)\n\n\ndef build_ignore_words(filename: str, ignore_words: Set[str]) -> None:\n with open(filename, encoding=\"utf-8\") as f:\n for line in f:\n ignore_words.add(line.strip())\n\n\ndef add_misspelling(\n key: str,\n data: str,\n misspellings: Dict[str, Misspelling],\n) -> None:\n data = data.strip()\n\n if \",\" in data:\n fix = False\n data, reason = data.rsplit(\",\", 1)\n reason = reason.lstrip()\n else:\n fix = True\n reason = \"\"\n\n misspellings[key] = Misspelling(data, fix, reason)\n\n\ndef build_dict(\n filename: str,\n misspellings: Dict[str, Misspelling],\n ignore_words: Set[str],\n) -> None:\n with open(filename, encoding=\"utf-8\") as f:\n translate_tables = [(x, str.maketrans(x, y)) for x, y in alt_chars]\n for line in f:\n [key, data] = line.split(\"->\")\n # TODO for now, convert both to lower. 
Someday we can maybe add\n # support for fixing caps.\n key = key.lower()\n data = data.lower()\n if key not in ignore_words:\n add_misspelling(key, data, misspellings)\n # generate alternative misspellings/fixes\n for x, table in translate_tables:\n if x in key:\n alt_key = key.translate(table)\n alt_data = data.translate(table)\n if alt_key not in ignore_words:\n add_misspelling(alt_key, alt_data, misspellings)\n\n\ndef is_hidden(filename: str, check_hidden: bool) -> bool:\n bfilename = os.path.basename(filename)\n\n return bfilename not in (\"\", \".\", \"..\") and (\n not check_hidden and bfilename[0] == \".\"\n )\n\n\ndef is_text_file(filename: str) -> bool:\n with open(filename, mode=\"rb\") as f:\n s = f.read(1024)\n return b\"\\x00\" not in s\n\n\ndef fix_case(word: str, fixword: str) -> str:\n if word == word.capitalize():\n return \", \".join(w.strip().capitalize() for w in fixword.split(\",\"))\n if word == word.upper():\n return fixword.upper()\n # they are both lower case\n # or we don't have any idea\n return fixword\n\n\ndef ask_for_word_fix(\n line: str,\n match: Match[str],\n misspelling: Misspelling,\n interactivity: int,\n colors: TermColors,\n) -> Tuple[bool, str]:\n wrongword = match.group()\n if interactivity <= 0:\n return misspelling.fix, fix_case(wrongword, misspelling.data)\n\n line_ui = (\n f\"{line[:match.start()]}\"\n f\"{colors.WWORD}{wrongword}{colors.DISABLE}\"\n f\"{line[match.end():]}\"\n )\n\n if misspelling.fix and interactivity & 1:\n r = \"\"\n fixword = fix_case(wrongword, misspelling.data)\n while not r:\n print(f\"{line_ui}\\t{wrongword} ==> {fixword} (Y/n) \", end=\"\", flush=True)\n r = sys.stdin.readline().strip().upper()\n if not r:\n r = \"Y\"\n if r not in (\"Y\", \"N\"):\n print(\"Say 'y' or 'n'\")\n r = \"\"\n\n if r == \"N\":\n misspelling.fix = False\n\n elif (interactivity & 2) and not misspelling.reason:\n # if it is not disabled, i.e. 
it just has more than one possible fix,\n # we ask the user which word to use\n\n r = \"\"\n opt = [w.strip() for w in misspelling.data.split(\",\")]\n while not r:\n print(f\"{line_ui} Choose an option (blank for none): \", end=\"\")\n for i, o in enumerate(opt):\n fixword = fix_case(wrongword, o)\n print(f\" {i}) {fixword}\", end=\"\")\n print(\": \", end=\"\", flush=True)\n\n n = sys.stdin.readline().strip()\n if not n:\n break\n\n try:\n i = int(n)\n r = opt[i]\n except (ValueError, IndexError):\n print(\"Not a valid option\\n\")\n\n if r:\n misspelling.fix = True\n misspelling.data = r\n\n return misspelling.fix, fix_case(wrongword, misspelling.data)\n\n\ndef print_context(\n lines: List[str],\n index: int,\n context: Tuple[int, int],\n) -> None:\n # context = (context_before, context_after)\n for i in range(index - context[0], index + context[1] + 1):\n if 0 <= i < len(lines):\n print(f\"{'>' if i == index else ':'} {lines[i].rstrip()}\")\n\n\ndef _ignore_word_sub(\n text: str,\n ignore_word_regex: Optional[Pattern[str]],\n) -> str:\n if ignore_word_regex:\n text = ignore_word_regex.sub(\" \", text)\n return text\n\n\ndef extract_words(\n text: str,\n word_regex: Pattern[str],\n ignore_word_regex: Optional[Pattern[str]],\n) -> List[str]:\n return word_regex.findall(_ignore_word_sub(text, ignore_word_regex))\n\n\ndef extract_words_iter(\n text: str,\n word_regex: Pattern[str],\n ignore_word_regex: Optional[Pattern[str]],\n) -> List[Match[str]]:\n return list(word_regex.finditer(_ignore_word_sub(text, ignore_word_regex)))\n\n\ndef apply_uri_ignore_words(\n check_matches: List[Match[str]],\n line: str,\n word_regex: Pattern[str],\n ignore_word_regex: Optional[Pattern[str]],\n uri_regex: Pattern[str],\n uri_ignore_words: Set[str],\n) -> List[Match[str]]:\n if not uri_ignore_words:\n return check_matches\n for uri in re.findall(uri_regex, line):\n for uri_word in extract_words(uri, word_regex, ignore_word_regex):\n if uri_word in uri_ignore_words:\n # determine/remove only the first among matches\n for i, match in enumerate(check_matches):\n if match.group() == uri_word:\n check_matches = check_matches[:i] + check_matches[i + 1 :]\n break\n return check_matches\n\n\ndef parse_file(\n filename: str,\n colors: TermColors,\n summary: Optional[Summary],\n misspellings: Dict[str, Misspelling],\n exclude_lines: Set[str],\n file_opener: FileOpener,\n word_regex: Pattern[str],\n ignore_word_regex: Optional[Pattern[str]],\n uri_regex: Pattern[str],\n uri_ignore_words: Set[str],\n context: Optional[Tuple[int, int]],\n options: argparse.Namespace,\n) -> int:\n bad_count = 0\n lines = None\n changed = False\n encoding = encodings[0] # if not defined, use UTF-8\n\n if filename == \"-\":\n f = sys.stdin\n lines = f.readlines()\n else:\n if options.check_filenames:\n for word in extract_words(filename, word_regex, ignore_word_regex):\n lword = word.lower()\n if lword not in misspellings:\n continue\n fix = misspellings[lword].fix\n fixword = fix_case(word, misspellings[lword].data)\n\n if summary and fix:\n summary.update(lword)\n\n cfilename = f\"{colors.FILE}{filename}{colors.DISABLE}\"\n cwrongword = f\"{colors.WWORD}{word}{colors.DISABLE}\"\n crightword = f\"{colors.FWORD}{fixword}{colors.DISABLE}\"\n\n reason = misspellings[lword].reason\n if reason:\n if options.quiet_level & QuietLevels.DISABLED_FIXES:\n continue\n creason = f\" | {colors.FILE}{reason}{colors.DISABLE}\"\n else:\n if options.quiet_level & QuietLevels.NON_AUTOMATIC_FIXES:\n continue\n creason = \"\"\n\n bad_count += 1\n\n 
print(f\"{cfilename}: {cwrongword} ==> {crightword}{creason}\")\n\n # ignore irregular files\n if not os.path.isfile(filename):\n return bad_count\n\n try:\n text = is_text_file(filename)\n except PermissionError as e:\n print(f\"WARNING: {e.strerror}: {filename}\", file=sys.stderr)\n return bad_count\n except OSError:\n return bad_count\n\n if not text:\n if not options.quiet_level & QuietLevels.BINARY_FILE:\n print(f\"WARNING: Binary file: {filename}\", file=sys.stderr)\n return bad_count\n try:\n lines, encoding = file_opener.open(filename)\n except OSError:\n return bad_count\n\n for i, line in enumerate(lines):\n if line in exclude_lines:\n continue\n\n fixed_words = set()\n asked_for = set()\n\n # If all URI spelling errors will be ignored, erase any URI before\n # extracting words. Otherwise, apply ignores after extracting words.\n # This ensures that if a URI ignore word occurs both inside a URI and\n # outside, it will still be a spelling error.\n if \"*\" in uri_ignore_words:\n line = uri_regex.sub(\" \", line)\n check_matches = extract_words_iter(line, word_regex, ignore_word_regex)\n if \"*\" not in uri_ignore_words:\n check_matches = apply_uri_ignore_words(\n check_matches,\n line,\n word_regex,\n ignore_word_regex,\n uri_regex,\n uri_ignore_words,\n )\n for match in check_matches:\n word = match.group()\n lword = word.lower()\n if lword in misspellings:\n # Sometimes we find a 'misspelling' which is actually a valid word\n # preceded by a string escape sequence. Ignore such cases as\n # they're usually false alarms; see issue #17 among others.\n char_before_idx = match.start() - 1\n if (\n char_before_idx >= 0\n and line[char_before_idx] == \"\\\\\"\n # bell, backspace, formfeed, newline, carriage-return, tab, vtab.\n and word.startswith((\"a\", \"b\", \"f\", \"n\", \"r\", \"t\", \"v\"))\n and lword[1:] not in misspellings\n ):\n continue\n\n context_shown = False\n fix = misspellings[lword].fix\n fixword = fix_case(word, misspellings[lword].data)\n\n if options.interactive and lword not in asked_for:\n if context is not None:\n context_shown = True\n print_context(lines, i, context)\n fix, fixword = ask_for_word_fix(\n lines[i],\n match,\n misspellings[lword],\n options.interactive,\n colors=colors,\n )\n asked_for.add(lword)\n\n if summary and fix:\n summary.update(lword)\n\n if word in fixed_words: # can skip because of re.sub below\n continue\n\n if options.write_changes and fix:\n changed = True\n lines[i] = re.sub(r\"\\b%s\\b\" % word, fixword, lines[i])\n fixed_words.add(word)\n continue\n\n # otherwise warning was explicitly set by interactive mode\n if (\n options.interactive & 2\n and not fix\n and not misspellings[lword].reason\n ):\n continue\n\n cfilename = f\"{colors.FILE}{filename}{colors.DISABLE}\"\n cline = f\"{colors.FILE}{i + 1}{colors.DISABLE}\"\n cwrongword = f\"{colors.WWORD}{word}{colors.DISABLE}\"\n crightword = f\"{colors.FWORD}{fixword}{colors.DISABLE}\"\n\n reason = misspellings[lword].reason\n if reason:\n if options.quiet_level & QuietLevels.DISABLED_FIXES:\n continue\n creason = f\" | {colors.FILE}{reason}{colors.DISABLE}\"\n else:\n if options.quiet_level & QuietLevels.NON_AUTOMATIC_FIXES:\n continue\n creason = \"\"\n\n # If we get to this point (uncorrected error) we should change\n # our bad_count and thus return value\n bad_count += 1\n\n if (not context_shown) and (context is not None):\n print_context(lines, i, context)\n if filename != \"-\":\n print(\n f\"{cfilename}:{cline}: {cwrongword} \"\n f\"==> {crightword}{creason}\"\n )\n else:\n 
print(\n f\"{cline}: {line.strip()}\\n\\t{cwrongword} \"\n f\"==> {crightword}{creason}\"\n )\n\n if changed:\n if filename == \"-\":\n print(\"---\")\n for line in lines:\n print(line, end=\"\")\n else:\n if not options.quiet_level & QuietLevels.FIXES:\n print(\n f\"{colors.FWORD}FIXED:{colors.DISABLE} {filename}\",\n file=sys.stderr,\n )\n with open(filename, \"w\", encoding=encoding, newline=\"\") as f:\n f.writelines(lines)\n return bad_count\n\n\ndef _script_main() -> int:\n \"\"\"Wrap to main() for setuptools.\"\"\"\n return main(*sys.argv[1:])\n\n\ndef main(*args: str) -> int:\n \"\"\"Contains flow control\"\"\"\n options, parser, used_cfg_files = parse_options(args)\n\n # Report used config files\n if not options.quiet_level & QuietLevels.CONFIG_FILES:\n if len(used_cfg_files) > 0:\n print(\"Used config files:\")\n for ifile, cfg_file in enumerate(used_cfg_files, start=1):\n print(f\" {ifile}: {cfg_file}\")\n\n if options.regex and options.write_changes:\n print(\n \"ERROR: --write-changes cannot be used together with --regex\",\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n word_regex = options.regex or word_regex_def\n try:\n word_regex = re.compile(word_regex)\n except re.error as e:\n print(f'ERROR: invalid --regex \"{word_regex}\" ({e})', file=sys.stderr)\n parser.print_help()\n return EX_USAGE\n\n if options.ignore_regex:\n try:\n ignore_word_regex = re.compile(options.ignore_regex)\n except re.error as e:\n print(\n f'ERROR: invalid --ignore-regex \"{options.ignore_regex}\" ({e})',\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n else:\n ignore_word_regex = None\n\n ignore_words_files = options.ignore_words or []\n ignore_words = parse_ignore_words_option(options.ignore_words_list)\n for ignore_words_file in ignore_words_files:\n if not os.path.isfile(ignore_words_file):\n print(\n f\"ERROR: cannot find ignore-words file: {ignore_words_file}\",\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n build_ignore_words(ignore_words_file, ignore_words)\n\n uri_regex = options.uri_regex or uri_regex_def\n try:\n uri_regex = re.compile(uri_regex)\n except re.error as e:\n print(\n f'ERROR: invalid --uri-regex \"{uri_regex}\" ({e})',\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n uri_ignore_words = parse_ignore_words_option(options.uri_ignore_words_list)\n\n dictionaries = options.dictionary if options.dictionary else [\"-\"]\n\n use_dictionaries = []\n for dictionary in dictionaries:\n if dictionary == \"-\":\n # figure out which builtin dictionaries to use\n use = sorted(set(options.builtin.split(\",\")))\n for u in use:\n for builtin in _builtin_dictionaries:\n if builtin[0] == u:\n use_dictionaries.append(\n os.path.join(_data_root, f\"dictionary{builtin[2]}.txt\")\n )\n break\n else:\n print(\n f\"ERROR: Unknown builtin dictionary: {u}\",\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n else:\n if not os.path.isfile(dictionary):\n print(\n f\"ERROR: cannot find dictionary file: {dictionary}\",\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n use_dictionaries.append(dictionary)\n misspellings: Dict[str, Misspelling] = {}\n for dictionary in use_dictionaries:\n build_dict(dictionary, misspellings, ignore_words)\n colors = TermColors()\n if not options.colors or sys.platform == \"win32\":\n colors.disable()\n\n if options.summary:\n summary = Summary()\n else:\n summary = None\n\n context = None\n if options.context is not None:\n if (options.before_context is not None) or 
(options.after_context is not None):\n print(\n \"ERROR: --context/-C cannot be used together with \"\n \"--context-before/-B or --context-after/-A\",\n file=sys.stderr,\n )\n parser.print_help()\n return EX_USAGE\n context_both = max(0, options.context)\n context = (context_both, context_both)\n elif (options.before_context is not None) or (options.after_context is not None):\n context_before = 0\n context_after = 0\n if options.before_context is not None:\n context_before = max(0, options.before_context)\n if options.after_context is not None:\n context_after = max(0, options.after_context)\n context = (context_before, context_after)\n\n exclude_lines: Set[str] = set()\n if options.exclude_file:\n build_exclude_hashes(options.exclude_file, exclude_lines)\n\n file_opener = FileOpener(options.hard_encoding_detection, options.quiet_level)\n\n glob_match = GlobMatch(options.skip)\n try:\n glob_match.match(\"/random/path\") # does not need a real path\n except re.error:\n print(\n \"ERROR: --skip/-S has been fed an invalid glob, \"\n \"try escaping special characters\",\n file=sys.stderr,\n )\n return EX_USAGE\n\n bad_count = 0\n for filename in options.files:\n # ignore hidden files\n if is_hidden(filename, options.check_hidden):\n continue\n\n if os.path.isdir(filename):\n for root, dirs, files in os.walk(filename):\n if glob_match.match(root): # skip (absolute) directories\n del dirs[:]\n continue\n if is_hidden(root, options.check_hidden): # dir itself hidden\n continue\n for file_ in files:\n # ignore hidden files in directories\n if is_hidden(file_, options.check_hidden):\n continue\n if glob_match.match(file_): # skip files\n continue\n fname = os.path.join(root, file_)\n if glob_match.match(fname): # skip paths\n continue\n bad_count += parse_file(\n fname,\n colors,\n summary,\n misspellings,\n exclude_lines,\n file_opener,\n word_regex,\n ignore_word_regex,\n uri_regex,\n uri_ignore_words,\n context,\n options,\n )\n\n # skip (relative) directories\n dirs[:] = [\n dir_\n for dir_ in dirs\n if not glob_match.match(dir_)\n and not is_hidden(dir_, options.check_hidden)\n ]\n\n elif not glob_match.match(filename): # skip files\n bad_count += parse_file(\n filename,\n colors,\n summary,\n misspellings,\n exclude_lines,\n file_opener,\n word_regex,\n ignore_word_regex,\n uri_regex,\n uri_ignore_words,\n context,\n options,\n )\n\n if summary:\n print(\"\\n-------8<-------\\nSUMMARY:\")\n print(summary)\n if options.count:\n print(bad_count, file=sys.stderr)\n return EX_DATAERR if bad_count else EX_OK\n", "path": "codespell_lib/_codespell.py" } ]
diff --git a/codespell_lib/_codespell.py b/codespell_lib/_codespell.py index 1fe8c6306c..e7d2236b78 100644 --- a/codespell_lib/_codespell.py +++ b/codespell_lib/_codespell.py @@ -190,10 +190,7 @@ def __str__(self) -> str: keys.sort() return "\n".join( - [ - "{0}{1:{width}}".format(key, self.summary.get(key), width=15 - len(key)) - for key in keys - ] + [f"{key}{self.summary.get(key):{15 - len(key)}}" for key in keys] )
urllib3__urllib3-1912
Add additional documentation for HTTPSConnection parameters I'm not sure if this is intentional or not, but the reference doc for connection objects ( https://urllib3.readthedocs.io/en/latest/reference/index.html#urllib3.connection.VerifiedHTTPSConnection ) seems a little bit spare on details about the parameters accepted. In particular, I was looking at using `server_hostname`, and I didn't find it anywhere in the reference doc. I did eventually find it (I assume with the same meaning) documented on [this utility function](https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#urllib3.util.ssl_wrap_socket). Coming to these docs having very rarely needed to work directly with urllib3 (it's usually the transport used by some higher-level library I'm using, like `requests`), it seemed like the docs for the connection object were missing bits. If I were to open a PR adding a line to the docstring for each parameter, would it be welcome? Or are these params meant to be covered elsewhere in the docs?
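As a concrete illustration of the parameter the question is about: `server_hostname` sets the hostname used for SNI and certificate validation independently of the address the socket actually connects to. A minimal usage sketch (the IP address and hostname below are placeholders; the pattern follows the documentation example added in the accompanying diff):

```python
import urllib3

# Connect to a fixed IP, but perform TLS (SNI + certificate checks) against example.org
pool = urllib3.HTTPSConnectionPool(
    "10.0.0.10",
    server_hostname="example.org",  # hostname offered via SNI and checked against the cert
    assert_hostname="example.org",
)

# Per the docs example, assert_same_host=False is required when overriding the host this way
resp = pool.urlopen(
    "GET",
    "/",
    headers={"Host": "example.org"},
    assert_same_host=False,
)
```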
[ { "content": "from __future__ import absolute_import\nimport re\nimport datetime\nimport logging\nimport os\nimport socket\nfrom socket import error as SocketError, timeout as SocketTimeout\nimport warnings\nfrom .packages import six\nfrom .packages.six.moves.http_client import HTTPConnection as _HTTPConnection\nfrom .packages.six.moves.http_client import HTTPException # noqa: F401\n\ntry: # Compiled with SSL?\n import ssl\n\n BaseSSLError = ssl.SSLError\nexcept (ImportError, AttributeError): # Platform-specific: No SSL.\n ssl = None\n\n class BaseSSLError(BaseException):\n pass\n\n\ntry:\n # Python 3: not a no-op, we're adding this to the namespace so it can be imported.\n ConnectionError = ConnectionError\nexcept NameError:\n # Python 2\n class ConnectionError(Exception):\n pass\n\n\nfrom .exceptions import (\n NewConnectionError,\n ConnectTimeoutError,\n SubjectAltNameWarning,\n SystemTimeWarning,\n)\nfrom .packages.ssl_match_hostname import match_hostname, CertificateError\n\nfrom .util.ssl_ import (\n resolve_cert_reqs,\n resolve_ssl_version,\n assert_fingerprint,\n create_urllib3_context,\n ssl_wrap_socket,\n)\n\n\nfrom .util import connection, SUPPRESS_USER_AGENT\n\nfrom ._collections import HTTPHeaderDict\nfrom ._version import __version__\n\nlog = logging.getLogger(__name__)\n\nport_by_scheme = {\"http\": 80, \"https\": 443}\n\n# When it comes time to update this value as a part of regular maintenance\n# (ie test_recent_date is failing) update it to ~6 months before the current date.\nRECENT_DATE = datetime.date(2019, 1, 1)\n\n_CONTAINS_CONTROL_CHAR_RE = re.compile(r\"[^-!#$%&'*+.^_`|~0-9a-zA-Z]\")\n\n\nclass DummyConnection(object):\n \"\"\"Used to detect a failed ConnectionCls import.\"\"\"\n\n pass\n\n\nclass HTTPConnection(_HTTPConnection, object):\n \"\"\"\n Based on httplib.HTTPConnection but provides an extra constructor\n backwards-compatibility layer between older and newer Pythons.\n\n Additional keyword parameters are used to configure attributes of the connection.\n Accepted parameters include:\n\n - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`\n - ``source_address``: Set the source address for the current connection.\n - ``socket_options``: Set specific options on the underlying socket. If not specified, then\n defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling\n Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.\n\n For example, if you wish to enable TCP Keep Alive in addition to the defaults,\n you might pass::\n\n HTTPConnection.default_socket_options + [\n (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n ]\n\n Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).\n \"\"\"\n\n default_port = port_by_scheme[\"http\"]\n\n #: Disable Nagle's algorithm by default.\n #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``\n default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]\n\n #: Whether this connection verifies the host's certificate.\n is_verified = False\n\n def __init__(self, *args, **kw):\n if not six.PY2:\n kw.pop(\"strict\", None)\n\n # Pre-set source_address.\n self.source_address = kw.get(\"source_address\")\n\n #: The socket options provided by the user. 
If no options are\n #: provided, we use the default options.\n self.socket_options = kw.pop(\"socket_options\", self.default_socket_options)\n _HTTPConnection.__init__(self, *args, **kw)\n\n @property\n def host(self):\n \"\"\"\n Getter method to remove any trailing dots that indicate the hostname is an FQDN.\n\n In general, SSL certificates don't include the trailing dot indicating a\n fully-qualified domain name, and thus, they don't validate properly when\n checked against a domain name that includes the dot. In addition, some\n servers may not expect to receive the trailing dot when provided.\n\n However, the hostname with trailing dot is critical to DNS resolution; doing a\n lookup with the trailing dot will properly only resolve the appropriate FQDN,\n whereas a lookup without a trailing dot will search the system's search domain\n list. Thus, it's important to keep the original host around for use only in\n those cases where it's appropriate (i.e., when doing DNS lookup to establish the\n actual TCP connection across which we're going to send HTTP requests).\n \"\"\"\n return self._dns_host.rstrip(\".\")\n\n @host.setter\n def host(self, value):\n \"\"\"\n Setter for the `host` property.\n\n We assume that only urllib3 uses the _dns_host attribute; httplib itself\n only uses `host`, and it seems reasonable that other libraries follow suit.\n \"\"\"\n self._dns_host = value\n\n def _new_conn(self):\n \"\"\" Establish a socket connection and set nodelay settings on it.\n\n :return: New socket connection.\n \"\"\"\n extra_kw = {}\n if self.source_address:\n extra_kw[\"source_address\"] = self.source_address\n\n if self.socket_options:\n extra_kw[\"socket_options\"] = self.socket_options\n\n try:\n conn = connection.create_connection(\n (self._dns_host, self.port), self.timeout, **extra_kw\n )\n\n except SocketTimeout:\n raise ConnectTimeoutError(\n self,\n \"Connection to %s timed out. 
(connect timeout=%s)\"\n % (self.host, self.timeout),\n )\n\n except SocketError as e:\n raise NewConnectionError(\n self, \"Failed to establish a new connection: %s\" % e\n )\n\n return conn\n\n def _is_using_tunnel(self):\n # Google App Engine's httplib does not define _tunnel_host\n return getattr(self, \"_tunnel_host\", None)\n\n def _prepare_conn(self, conn):\n self.sock = conn\n if self._is_using_tunnel():\n # TODO: Fix tunnel so it doesn't depend on self.sock state.\n self._tunnel()\n # Mark this connection as not reusable\n self.auto_open = 0\n\n def connect(self):\n conn = self._new_conn()\n self._prepare_conn(conn)\n\n def putrequest(self, method, url, *args, **kwargs):\n \"\"\"Send a request to the server\"\"\"\n match = _CONTAINS_CONTROL_CHAR_RE.search(method)\n if match:\n raise ValueError(\n \"Method cannot contain non-token characters %r (found at least %r)\"\n % (method, match.group())\n )\n\n return _HTTPConnection.putrequest(self, method, url, *args, **kwargs)\n\n def request(self, method, url, body=None, headers=None):\n headers = HTTPHeaderDict(headers if headers is not None else {})\n if \"user-agent\" not in headers:\n headers[\"User-Agent\"] = _get_default_user_agent()\n elif headers[\"user-agent\"] == SUPPRESS_USER_AGENT:\n del headers[\"user-agent\"]\n super(HTTPConnection, self).request(method, url, body=body, headers=headers)\n\n def request_chunked(self, method, url, body=None, headers=None):\n \"\"\"\n Alternative to the common request method, which sends the\n body with chunked encoding and not as one block\n \"\"\"\n headers = HTTPHeaderDict(headers if headers is not None else {})\n skip_accept_encoding = \"accept-encoding\" in headers\n skip_host = \"host\" in headers\n self.putrequest(\n method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host\n )\n if \"user-agent\" not in headers:\n headers[\"User-Agent\"] = _get_default_user_agent()\n elif headers[\"user-agent\"] == SUPPRESS_USER_AGENT:\n del headers[\"user-agent\"]\n for header, value in headers.items():\n self.putheader(header, value)\n if \"transfer-encoding\" not in headers:\n self.putheader(\"Transfer-Encoding\", \"chunked\")\n self.endheaders()\n\n if body is not None:\n stringish_types = six.string_types + (bytes,)\n if isinstance(body, stringish_types):\n body = (body,)\n for chunk in body:\n if not chunk:\n continue\n if not isinstance(chunk, bytes):\n chunk = chunk.encode(\"utf8\")\n len_str = hex(len(chunk))[2:]\n to_send = bytearray(len_str.encode())\n to_send += b\"\\r\\n\"\n to_send += chunk\n to_send += b\"\\r\\n\"\n self.send(to_send)\n\n # After the if clause, to always have a closed body\n self.send(b\"0\\r\\n\\r\\n\")\n\n\nclass HTTPSConnection(HTTPConnection):\n default_port = port_by_scheme[\"https\"]\n\n cert_reqs = None\n ca_certs = None\n ca_cert_dir = None\n ca_cert_data = None\n ssl_version = None\n assert_fingerprint = None\n\n def __init__(\n self,\n host,\n port=None,\n key_file=None,\n cert_file=None,\n key_password=None,\n strict=None,\n timeout=socket._GLOBAL_DEFAULT_TIMEOUT,\n ssl_context=None,\n server_hostname=None,\n **kw\n ):\n\n HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw)\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.key_password = key_password\n self.ssl_context = ssl_context\n self.server_hostname = server_hostname\n\n # Required property for Google AppEngine 1.9.0 which otherwise causes\n # HTTPS requests to go out as HTTP. 
(See Issue #356)\n self._protocol = \"https\"\n\n def set_cert(\n self,\n key_file=None,\n cert_file=None,\n cert_reqs=None,\n key_password=None,\n ca_certs=None,\n assert_hostname=None,\n assert_fingerprint=None,\n ca_cert_dir=None,\n ca_cert_data=None,\n ):\n \"\"\"\n This method should only be called once, before the connection is used.\n \"\"\"\n # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also\n # have an SSLContext object in which case we'll use its verify_mode.\n if cert_reqs is None:\n if self.ssl_context is not None:\n cert_reqs = self.ssl_context.verify_mode\n else:\n cert_reqs = resolve_cert_reqs(None)\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.key_password = key_password\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n self.ca_certs = ca_certs and os.path.expanduser(ca_certs)\n self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)\n self.ca_cert_data = ca_cert_data\n\n def connect(self):\n # Add certificate verification\n conn = self._new_conn()\n hostname = self.host\n\n if self._is_using_tunnel():\n self.sock = conn\n\n # Calls self._set_hostport(), so self.host is\n # self._tunnel_host below.\n self._tunnel()\n # Mark this connection as not reusable\n self.auto_open = 0\n\n # Override the host with the one we're requesting data from.\n hostname = self._tunnel_host\n\n server_hostname = hostname\n if self.server_hostname is not None:\n server_hostname = self.server_hostname\n\n is_time_off = datetime.date.today() < RECENT_DATE\n if is_time_off:\n warnings.warn(\n (\n \"System time is way off (before {0}). This will probably \"\n \"lead to SSL verification errors\"\n ).format(RECENT_DATE),\n SystemTimeWarning,\n )\n\n # Wrap socket using verification with the root certs in\n # trusted_root_certs\n default_ssl_context = False\n if self.ssl_context is None:\n default_ssl_context = True\n self.ssl_context = create_urllib3_context(\n ssl_version=resolve_ssl_version(self.ssl_version),\n cert_reqs=resolve_cert_reqs(self.cert_reqs),\n )\n\n context = self.ssl_context\n context.verify_mode = resolve_cert_reqs(self.cert_reqs)\n\n # Try to load OS default certs if none are given.\n # Works well on Windows (requires Python3.4+)\n if (\n not self.ca_certs\n and not self.ca_cert_dir\n and not self.ca_cert_data\n and default_ssl_context\n and hasattr(context, \"load_default_certs\")\n ):\n context.load_default_certs()\n\n self.sock = ssl_wrap_socket(\n sock=conn,\n keyfile=self.key_file,\n certfile=self.cert_file,\n key_password=self.key_password,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n ca_cert_data=self.ca_cert_data,\n server_hostname=server_hostname,\n ssl_context=context,\n )\n\n if self.assert_fingerprint:\n assert_fingerprint(\n self.sock.getpeercert(binary_form=True), self.assert_fingerprint\n )\n elif (\n context.verify_mode != ssl.CERT_NONE\n and not getattr(context, \"check_hostname\", False)\n and self.assert_hostname is not False\n ):\n # While urllib3 attempts to always turn off hostname matching from\n # the TLS library, this cannot always be done. So we check whether\n # the TLS Library still thinks it's matching hostnames.\n cert = self.sock.getpeercert()\n if not cert.get(\"subjectAltName\", ()):\n warnings.warn(\n (\n \"Certificate for {0} has no `subjectAltName`, falling back to check for a \"\n \"`commonName` for now. This feature is being removed by major browsers and \"\n \"deprecated by RFC 2818. 
(See https://github.com/urllib3/urllib3/issues/497 \"\n \"for details.)\".format(hostname)\n ),\n SubjectAltNameWarning,\n )\n _match_hostname(cert, self.assert_hostname or server_hostname)\n\n self.is_verified = (\n context.verify_mode == ssl.CERT_REQUIRED\n or self.assert_fingerprint is not None\n )\n\n\ndef _match_hostname(cert, asserted_hostname):\n try:\n match_hostname(cert, asserted_hostname)\n except CertificateError as e:\n log.warning(\n \"Certificate did not match expected hostname: %s. Certificate: %s\",\n asserted_hostname,\n cert,\n )\n # Add cert to exception and reraise so client code can inspect\n # the cert when catching the exception, if they want to\n e._peer_cert = cert\n raise\n\n\ndef _get_default_user_agent():\n return \"python-urllib3/%s\" % __version__\n\n\nif not ssl:\n HTTPSConnection = DummyConnection # noqa: F811\n\n\nVerifiedHTTPSConnection = HTTPSConnection\n", "path": "src/urllib3/connection.py" } ]
[ { "content": "from __future__ import absolute_import\nimport re\nimport datetime\nimport logging\nimport os\nimport socket\nfrom socket import error as SocketError, timeout as SocketTimeout\nimport warnings\nfrom .packages import six\nfrom .packages.six.moves.http_client import HTTPConnection as _HTTPConnection\nfrom .packages.six.moves.http_client import HTTPException # noqa: F401\n\ntry: # Compiled with SSL?\n import ssl\n\n BaseSSLError = ssl.SSLError\nexcept (ImportError, AttributeError): # Platform-specific: No SSL.\n ssl = None\n\n class BaseSSLError(BaseException):\n pass\n\n\ntry:\n # Python 3: not a no-op, we're adding this to the namespace so it can be imported.\n ConnectionError = ConnectionError\nexcept NameError:\n # Python 2\n class ConnectionError(Exception):\n pass\n\n\nfrom .exceptions import (\n NewConnectionError,\n ConnectTimeoutError,\n SubjectAltNameWarning,\n SystemTimeWarning,\n)\nfrom .packages.ssl_match_hostname import match_hostname, CertificateError\n\nfrom .util.ssl_ import (\n resolve_cert_reqs,\n resolve_ssl_version,\n assert_fingerprint,\n create_urllib3_context,\n ssl_wrap_socket,\n)\n\n\nfrom .util import connection, SUPPRESS_USER_AGENT\n\nfrom ._collections import HTTPHeaderDict\nfrom ._version import __version__\n\nlog = logging.getLogger(__name__)\n\nport_by_scheme = {\"http\": 80, \"https\": 443}\n\n# When it comes time to update this value as a part of regular maintenance\n# (ie test_recent_date is failing) update it to ~6 months before the current date.\nRECENT_DATE = datetime.date(2019, 1, 1)\n\n_CONTAINS_CONTROL_CHAR_RE = re.compile(r\"[^-!#$%&'*+.^_`|~0-9a-zA-Z]\")\n\n\nclass DummyConnection(object):\n \"\"\"Used to detect a failed ConnectionCls import.\"\"\"\n\n pass\n\n\nclass HTTPConnection(_HTTPConnection, object):\n \"\"\"\n Based on httplib.HTTPConnection but provides an extra constructor\n backwards-compatibility layer between older and newer Pythons.\n\n Additional keyword parameters are used to configure attributes of the connection.\n Accepted parameters include:\n\n - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`\n - ``source_address``: Set the source address for the current connection.\n - ``socket_options``: Set specific options on the underlying socket. If not specified, then\n defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling\n Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.\n\n For example, if you wish to enable TCP Keep Alive in addition to the defaults,\n you might pass::\n\n HTTPConnection.default_socket_options + [\n (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n ]\n\n Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).\n \"\"\"\n\n default_port = port_by_scheme[\"http\"]\n\n #: Disable Nagle's algorithm by default.\n #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``\n default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]\n\n #: Whether this connection verifies the host's certificate.\n is_verified = False\n\n def __init__(self, *args, **kw):\n if not six.PY2:\n kw.pop(\"strict\", None)\n\n # Pre-set source_address.\n self.source_address = kw.get(\"source_address\")\n\n #: The socket options provided by the user. 
If no options are\n #: provided, we use the default options.\n self.socket_options = kw.pop(\"socket_options\", self.default_socket_options)\n _HTTPConnection.__init__(self, *args, **kw)\n\n @property\n def host(self):\n \"\"\"\n Getter method to remove any trailing dots that indicate the hostname is an FQDN.\n\n In general, SSL certificates don't include the trailing dot indicating a\n fully-qualified domain name, and thus, they don't validate properly when\n checked against a domain name that includes the dot. In addition, some\n servers may not expect to receive the trailing dot when provided.\n\n However, the hostname with trailing dot is critical to DNS resolution; doing a\n lookup with the trailing dot will properly only resolve the appropriate FQDN,\n whereas a lookup without a trailing dot will search the system's search domain\n list. Thus, it's important to keep the original host around for use only in\n those cases where it's appropriate (i.e., when doing DNS lookup to establish the\n actual TCP connection across which we're going to send HTTP requests).\n \"\"\"\n return self._dns_host.rstrip(\".\")\n\n @host.setter\n def host(self, value):\n \"\"\"\n Setter for the `host` property.\n\n We assume that only urllib3 uses the _dns_host attribute; httplib itself\n only uses `host`, and it seems reasonable that other libraries follow suit.\n \"\"\"\n self._dns_host = value\n\n def _new_conn(self):\n \"\"\" Establish a socket connection and set nodelay settings on it.\n\n :return: New socket connection.\n \"\"\"\n extra_kw = {}\n if self.source_address:\n extra_kw[\"source_address\"] = self.source_address\n\n if self.socket_options:\n extra_kw[\"socket_options\"] = self.socket_options\n\n try:\n conn = connection.create_connection(\n (self._dns_host, self.port), self.timeout, **extra_kw\n )\n\n except SocketTimeout:\n raise ConnectTimeoutError(\n self,\n \"Connection to %s timed out. 
(connect timeout=%s)\"\n % (self.host, self.timeout),\n )\n\n except SocketError as e:\n raise NewConnectionError(\n self, \"Failed to establish a new connection: %s\" % e\n )\n\n return conn\n\n def _is_using_tunnel(self):\n # Google App Engine's httplib does not define _tunnel_host\n return getattr(self, \"_tunnel_host\", None)\n\n def _prepare_conn(self, conn):\n self.sock = conn\n if self._is_using_tunnel():\n # TODO: Fix tunnel so it doesn't depend on self.sock state.\n self._tunnel()\n # Mark this connection as not reusable\n self.auto_open = 0\n\n def connect(self):\n conn = self._new_conn()\n self._prepare_conn(conn)\n\n def putrequest(self, method, url, *args, **kwargs):\n \"\"\"Send a request to the server\"\"\"\n match = _CONTAINS_CONTROL_CHAR_RE.search(method)\n if match:\n raise ValueError(\n \"Method cannot contain non-token characters %r (found at least %r)\"\n % (method, match.group())\n )\n\n return _HTTPConnection.putrequest(self, method, url, *args, **kwargs)\n\n def request(self, method, url, body=None, headers=None):\n headers = HTTPHeaderDict(headers if headers is not None else {})\n if \"user-agent\" not in headers:\n headers[\"User-Agent\"] = _get_default_user_agent()\n elif headers[\"user-agent\"] == SUPPRESS_USER_AGENT:\n del headers[\"user-agent\"]\n super(HTTPConnection, self).request(method, url, body=body, headers=headers)\n\n def request_chunked(self, method, url, body=None, headers=None):\n \"\"\"\n Alternative to the common request method, which sends the\n body with chunked encoding and not as one block\n \"\"\"\n headers = HTTPHeaderDict(headers if headers is not None else {})\n skip_accept_encoding = \"accept-encoding\" in headers\n skip_host = \"host\" in headers\n self.putrequest(\n method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host\n )\n if \"user-agent\" not in headers:\n headers[\"User-Agent\"] = _get_default_user_agent()\n elif headers[\"user-agent\"] == SUPPRESS_USER_AGENT:\n del headers[\"user-agent\"]\n for header, value in headers.items():\n self.putheader(header, value)\n if \"transfer-encoding\" not in headers:\n self.putheader(\"Transfer-Encoding\", \"chunked\")\n self.endheaders()\n\n if body is not None:\n stringish_types = six.string_types + (bytes,)\n if isinstance(body, stringish_types):\n body = (body,)\n for chunk in body:\n if not chunk:\n continue\n if not isinstance(chunk, bytes):\n chunk = chunk.encode(\"utf8\")\n len_str = hex(len(chunk))[2:]\n to_send = bytearray(len_str.encode())\n to_send += b\"\\r\\n\"\n to_send += chunk\n to_send += b\"\\r\\n\"\n self.send(to_send)\n\n # After the if clause, to always have a closed body\n self.send(b\"0\\r\\n\\r\\n\")\n\n\nclass HTTPSConnection(HTTPConnection):\n \"\"\"\n Many of the parameters to this constructor are passed to the underlying SSL\n socket by means of :py:func:`util.ssl_wrap_socket`.\n \"\"\"\n\n default_port = port_by_scheme[\"https\"]\n\n cert_reqs = None\n ca_certs = None\n ca_cert_dir = None\n ca_cert_data = None\n ssl_version = None\n assert_fingerprint = None\n\n def __init__(\n self,\n host,\n port=None,\n key_file=None,\n cert_file=None,\n key_password=None,\n strict=None,\n timeout=socket._GLOBAL_DEFAULT_TIMEOUT,\n ssl_context=None,\n server_hostname=None,\n **kw\n ):\n\n HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw)\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.key_password = key_password\n self.ssl_context = ssl_context\n self.server_hostname = server_hostname\n\n # Required property for 
Google AppEngine 1.9.0 which otherwise causes\n # HTTPS requests to go out as HTTP. (See Issue #356)\n self._protocol = \"https\"\n\n def set_cert(\n self,\n key_file=None,\n cert_file=None,\n cert_reqs=None,\n key_password=None,\n ca_certs=None,\n assert_hostname=None,\n assert_fingerprint=None,\n ca_cert_dir=None,\n ca_cert_data=None,\n ):\n \"\"\"\n This method should only be called once, before the connection is used.\n \"\"\"\n # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also\n # have an SSLContext object in which case we'll use its verify_mode.\n if cert_reqs is None:\n if self.ssl_context is not None:\n cert_reqs = self.ssl_context.verify_mode\n else:\n cert_reqs = resolve_cert_reqs(None)\n\n self.key_file = key_file\n self.cert_file = cert_file\n self.cert_reqs = cert_reqs\n self.key_password = key_password\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n self.ca_certs = ca_certs and os.path.expanduser(ca_certs)\n self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)\n self.ca_cert_data = ca_cert_data\n\n def connect(self):\n # Add certificate verification\n conn = self._new_conn()\n hostname = self.host\n\n if self._is_using_tunnel():\n self.sock = conn\n\n # Calls self._set_hostport(), so self.host is\n # self._tunnel_host below.\n self._tunnel()\n # Mark this connection as not reusable\n self.auto_open = 0\n\n # Override the host with the one we're requesting data from.\n hostname = self._tunnel_host\n\n server_hostname = hostname\n if self.server_hostname is not None:\n server_hostname = self.server_hostname\n\n is_time_off = datetime.date.today() < RECENT_DATE\n if is_time_off:\n warnings.warn(\n (\n \"System time is way off (before {0}). This will probably \"\n \"lead to SSL verification errors\"\n ).format(RECENT_DATE),\n SystemTimeWarning,\n )\n\n # Wrap socket using verification with the root certs in\n # trusted_root_certs\n default_ssl_context = False\n if self.ssl_context is None:\n default_ssl_context = True\n self.ssl_context = create_urllib3_context(\n ssl_version=resolve_ssl_version(self.ssl_version),\n cert_reqs=resolve_cert_reqs(self.cert_reqs),\n )\n\n context = self.ssl_context\n context.verify_mode = resolve_cert_reqs(self.cert_reqs)\n\n # Try to load OS default certs if none are given.\n # Works well on Windows (requires Python3.4+)\n if (\n not self.ca_certs\n and not self.ca_cert_dir\n and not self.ca_cert_data\n and default_ssl_context\n and hasattr(context, \"load_default_certs\")\n ):\n context.load_default_certs()\n\n self.sock = ssl_wrap_socket(\n sock=conn,\n keyfile=self.key_file,\n certfile=self.cert_file,\n key_password=self.key_password,\n ca_certs=self.ca_certs,\n ca_cert_dir=self.ca_cert_dir,\n ca_cert_data=self.ca_cert_data,\n server_hostname=server_hostname,\n ssl_context=context,\n )\n\n if self.assert_fingerprint:\n assert_fingerprint(\n self.sock.getpeercert(binary_form=True), self.assert_fingerprint\n )\n elif (\n context.verify_mode != ssl.CERT_NONE\n and not getattr(context, \"check_hostname\", False)\n and self.assert_hostname is not False\n ):\n # While urllib3 attempts to always turn off hostname matching from\n # the TLS library, this cannot always be done. So we check whether\n # the TLS Library still thinks it's matching hostnames.\n cert = self.sock.getpeercert()\n if not cert.get(\"subjectAltName\", ()):\n warnings.warn(\n (\n \"Certificate for {0} has no `subjectAltName`, falling back to check for a \"\n \"`commonName` for now. 
This feature is being removed by major browsers and \"\n \"deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 \"\n \"for details.)\".format(hostname)\n ),\n SubjectAltNameWarning,\n )\n _match_hostname(cert, self.assert_hostname or server_hostname)\n\n self.is_verified = (\n context.verify_mode == ssl.CERT_REQUIRED\n or self.assert_fingerprint is not None\n )\n\n\ndef _match_hostname(cert, asserted_hostname):\n try:\n match_hostname(cert, asserted_hostname)\n except CertificateError as e:\n log.warning(\n \"Certificate did not match expected hostname: %s. Certificate: %s\",\n asserted_hostname,\n cert,\n )\n # Add cert to exception and reraise so client code can inspect\n # the cert when catching the exception, if they want to\n e._peer_cert = cert\n raise\n\n\ndef _get_default_user_agent():\n return \"python-urllib3/%s\" % __version__\n\n\nif not ssl:\n HTTPSConnection = DummyConnection # noqa: F811\n\n\nVerifiedHTTPSConnection = HTTPSConnection\n", "path": "src/urllib3/connection.py" } ]
diff --git a/docs/advanced-usage.rst b/docs/advanced-usage.rst index e36287a064..1a9b007b43 100644 --- a/docs/advanced-usage.rst +++ b/docs/advanced-usage.rst @@ -172,6 +172,45 @@ verified with that bundle will succeed. It's recommended to use a separate :class:`~poolmanager.PoolManager` to make requests to URLs that do not need the custom certificate. +.. _sni_custom: + +Custom SNI Hostname +------------------- + +If you want to create a connection to a host over HTTPS which uses SNI, there +are two places where the hostname is expected. It must be included in the Host +header sent, so that the server will know which host is being requested. The +hostname should also match the certificate served by the server, which is +checked by urllib3. + +Normally, urllib3 takes care of setting and checking these values for you when +you connect to a host by name. However, it's sometimes useful to set a +connection's expected Host header and certificate hostname (subject), +especially when you are connecting without using name resolution. For example, +you could connect to a server by IP using HTTPS like so:: + + >>> import urllib3 + >>> pool = urllib3.HTTPSConnectionPool( + ... "10.0.0.10", + ... assert_hostname="example.org", + ... server_hostname="example.org" + ... ) + >>> pool.urlopen( + ... "GET", + ... "/", + ... headers={"Host": "example.org"}, + ... assert_same_host=False + ... ) + + +Note that when you use a connection in this way, you must specify +``assert_same_host=False``. + +This is useful when DNS resolution for ``example.org`` does not match the +address that you would like to use. The IP may be for a private interface, or +you may want to use a specific host under round-robin DNS. + + .. _ssl_client: Client certificates diff --git a/src/urllib3/connection.py b/src/urllib3/connection.py index cf9cfee9a1..8c63f1fa8d 100644 --- a/src/urllib3/connection.py +++ b/src/urllib3/connection.py @@ -251,6 +251,11 @@ def request_chunked(self, method, url, body=None, headers=None): class HTTPSConnection(HTTPConnection): + """ + Many of the parameters to this constructor are passed to the underlying SSL + socket by means of :py:func:`util.ssl_wrap_socket`. + """ + default_port = port_by_scheme["https"] cert_reqs = None
keras-team__keras-1158
problem with K.common._FLOATX I tried to run: ``` a = K.random_normal((100, 200)) ``` and I got the error: ``` /home/eders/python/Theano/theano/sandbox/rng_mrg.pyc in get_substream_rstates(self, n_streams, dtype, inc_rstate) 1167 1168 """ -> 1169 assert isinstance(dtype, str) 1170 assert n_streams < 2**72 1171 assert n_streams > 0 AssertionError: ``` I tried to print K.common._FLOATX to see what was going on and it is `u'float32'`. That little `u` upfront is making theano crash. I believe that when reading the type from json it was not converted to the right type of string. Anybody else had that problem? I'll check the code to see if I can fix it.
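The `u''` prefix matters because, on Python 2, `unicode` and `str` are distinct types and values parsed from JSON come back as `unicode`; a minimal sketch of the mismatch (Python 2 semantics assumed, not taken from the report):

```python
# Python 2: strings loaded from JSON are unicode, not str
import json

floatx = json.loads('{"floatx": "float32"}')["floatx"]
print(repr(floatx))                             # u'float32'
print(isinstance(floatx, str))                  # False -> trips `assert isinstance(dtype, str)`
print(isinstance(floatx.encode("ascii"), str))  # True once encoded to a byte string
```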
[ { "content": "import numpy as np\n\n# the type of float to use throughout the session.\n_FLOATX = 'float32'\n_EPSILON = 10e-8\n\n\ndef epsilon():\n return _EPSILON\n\n\ndef set_epsilon(e):\n global _EPSILON\n _EPSILON = e\n\n\ndef floatx():\n return _FLOATX\n\n\ndef set_floatx(floatx):\n global _FLOATX\n if floatx not in {'float32', 'float64'}:\n raise Exception('Unknown floatx type: ' + str(floatx))\n _FLOATX = floatx\n\n\ndef cast_to_floatx(x):\n '''Cast a Numpy array to floatx.\n '''\n return np.asarray(x, dtype=_FLOATX)\n", "path": "keras/backend/common.py" } ]
[ { "content": "import numpy as np\n\n# the type of float to use throughout the session.\n_FLOATX = 'float32'\n_EPSILON = 10e-8\n\n\ndef epsilon():\n return _EPSILON\n\n\ndef set_epsilon(e):\n global _EPSILON\n _EPSILON = e\n\n\ndef floatx():\n return _FLOATX\n\n\ndef set_floatx(floatx):\n global _FLOATX\n if floatx not in {'float32', 'float64'}:\n raise Exception('Unknown floatx type: ' + str(floatx))\n if isinstance(floatx, unicode):\n floatx = floatx.encode('ascii')\n _FLOATX = floatx\n\n\ndef cast_to_floatx(x):\n '''Cast a Numpy array to floatx.\n '''\n return np.asarray(x, dtype=_FLOATX)\n", "path": "keras/backend/common.py" } ]
diff --git a/keras/backend/common.py b/keras/backend/common.py index 1a84aaf37c41..86284713e297 100644 --- a/keras/backend/common.py +++ b/keras/backend/common.py @@ -22,6 +22,8 @@ def set_floatx(floatx): global _FLOATX if floatx not in {'float32', 'float64'}: raise Exception('Unknown floatx type: ' + str(floatx)) + if isinstance(floatx, unicode): + floatx = floatx.encode('ascii') _FLOATX = floatx
apache__airflow-23674
PythonSensor is not considering mode='reschedule', instead marking task UP_FOR_RETRY ### Apache Airflow version 2.3.0 (latest released) ### What happened A PythonSensor that works on versions <2.3.0 in mode reschedule is now marking the task as `UP_FOR_RETRY` instead. Log says: ``` [2022-05-02, 15:48:23 UTC] {python.py:66} INFO - Poking callable: <function test at 0x7fd56286bc10> [2022-05-02, 15:48:23 UTC] {taskinstance.py:1853} INFO - Rescheduling task, marking task as UP_FOR_RESCHEDULE [2022-05-02, 15:48:23 UTC] {local_task_job.py:156} INFO - Task exited with return code 0 [2022-05-02, 15:48:23 UTC] {local_task_job.py:273} INFO - 0 downstream tasks scheduled from follow-on schedule check ``` But it directly marks it as `UP_FOR_RETRY` and then follows `retry_delay` and `retries` ### What you think should happen instead It should mark the task as `UP_FOR_RESCHEDULE` and reschedule it according to the `poke_interval` ### How to reproduce ``` from datetime import datetime, timedelta from airflow import DAG from airflow.sensors.python import PythonSensor def test(): return False default_args = { "owner": "airflow", "depends_on_past": False, "start_date": datetime(2022, 5, 2), "email_on_failure": False, "email_on_retry": False, "retries": 1, "retry_delay": timedelta(minutes=1), } dag = DAG("dag_csdepkrr_development_v001", default_args=default_args, catchup=False, max_active_runs=1, schedule_interval=None) t1 = PythonSensor(task_id="PythonSensor", python_callable=test, poke_interval=30, mode='reschedule', dag=dag) ``` ### Operating System Latest Docker image ### Versions of Apache Airflow Providers ``` apache-airflow-providers-amazon==3.3.0 apache-airflow-providers-celery==2.1.4 apache-airflow-providers-cncf-kubernetes==4.0.1 apache-airflow-providers-docker==2.6.0 apache-airflow-providers-elasticsearch==3.0.3 apache-airflow-providers-ftp==2.1.2 apache-airflow-providers-google==6.8.0 apache-airflow-providers-grpc==2.0.4 apache-airflow-providers-hashicorp==2.2.0 apache-airflow-providers-http==2.1.2 apache-airflow-providers-imap==2.2.3 apache-airflow-providers-microsoft-azure==3.8.0 apache-airflow-providers-mysql==2.2.3 apache-airflow-providers-odbc==2.0.4 apache-airflow-providers-oracle==2.2.3 apache-airflow-providers-postgres==4.1.0 apache-airflow-providers-redis==2.0.4 apache-airflow-providers-sendgrid==2.0.4 apache-airflow-providers-sftp==2.5.2 apache-airflow-providers-slack==4.2.3 apache-airflow-providers-sqlite==2.1.3 apache-airflow-providers-ssh==2.4.3 ``` ### Deployment Docker-Compose ### Deployment details Latest Docker compose from the documentation ### Anything else _No response_ ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
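For context on the `UP_FOR_RESCHEDULE` path the log refers to: in reschedule mode a sensor is expected to end an unsuccessful poke by raising `AirflowRescheduleException` (one of the imports in the sensor base module below), which Airflow then turns into an `UP_FOR_RESCHEDULE` task instance rather than a retry. A rough sketch of that contract, as a hypothetical helper rather than the actual Airflow implementation:

```python
from datetime import timedelta

from airflow.exceptions import AirflowRescheduleException
from airflow.utils import timezone

def finish_poke(poked: bool, poke_interval: float) -> None:
    """Hypothetical illustration of the reschedule contract."""
    if poked:
        return  # criteria met, the task can succeed
    # Free the worker slot and ask to be woken up after poke_interval;
    # the task instance should then be marked UP_FOR_RESCHEDULE, not UP_FOR_RETRY.
    raise AirflowRescheduleException(timezone.utcnow() + timedelta(seconds=poke_interval))
```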
[ { "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport datetime\nimport functools\nimport hashlib\nimport time\nimport warnings\nfrom datetime import timedelta\nfrom typing import Any, Callable, Iterable, Optional, Union\n\nfrom airflow import settings\nfrom airflow.configuration import conf\nfrom airflow.exceptions import (\n AirflowException,\n AirflowRescheduleException,\n AirflowSensorTimeout,\n AirflowSkipException,\n)\nfrom airflow.models.baseoperator import BaseOperator\nfrom airflow.models.sensorinstance import SensorInstance\nfrom airflow.models.skipmixin import SkipMixin\nfrom airflow.models.taskreschedule import TaskReschedule\nfrom airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep\nfrom airflow.utils import timezone\nfrom airflow.utils.context import Context\n\n# We need to keep the import here because GCSToLocalFilesystemOperator released in\n# Google Provider before 3.0.0 imported apply_defaults from here.\n# See https://github.com/apache/airflow/issues/16035\nfrom airflow.utils.decorators import apply_defaults # noqa: F401\nfrom airflow.utils.docs import get_docs_url\n\n# As documented in https://dev.mysql.com/doc/refman/5.7/en/datetime.html.\n_MYSQL_TIMESTAMP_MAX = datetime.datetime(2038, 1, 19, 3, 14, 7, tzinfo=timezone.utc)\n\n\[email protected]_cache(maxsize=None)\ndef _is_metadatabase_mysql() -> bool:\n if settings.engine is None:\n raise AirflowException(\"Must initialize ORM first\")\n return settings.engine.url.get_backend_name() == \"mysql\"\n\n\nclass PokeReturnValue:\n \"\"\"\n Sensors can optionally return an instance of the PokeReturnValue class in the poke method.\n If an XCom value is supplied when the sensor is done, then the XCom value will be\n pushed through the operator return value.\n :param is_done: Set to true to indicate the sensor can stop poking.\n :param xcom_value: An optional XCOM value to be returned by the operator.\n \"\"\"\n\n def __init__(self, is_done: bool, xcom_value: Optional[Any] = None) -> None:\n self.xcom_value = xcom_value\n self.is_done = is_done\n\n def __bool__(self) -> bool:\n return self.is_done\n\n\nclass BaseSensorOperator(BaseOperator, SkipMixin):\n \"\"\"\n Sensor operators are derived from this class and inherit these attributes.\n\n Sensor operators keep executing at a time interval and succeed when\n a criteria is met and fail if and when they time out.\n\n :param soft_fail: Set to true to mark the task as SKIPPED on failure\n :param poke_interval: Time in seconds that the job should wait in\n between each tries\n :param timeout: Time, in seconds before the task times out and fails.\n :param mode: How the sensor operates.\n Options are: ``{ poke | reschedule }``, default is ``poke``.\n When set to ``poke`` the sensor is taking up a worker slot 
for its\n whole execution time and sleeps between pokes. Use this mode if the\n expected runtime of the sensor is short or if a short poke interval\n is required. Note that the sensor will hold onto a worker slot and\n a pool slot for the duration of the sensor's runtime in this mode.\n When set to ``reschedule`` the sensor task frees the worker slot when\n the criteria is not yet met and it's rescheduled at a later time. Use\n this mode if the time before the criteria is met is expected to be\n quite long. The poke interval should be more than one minute to\n prevent too much load on the scheduler.\n :param exponential_backoff: allow progressive longer waits between\n pokes by using exponential backoff algorithm\n \"\"\"\n\n ui_color = '#e6f1f2' # type: str\n valid_modes = ['poke', 'reschedule'] # type: Iterable[str]\n\n # As the poke context in smart sensor defines the poking job signature only,\n # The execution_fields defines other execution details\n # for this tasks such as the customer defined timeout, the email and the alert\n # setup. Smart sensor serialize these attributes into a different DB column so\n # that smart sensor service is able to handle corresponding execution details\n # without breaking the sensor poking logic with dedup.\n execution_fields = (\n 'poke_interval',\n 'retries',\n 'execution_timeout',\n 'timeout',\n 'email',\n 'email_on_retry',\n 'email_on_failure',\n )\n\n # Adds one additional dependency for all sensor operators that checks if a\n # sensor task instance can be rescheduled.\n deps = BaseOperator.deps | {ReadyToRescheduleDep()}\n\n def __init__(\n self,\n *,\n poke_interval: float = 60,\n timeout: float = conf.getfloat('sensors', 'default_timeout'),\n soft_fail: bool = False,\n mode: str = 'poke',\n exponential_backoff: bool = False,\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.poke_interval = poke_interval\n self.soft_fail = soft_fail\n self.timeout = timeout\n self.mode = mode\n self.exponential_backoff = exponential_backoff\n self._validate_input_values()\n self.sensor_service_enabled = conf.getboolean('smart_sensor', 'use_smart_sensor')\n self.sensors_support_sensor_service = set(\n map(lambda l: l.strip(), conf.get('smart_sensor', 'sensors_enabled').split(','))\n )\n\n def _validate_input_values(self) -> None:\n if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:\n raise AirflowException(\"The poke_interval must be a non-negative number\")\n if not isinstance(self.timeout, (int, float)) or self.timeout < 0:\n raise AirflowException(\"The timeout must be a non-negative number\")\n if self.mode not in self.valid_modes:\n raise AirflowException(\n f\"The mode must be one of {self.valid_modes},'{self.dag.dag_id if self.has_dag() else ''} \"\n f\".{self.task_id}'; received '{self.mode}'.\"\n )\n\n # Quick check for poke_interval isn't immediately over MySQL's TIMESTAMP limit.\n # This check is only rudimentary to catch trivial user errors, e.g. mistakenly\n # set the value to milliseconds instead of seconds. 
There's another check when\n # we actually try to reschedule to ensure database coherence.\n if self.reschedule and _is_metadatabase_mysql():\n if timezone.utcnow() + datetime.timedelta(seconds=self.poke_interval) > _MYSQL_TIMESTAMP_MAX:\n raise AirflowException(\n f\"Cannot set poke_interval to {self.poke_interval} seconds in reschedule \"\n f\"mode since it will take reschedule time over MySQL's TIMESTAMP limit.\"\n )\n\n def poke(self, context: Context) -> Union[bool, PokeReturnValue]:\n \"\"\"\n Function that the sensors defined while deriving this class should\n override.\n \"\"\"\n raise AirflowException('Override me.')\n\n def is_smart_sensor_compatible(self):\n check_list = [\n not self.sensor_service_enabled,\n self.on_success_callback,\n self.on_retry_callback,\n self.on_failure_callback,\n ]\n if any(check_list):\n return False\n\n operator = self.__class__.__name__\n return operator in self.sensors_support_sensor_service\n\n def register_in_sensor_service(self, ti, context):\n \"\"\"\n Register ti in smart sensor service\n\n :param ti: Task instance object.\n :param context: TaskInstance template context from the ti.\n :return: boolean\n \"\"\"\n docs_url = get_docs_url('concepts/smart-sensors.html#migrating-to-deferrable-operators')\n warnings.warn(\n 'Your sensor is using Smart Sensors, which are deprecated.'\n f' Please use Deferrable Operators instead. See {docs_url} for more info.',\n DeprecationWarning,\n )\n poke_context = self.get_poke_context(context)\n execution_context = self.get_execution_context(context)\n\n return SensorInstance.register(ti, poke_context, execution_context)\n\n def get_poke_context(self, context):\n \"\"\"\n Return a dictionary with all attributes in poke_context_fields. The\n poke_context with operator class can be used to identify a unique\n sensor job.\n\n :param context: TaskInstance template context.\n :return: A dictionary with key in poke_context_fields.\n \"\"\"\n if not context:\n self.log.info(\"Function get_poke_context doesn't have a context input.\")\n\n poke_context_fields = getattr(self.__class__, \"poke_context_fields\", None)\n result = {key: getattr(self, key, None) for key in poke_context_fields}\n return result\n\n def get_execution_context(self, context):\n \"\"\"\n Return a dictionary with all attributes in execution_fields. 
The\n execution_context include execution requirement for each sensor task\n such as timeout setup, email_alert setup.\n\n :param context: TaskInstance template context.\n :return: A dictionary with key in execution_fields.\n \"\"\"\n if not context:\n self.log.info(\"Function get_execution_context doesn't have a context input.\")\n execution_fields = self.__class__.execution_fields\n\n result = {key: getattr(self, key, None) for key in execution_fields}\n if result['execution_timeout'] and isinstance(result['execution_timeout'], datetime.timedelta):\n result['execution_timeout'] = result['execution_timeout'].total_seconds()\n return result\n\n def execute(self, context: Context) -> Any:\n started_at: Union[datetime.datetime, float]\n\n if self.reschedule:\n\n # If reschedule, use the start date of the first try (first try can be either the very\n # first execution of the task, or the first execution after the task was cleared.)\n first_try_number = context['ti'].max_tries - self.retries + 1\n task_reschedules = TaskReschedule.find_for_task_instance(\n context['ti'], try_number=first_try_number\n )\n if not task_reschedules:\n start_date = timezone.utcnow()\n else:\n start_date = task_reschedules[0].start_date\n started_at = start_date\n\n def run_duration() -> float:\n # If we are in reschedule mode, then we have to compute diff\n # based on the time in a DB, so can't use time.monotonic\n return (timezone.utcnow() - start_date).total_seconds()\n\n else:\n started_at = start_monotonic = time.monotonic()\n\n def run_duration() -> float:\n return time.monotonic() - start_monotonic\n\n try_number = 1\n log_dag_id = self.dag.dag_id if self.has_dag() else \"\"\n\n xcom_value = None\n while True:\n poke_return = self.poke(context)\n if poke_return:\n if isinstance(poke_return, PokeReturnValue):\n xcom_value = poke_return.xcom_value\n break\n\n if run_duration() > self.timeout:\n # If sensor is in soft fail mode but times out raise AirflowSkipException.\n if self.soft_fail:\n raise AirflowSkipException(f\"Snap. Time is OUT. DAG id: {log_dag_id}\")\n else:\n raise AirflowSensorTimeout(f\"Snap. Time is OUT. DAG id: {log_dag_id}\")\n if self.reschedule:\n next_poke_interval = self._get_next_poke_interval(started_at, run_duration, try_number)\n reschedule_date = timezone.utcnow() + timedelta(seconds=next_poke_interval)\n if _is_metadatabase_mysql() and reschedule_date > _MYSQL_TIMESTAMP_MAX:\n raise AirflowSensorTimeout(\n f\"Cannot reschedule DAG {log_dag_id} to {reschedule_date.isoformat()} \"\n f\"since it is over MySQL's TIMESTAMP storage limit.\"\n )\n raise AirflowRescheduleException(reschedule_date)\n else:\n time.sleep(self._get_next_poke_interval(started_at, run_duration, try_number))\n try_number += 1\n self.log.info(\"Success criteria met. 
Exiting.\")\n return xcom_value\n\n def _get_next_poke_interval(\n self,\n started_at: Union[datetime.datetime, float],\n run_duration: Callable[[], float],\n try_number: int,\n ) -> float:\n \"\"\"Using the similar logic which is used for exponential backoff retry delay for operators.\"\"\"\n if not self.exponential_backoff:\n return self.poke_interval\n\n min_backoff = int(self.poke_interval * (2 ** (try_number - 2)))\n\n run_hash = int(\n hashlib.sha1(f\"{self.dag_id}#{self.task_id}#{started_at}#{try_number}\".encode()).hexdigest(),\n 16,\n )\n modded_hash = min_backoff + run_hash % min_backoff\n\n delay_backoff_in_seconds = min(modded_hash, timedelta.max.total_seconds() - 1)\n new_interval = min(self.timeout - int(run_duration()), delay_backoff_in_seconds)\n self.log.info(\"new %s interval is %s\", self.mode, new_interval)\n return new_interval\n\n def prepare_for_execution(self) -> BaseOperator:\n task = super().prepare_for_execution()\n # Sensors in `poke` mode can block execution of DAGs when running\n # with single process executor, thus we change the mode to`reschedule`\n # to allow parallel task being scheduled and executed\n if conf.get('core', 'executor') == \"DebugExecutor\":\n self.log.warning(\"DebugExecutor changes sensor mode to 'reschedule'.\")\n task.mode = 'reschedule'\n return task\n\n @property\n def reschedule(self):\n \"\"\"Define mode rescheduled sensors.\"\"\"\n return self.mode == 'reschedule'\n\n\ndef poke_mode_only(cls):\n \"\"\"\n Class Decorator for child classes of BaseSensorOperator to indicate\n that instances of this class are only safe to use poke mode.\n\n Will decorate all methods in the class to assert they did not change\n the mode from 'poke'.\n\n :param cls: BaseSensor class to enforce methods only use 'poke' mode.\n \"\"\"\n\n def decorate(cls_type):\n def mode_getter(_):\n return 'poke'\n\n def mode_setter(_, value):\n if value != 'poke':\n raise ValueError(\"cannot set mode to 'poke'.\")\n\n if not issubclass(cls_type, BaseSensorOperator):\n raise ValueError(\n f\"poke_mode_only decorator should only be \"\n f\"applied to subclasses of BaseSensorOperator,\"\n f\" got:{cls_type}.\"\n )\n\n cls_type.mode = property(mode_getter, mode_setter)\n\n return cls_type\n\n return decorate(cls)\n", "path": "airflow/sensors/base.py" } ]
[ { "content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport datetime\nimport functools\nimport hashlib\nimport time\nimport warnings\nfrom datetime import timedelta\nfrom typing import Any, Callable, Iterable, Optional, Union\n\nfrom airflow import settings\nfrom airflow.configuration import conf\nfrom airflow.exceptions import (\n AirflowException,\n AirflowRescheduleException,\n AirflowSensorTimeout,\n AirflowSkipException,\n)\nfrom airflow.models.baseoperator import BaseOperator\nfrom airflow.models.sensorinstance import SensorInstance\nfrom airflow.models.skipmixin import SkipMixin\nfrom airflow.models.taskreschedule import TaskReschedule\nfrom airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep\nfrom airflow.utils import timezone\nfrom airflow.utils.context import Context\n\n# We need to keep the import here because GCSToLocalFilesystemOperator released in\n# Google Provider before 3.0.0 imported apply_defaults from here.\n# See https://github.com/apache/airflow/issues/16035\nfrom airflow.utils.decorators import apply_defaults # noqa: F401\nfrom airflow.utils.docs import get_docs_url\n\n# As documented in https://dev.mysql.com/doc/refman/5.7/en/datetime.html.\n_MYSQL_TIMESTAMP_MAX = datetime.datetime(2038, 1, 19, 3, 14, 7, tzinfo=timezone.utc)\n\n\[email protected]_cache(maxsize=None)\ndef _is_metadatabase_mysql() -> bool:\n if settings.engine is None:\n raise AirflowException(\"Must initialize ORM first\")\n return settings.engine.url.get_backend_name() == \"mysql\"\n\n\nclass PokeReturnValue:\n \"\"\"\n Sensors can optionally return an instance of the PokeReturnValue class in the poke method.\n If an XCom value is supplied when the sensor is done, then the XCom value will be\n pushed through the operator return value.\n :param is_done: Set to true to indicate the sensor can stop poking.\n :param xcom_value: An optional XCOM value to be returned by the operator.\n \"\"\"\n\n def __init__(self, is_done: bool, xcom_value: Optional[Any] = None) -> None:\n self.xcom_value = xcom_value\n self.is_done = is_done\n\n def __bool__(self) -> bool:\n return self.is_done\n\n\nclass BaseSensorOperator(BaseOperator, SkipMixin):\n \"\"\"\n Sensor operators are derived from this class and inherit these attributes.\n\n Sensor operators keep executing at a time interval and succeed when\n a criteria is met and fail if and when they time out.\n\n :param soft_fail: Set to true to mark the task as SKIPPED on failure\n :param poke_interval: Time in seconds that the job should wait in\n between each tries\n :param timeout: Time, in seconds before the task times out and fails.\n :param mode: How the sensor operates.\n Options are: ``{ poke | reschedule }``, default is ``poke``.\n When set to ``poke`` the sensor is taking up a worker slot 
for its\n whole execution time and sleeps between pokes. Use this mode if the\n expected runtime of the sensor is short or if a short poke interval\n is required. Note that the sensor will hold onto a worker slot and\n a pool slot for the duration of the sensor's runtime in this mode.\n When set to ``reschedule`` the sensor task frees the worker slot when\n the criteria is not yet met and it's rescheduled at a later time. Use\n this mode if the time before the criteria is met is expected to be\n quite long. The poke interval should be more than one minute to\n prevent too much load on the scheduler.\n :param exponential_backoff: allow progressive longer waits between\n pokes by using exponential backoff algorithm\n \"\"\"\n\n ui_color = '#e6f1f2' # type: str\n valid_modes = ['poke', 'reschedule'] # type: Iterable[str]\n\n # As the poke context in smart sensor defines the poking job signature only,\n # The execution_fields defines other execution details\n # for this tasks such as the customer defined timeout, the email and the alert\n # setup. Smart sensor serialize these attributes into a different DB column so\n # that smart sensor service is able to handle corresponding execution details\n # without breaking the sensor poking logic with dedup.\n execution_fields = (\n 'poke_interval',\n 'retries',\n 'execution_timeout',\n 'timeout',\n 'email',\n 'email_on_retry',\n 'email_on_failure',\n )\n\n # Adds one additional dependency for all sensor operators that checks if a\n # sensor task instance can be rescheduled.\n deps = BaseOperator.deps | {ReadyToRescheduleDep()}\n\n def __init__(\n self,\n *,\n poke_interval: float = 60,\n timeout: float = conf.getfloat('sensors', 'default_timeout'),\n soft_fail: bool = False,\n mode: str = 'poke',\n exponential_backoff: bool = False,\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.poke_interval = poke_interval\n self.soft_fail = soft_fail\n self.timeout = timeout\n self.mode = mode\n self.exponential_backoff = exponential_backoff\n self._validate_input_values()\n self.sensor_service_enabled = conf.getboolean('smart_sensor', 'use_smart_sensor')\n self.sensors_support_sensor_service = set(\n map(lambda l: l.strip(), conf.get('smart_sensor', 'sensors_enabled').split(','))\n )\n\n def _validate_input_values(self) -> None:\n if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:\n raise AirflowException(\"The poke_interval must be a non-negative number\")\n if not isinstance(self.timeout, (int, float)) or self.timeout < 0:\n raise AirflowException(\"The timeout must be a non-negative number\")\n if self.mode not in self.valid_modes:\n raise AirflowException(\n f\"The mode must be one of {self.valid_modes},'{self.dag.dag_id if self.has_dag() else ''} \"\n f\".{self.task_id}'; received '{self.mode}'.\"\n )\n\n # Quick check for poke_interval isn't immediately over MySQL's TIMESTAMP limit.\n # This check is only rudimentary to catch trivial user errors, e.g. mistakenly\n # set the value to milliseconds instead of seconds. 
There's another check when\n # we actually try to reschedule to ensure database coherence.\n if self.reschedule and _is_metadatabase_mysql():\n if timezone.utcnow() + datetime.timedelta(seconds=self.poke_interval) > _MYSQL_TIMESTAMP_MAX:\n raise AirflowException(\n f\"Cannot set poke_interval to {self.poke_interval} seconds in reschedule \"\n f\"mode since it will take reschedule time over MySQL's TIMESTAMP limit.\"\n )\n\n def poke(self, context: Context) -> Union[bool, PokeReturnValue]:\n \"\"\"\n Function that the sensors defined while deriving this class should\n override.\n \"\"\"\n raise AirflowException('Override me.')\n\n def is_smart_sensor_compatible(self):\n check_list = [\n not self.sensor_service_enabled,\n self.on_success_callback,\n self.on_retry_callback,\n self.on_failure_callback,\n ]\n if any(check_list):\n return False\n\n operator = self.__class__.__name__\n return operator in self.sensors_support_sensor_service\n\n def register_in_sensor_service(self, ti, context):\n \"\"\"\n Register ti in smart sensor service\n\n :param ti: Task instance object.\n :param context: TaskInstance template context from the ti.\n :return: boolean\n \"\"\"\n docs_url = get_docs_url('concepts/smart-sensors.html#migrating-to-deferrable-operators')\n warnings.warn(\n 'Your sensor is using Smart Sensors, which are deprecated.'\n f' Please use Deferrable Operators instead. See {docs_url} for more info.',\n DeprecationWarning,\n )\n poke_context = self.get_poke_context(context)\n execution_context = self.get_execution_context(context)\n\n return SensorInstance.register(ti, poke_context, execution_context)\n\n def get_poke_context(self, context):\n \"\"\"\n Return a dictionary with all attributes in poke_context_fields. The\n poke_context with operator class can be used to identify a unique\n sensor job.\n\n :param context: TaskInstance template context.\n :return: A dictionary with key in poke_context_fields.\n \"\"\"\n if not context:\n self.log.info(\"Function get_poke_context doesn't have a context input.\")\n\n poke_context_fields = getattr(self.__class__, \"poke_context_fields\", None)\n result = {key: getattr(self, key, None) for key in poke_context_fields}\n return result\n\n def get_execution_context(self, context):\n \"\"\"\n Return a dictionary with all attributes in execution_fields. 
The\n execution_context include execution requirement for each sensor task\n such as timeout setup, email_alert setup.\n\n :param context: TaskInstance template context.\n :return: A dictionary with key in execution_fields.\n \"\"\"\n if not context:\n self.log.info(\"Function get_execution_context doesn't have a context input.\")\n execution_fields = self.__class__.execution_fields\n\n result = {key: getattr(self, key, None) for key in execution_fields}\n if result['execution_timeout'] and isinstance(result['execution_timeout'], datetime.timedelta):\n result['execution_timeout'] = result['execution_timeout'].total_seconds()\n return result\n\n def execute(self, context: Context) -> Any:\n started_at: Union[datetime.datetime, float]\n\n if self.reschedule:\n\n # If reschedule, use the start date of the first try (first try can be either the very\n # first execution of the task, or the first execution after the task was cleared.)\n first_try_number = context['ti'].max_tries - self.retries + 1\n task_reschedules = TaskReschedule.find_for_task_instance(\n context['ti'], try_number=first_try_number\n )\n if not task_reschedules:\n start_date = timezone.utcnow()\n else:\n start_date = task_reschedules[0].start_date\n started_at = start_date\n\n def run_duration() -> float:\n # If we are in reschedule mode, then we have to compute diff\n # based on the time in a DB, so can't use time.monotonic\n return (timezone.utcnow() - start_date).total_seconds()\n\n else:\n started_at = start_monotonic = time.monotonic()\n\n def run_duration() -> float:\n return time.monotonic() - start_monotonic\n\n try_number = 1\n log_dag_id = self.dag.dag_id if self.has_dag() else \"\"\n\n xcom_value = None\n while True:\n poke_return = self.poke(context)\n if poke_return:\n if isinstance(poke_return, PokeReturnValue):\n xcom_value = poke_return.xcom_value\n break\n\n if run_duration() > self.timeout:\n # If sensor is in soft fail mode but times out raise AirflowSkipException.\n if self.soft_fail:\n raise AirflowSkipException(f\"Snap. Time is OUT. DAG id: {log_dag_id}\")\n else:\n raise AirflowSensorTimeout(f\"Snap. Time is OUT. DAG id: {log_dag_id}\")\n if self.reschedule:\n next_poke_interval = self._get_next_poke_interval(started_at, run_duration, try_number)\n reschedule_date = timezone.utcnow() + timedelta(seconds=next_poke_interval)\n if _is_metadatabase_mysql() and reschedule_date > _MYSQL_TIMESTAMP_MAX:\n raise AirflowSensorTimeout(\n f\"Cannot reschedule DAG {log_dag_id} to {reschedule_date.isoformat()} \"\n f\"since it is over MySQL's TIMESTAMP storage limit.\"\n )\n raise AirflowRescheduleException(reschedule_date)\n else:\n time.sleep(self._get_next_poke_interval(started_at, run_duration, try_number))\n try_number += 1\n self.log.info(\"Success criteria met. 
Exiting.\")\n return xcom_value\n\n def _get_next_poke_interval(\n self,\n started_at: Union[datetime.datetime, float],\n run_duration: Callable[[], float],\n try_number: int,\n ) -> float:\n \"\"\"Using the similar logic which is used for exponential backoff retry delay for operators.\"\"\"\n if not self.exponential_backoff:\n return self.poke_interval\n\n min_backoff = int(self.poke_interval * (2 ** (try_number - 2)))\n\n run_hash = int(\n hashlib.sha1(f\"{self.dag_id}#{self.task_id}#{started_at}#{try_number}\".encode()).hexdigest(),\n 16,\n )\n modded_hash = min_backoff + run_hash % min_backoff\n\n delay_backoff_in_seconds = min(modded_hash, timedelta.max.total_seconds() - 1)\n new_interval = min(self.timeout - int(run_duration()), delay_backoff_in_seconds)\n self.log.info(\"new %s interval is %s\", self.mode, new_interval)\n return new_interval\n\n def prepare_for_execution(self) -> BaseOperator:\n task = super().prepare_for_execution()\n # Sensors in `poke` mode can block execution of DAGs when running\n # with single process executor, thus we change the mode to`reschedule`\n # to allow parallel task being scheduled and executed\n if conf.get('core', 'executor') == \"DebugExecutor\":\n self.log.warning(\"DebugExecutor changes sensor mode to 'reschedule'.\")\n task.mode = 'reschedule'\n return task\n\n @property\n def reschedule(self):\n \"\"\"Define mode rescheduled sensors.\"\"\"\n return self.mode == 'reschedule'\n\n @classmethod\n def get_serialized_fields(cls):\n return super().get_serialized_fields() | {\"reschedule\"}\n\n\ndef poke_mode_only(cls):\n \"\"\"\n Class Decorator for child classes of BaseSensorOperator to indicate\n that instances of this class are only safe to use poke mode.\n\n Will decorate all methods in the class to assert they did not change\n the mode from 'poke'.\n\n :param cls: BaseSensor class to enforce methods only use 'poke' mode.\n \"\"\"\n\n def decorate(cls_type):\n def mode_getter(_):\n return 'poke'\n\n def mode_setter(_, value):\n if value != 'poke':\n raise ValueError(\"cannot set mode to 'poke'.\")\n\n if not issubclass(cls_type, BaseSensorOperator):\n raise ValueError(\n f\"poke_mode_only decorator should only be \"\n f\"applied to subclasses of BaseSensorOperator,\"\n f\" got:{cls_type}.\"\n )\n\n cls_type.mode = property(mode_getter, mode_setter)\n\n return cls_type\n\n return decorate(cls)\n", "path": "airflow/sensors/base.py" } ]
diff --git a/airflow/sensors/base.py b/airflow/sensors/base.py index 5cbe009c82b95..590b48f04bc73 100644 --- a/airflow/sensors/base.py +++ b/airflow/sensors/base.py @@ -339,6 +339,10 @@ def reschedule(self): """Define mode rescheduled sensors.""" return self.mode == 'reschedule' + @classmethod + def get_serialized_fields(cls): + return super().get_serialized_fields() | {"reschedule"} + def poke_mode_only(cls): """ diff --git a/tests/serialization/test_dag_serialization.py b/tests/serialization/test_dag_serialization.py index 0144501f1a13d..fe9fc7c7e5447 100644 --- a/tests/serialization/test_dag_serialization.py +++ b/tests/serialization/test_dag_serialization.py @@ -1462,6 +1462,7 @@ def poke(self, context: Context): assert "deps" in blob serialized_op = SerializedBaseOperator.deserialize_operator(blob) + assert serialized_op.reschedule == (mode == "reschedule") assert op.deps == serialized_op.deps @pytest.mark.parametrize( diff --git a/tests/ti_deps/deps/test_ready_to_reschedule_dep.py b/tests/ti_deps/deps/test_ready_to_reschedule_dep.py index 470166db21c8d..99416bbbc8927 100644 --- a/tests/ti_deps/deps/test_ready_to_reschedule_dep.py +++ b/tests/ti_deps/deps/test_ready_to_reschedule_dep.py @@ -31,7 +31,7 @@ class TestNotInReschedulePeriodDep(unittest.TestCase): def _get_task_instance(self, state): dag = DAG('test_dag') - task = Mock(dag=dag) + task = Mock(dag=dag, reschedule=True) ti = TaskInstance(task=task, state=state, run_id=None) return ti @@ -52,6 +52,11 @@ def test_should_pass_if_ignore_in_reschedule_period_is_set(self): dep_context = DepContext(ignore_in_reschedule_period=True) assert ReadyToRescheduleDep().is_met(ti=ti, dep_context=dep_context) + def test_should_pass_if_not_reschedule_mode(self): + ti = self._get_task_instance(State.UP_FOR_RESCHEDULE) + del ti.task.reschedule + assert ReadyToRescheduleDep().is_met(ti=ti) + def test_should_pass_if_not_in_none_state(self): ti = self._get_task_instance(State.UP_FOR_RETRY) assert ReadyToRescheduleDep().is_met(ti=ti)
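The patch above works by adding the sensor's `reschedule` property to the serialized operator fields, so the scheduler-side (deserialized) task still knows it runs in `reschedule` mode, which is what `ReadyToRescheduleDep` checks. Below is a minimal, hedged sketch of how to verify this; `MyWaitSensor` is a hypothetical sensor used only for illustration and is not part of the patch:

```python
# Sketch (assumes the patch above is applied): "reschedule" should now be among
# the fields that survive DAG serialization, so a deserialized operator still
# reports reschedule=True for mode='reschedule' sensors.
from airflow.sensors.base import BaseSensorOperator


class MyWaitSensor(BaseSensorOperator):
    """Hypothetical sensor that is never done, so it keeps being rescheduled."""

    def poke(self, context):
        return False


# Membership check against the serialized-field set extended by the patch.
print("reschedule" in BaseSensorOperator.get_serialized_fields())  # expected: True

sensor = MyWaitSensor(task_id="wait", mode="reschedule", poke_interval=30)
print(sensor.reschedule)  # True; this is the flag ReadyToRescheduleDep relies on
```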
vispy__vispy-2592
What's the status of GLFW?

I see there's a `glfw` backend, but in `setup.py` it is neither listed as a dependency nor defined as an extra. Is that an oversight, or is it deliberate? I'm packaging `vispy` for Fedora, and with [glfw](https://pypi.org/project/glfw/) added as a dependency I'm seeing `glfw` listed in the output of `vispy.sys_info()`. Tests using `glfw` as a backend also appear to work fine.
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"Vispy setup script.\n\nSteps to do a new release:\n\nPreparations:\n * Test on Windows, Linux, Mac\n * Make release notes\n * Update API documentation and other docs that need updating.\n\nDefine the version and release:\n * tag the tip changeset as version x.x.x; `git tag -a 'vX.Y.Z' -m \"Version X.Y.Z\"`\n * push tag to github\n * verify that azure pipelines complete\n * verify that `.tar.gz` sdist and binary wheels are available on PyPI\n\nAnnouncing:\n * It can be worth waiting a day for eager users to report critical bugs\n * Announce in scipy-user, vispy mailing list, twitter (@vispyproject)\n\n\"\"\"\n\nimport os\nimport sys\nfrom os import path as op\nfrom setuptools import setup, find_packages\n\nimport numpy as np\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import Extension\n\nname = 'vispy'\ndescription = 'Interactive visualization in Python'\n\n# Special commands for building jupyter notebook extension\nhere = os.path.dirname(os.path.abspath(__file__))\nnode_root = os.path.join(here, 'js')\nis_repo = os.path.exists(os.path.join(here, '.git'))\n\nnpm_path = os.pathsep.join([\n os.path.join(node_root, 'node_modules', '.bin'),\n os.environ.get('PATH', os.defpath),\n])\n\n\ndef set_builtin(name, value):\n if isinstance(__builtins__, dict):\n __builtins__[name] = value\n else:\n setattr(__builtins__, name, value)\n\n\nextensions = [Extension('vispy.visuals.text._sdf_cpu',\n sources=[op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],\n include_dirs=[np.get_include()],\n cython_directives={\"language_level\": \"3\"},\n define_macros=[(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")],\n ),\n ]\n\ninstall_requires = ['numpy', 'freetype-py', 'hsluv', 'kiwisolver', 'packaging']\nif sys.version_info < (3, 9):\n install_requires.append(\"importlib-resources\")\n\nreadme = open('README.rst', 'r').read()\nsetup(\n name=name,\n use_scm_version={\n 'write_to': 'vispy/version.py',\n # uses setuptools_scm.version.get_local_dirty_tag (+dirty or empty string)\n 'local_scheme': 'dirty-tag',\n },\n author='Vispy contributors',\n author_email='[email protected]',\n license='BSD-3-Clause',\n url='http://vispy.org',\n download_url='https://pypi.python.org/pypi/vispy',\n keywords=[\n 'visualization',\n 'OpenGl',\n 'ES',\n 'medical',\n 'imaging',\n '3D',\n 'plotting',\n 'numpy',\n 'bigdata',\n 'ipython',\n 'jupyter',\n 'widgets',\n ],\n description=description,\n long_description=readme,\n long_description_content_type='text/x-rst',\n platforms='any',\n provides=['vispy'],\n python_requires='>=3.8',\n install_requires=install_requires,\n extras_require={\n 'ipython-static': ['ipython'],\n 'pyglet': ['pyglet>=1.2'],\n 'pyqt5': ['pyqt5'],\n 'pyqt6': ['pyqt6'],\n 'pyside': ['PySide'],\n 'pyside2': ['PySide2'],\n 'pyside6': ['PySide6'],\n 'sdl2': ['PySDL2'],\n 'wx': ['wxPython'],\n 'tk': ['pyopengltk'],\n 'doc': ['pydata-sphinx-theme', 'numpydoc', 'sphinxcontrib-apidoc',\n 'sphinx-gallery', 'myst-parser', 'pillow', 'pytest',\n 'pyopengl'],\n 'io': ['meshio', 'Pillow'],\n },\n packages=find_packages(exclude=['make']),\n ext_modules=cythonize(extensions, language_level=3),\n package_dir={'vispy': 'vispy'},\n data_files=[],\n include_package_data=True,\n package_data={\n 'vispy': [op.join('io', '_data', '*'),\n op.join('app', 'tests', 'qt-designer.ui'),\n op.join('util', 'fonts', 'data', '*.ttf'),\n ],\n\n 
'vispy.glsl': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.antialias': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.arrowheads': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.arrows': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.collections': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.colormaps': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.lines': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.markers': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.math': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.misc': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.transforms': ['*.vert', '*.frag', \"*.glsl\"],\n\n },\n zip_safe=False,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Education',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Framework :: IPython'\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"Vispy setup script.\n\nSteps to do a new release:\n\nPreparations:\n * Test on Windows, Linux, Mac\n * Make release notes\n * Update API documentation and other docs that need updating.\n\nDefine the version and release:\n * tag the tip changeset as version x.x.x; `git tag -a 'vX.Y.Z' -m \"Version X.Y.Z\"`\n * push tag to github\n * verify that azure pipelines complete\n * verify that `.tar.gz` sdist and binary wheels are available on PyPI\n\nAnnouncing:\n * It can be worth waiting a day for eager users to report critical bugs\n * Announce in scipy-user, vispy mailing list, twitter (@vispyproject)\n\n\"\"\"\n\nimport os\nimport sys\nfrom os import path as op\nfrom setuptools import setup, find_packages\n\nimport numpy as np\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import Extension\n\nname = 'vispy'\ndescription = 'Interactive visualization in Python'\n\n# Special commands for building jupyter notebook extension\nhere = os.path.dirname(os.path.abspath(__file__))\nnode_root = os.path.join(here, 'js')\nis_repo = os.path.exists(os.path.join(here, '.git'))\n\nnpm_path = os.pathsep.join([\n os.path.join(node_root, 'node_modules', '.bin'),\n os.environ.get('PATH', os.defpath),\n])\n\n\ndef set_builtin(name, value):\n if isinstance(__builtins__, dict):\n __builtins__[name] = value\n else:\n setattr(__builtins__, name, value)\n\n\nextensions = [Extension('vispy.visuals.text._sdf_cpu',\n sources=[op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],\n include_dirs=[np.get_include()],\n cython_directives={\"language_level\": \"3\"},\n define_macros=[(\"NPY_NO_DEPRECATED_API\", \"NPY_1_7_API_VERSION\")],\n ),\n ]\n\ninstall_requires = ['numpy', 'freetype-py', 'hsluv', 'kiwisolver', 'packaging']\nif sys.version_info < (3, 9):\n install_requires.append(\"importlib-resources\")\n\nreadme = open('README.rst', 'r').read()\nsetup(\n name=name,\n use_scm_version={\n 'write_to': 'vispy/version.py',\n # uses setuptools_scm.version.get_local_dirty_tag (+dirty or empty string)\n 'local_scheme': 'dirty-tag',\n },\n author='Vispy contributors',\n author_email='[email protected]',\n license='BSD-3-Clause',\n url='http://vispy.org',\n download_url='https://pypi.python.org/pypi/vispy',\n keywords=[\n 'visualization',\n 'OpenGl',\n 'ES',\n 'medical',\n 'imaging',\n '3D',\n 'plotting',\n 'numpy',\n 'bigdata',\n 'ipython',\n 'jupyter',\n 'widgets',\n ],\n description=description,\n long_description=readme,\n long_description_content_type='text/x-rst',\n platforms='any',\n provides=['vispy'],\n python_requires='>=3.8',\n install_requires=install_requires,\n extras_require={\n 'ipython-static': ['ipython'],\n 'pyglet': ['pyglet>=1.2'],\n 'pyqt5': ['pyqt5'],\n 'pyqt6': ['pyqt6'],\n 'pyside': ['PySide'],\n 'pyside2': ['PySide2'],\n 'pyside6': ['PySide6'],\n 'glfw': ['glfw'],\n 'sdl2': ['PySDL2'],\n 'wx': ['wxPython'],\n 'tk': ['pyopengltk'],\n 'doc': ['pydata-sphinx-theme', 'numpydoc', 'sphinxcontrib-apidoc',\n 'sphinx-gallery', 'myst-parser', 'pillow', 'pytest',\n 'pyopengl'],\n 'io': ['meshio', 'Pillow'],\n },\n packages=find_packages(exclude=['make']),\n ext_modules=cythonize(extensions, language_level=3),\n package_dir={'vispy': 'vispy'},\n data_files=[],\n include_package_data=True,\n package_data={\n 'vispy': [op.join('io', '_data', '*'),\n op.join('app', 'tests', 'qt-designer.ui'),\n op.join('util', 'fonts', 'data', 
'*.ttf'),\n ],\n\n 'vispy.glsl': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.antialias': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.arrowheads': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.arrows': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.collections': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.colormaps': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.lines': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.markers': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.math': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.misc': ['*.vert', '*.frag', \"*.glsl\"],\n 'vispy.glsl.transforms': ['*.vert', '*.frag', \"*.glsl\"],\n\n },\n zip_safe=False,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Education',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Framework :: IPython'\n ],\n)\n", "path": "setup.py" } ]
diff --git a/setup.py b/setup.py index 65cd9febc..2b49960d9 100644 --- a/setup.py +++ b/setup.py @@ -106,6 +106,7 @@ def set_builtin(name, value): 'pyside': ['PySide'], 'pyside2': ['PySide2'], 'pyside6': ['PySide6'], + 'glfw': ['glfw'], 'sdl2': ['PySDL2'], 'wx': ['wxPython'], 'tk': ['pyopengltk'],
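With this extra in place, the backend dependency can be pulled in via `pip install vispy[glfw]`. A short, hedged sketch of how a packager might check that the backend is actually picked up; the `vispy.use(app="glfw")` call is an assumption about explicit backend selection, while `sys_info()` is the diagnostic already mentioned in the issue:

```python
# Sketch: verify the glfw backend after installing the new extra
# (pip install vispy[glfw]). The vispy.use() call below assumes glfw is a
# selectable app backend on this system.
import vispy

print(vispy.sys_info())   # should list glfw among the available backends
vispy.use(app="glfw")     # explicitly select the GLFW app backend (assumption)
```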
cocotb__cocotb-1470
Documentation building: "WARNING: duplicate label"

With ``sphinx.ext.autosectionlabel`` (in use since https://github.com/cocotb/cocotb/commit/862012b3c01f90ea1fb715c206b4eb785119f964), we get quite a few new "duplicate label" warnings.

To fix some of those warnings, we could set ``autosectionlabel_prefix_document = True``, which prefixes each automatically generated section label with the name of the document it is in, followed by a colon. A drawback is that this hardcodes the file name of the document into the reference, so we would no longer be as free to rename documents. It also cannot catch all duplicates: in the release notes, for example, there are multiple "new features" subsections.

The alternative is to remove ``sphinx.ext.autosectionlabel`` and define our own reference anchors. I would prefer this.

Ping @Martoni
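For reference, the two options weighed here would look roughly like this in ``conf.py`` (a sketch; the concrete label names are illustrative, except for ``coroutines``, which appears in the pr diff further below):

```python
# Option A (workaround): keep the extension but prefix every auto-generated
# section label with its document name, e.g. a "Coroutines" heading in
# coroutines.rst would be referenced as :ref:`coroutines:Coroutines`.
extensions = [
    # ...
    'sphinx.ext.autosectionlabel',
]
autosectionlabel_prefix_document = True

# Option B (preferred in this issue): drop 'sphinx.ext.autosectionlabel' from
# the extensions list and add explicit anchors to the .rst sources instead,
# e.g. a ".. _coroutines:" label referenced via :ref:`coroutines`.
```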
[ { "content": "# -*- coding: utf-8 -*-\n#\n# cocotb documentation build configuration file\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport datetime\nimport os\nimport subprocess\nimport sys\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('../..'))\n\n# Add in-tree extensions to path\nsys.path.insert(0, os.path.abspath('../sphinxext'))\n\nimport cocotb\nfrom distutils.version import LooseVersion\n\nos.environ[\"SPHINX_BUILD\"] = \"1\"\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.imgmath',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.makedomain',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.inheritance_diagram',\n 'cairosvgconverter',\n 'breathe',\n 'sphinx_issues',\n 'sphinxarg.ext',\n ]\n\nintersphinx_mapping = {'python': ('https://docs.python.org/3', None)}\n\n# Github repo\nissues_github_path = \"cocotb/cocotb\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'cocotb'\ncopyright = u'2014-{0}, PotentialVentures'.format(datetime.datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The full version, including alpha/beta/rc tags.\nrelease = cocotb.__version__\n# The short X.Y version.\nv_major, v_minor = LooseVersion(release).version[:2]\nversion = '{}.{}'.format(v_major, v_minor)\n\nautoclass_content = \"both\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\n# The Read the Docs theme is available from\n# https://github.com/snide/sphinx_rtd_theme\n#\n# Install with\n# - pip install sphinx_rtd_theme\n# or\n# - apt-get install python-sphinx-rtd-theme\n\ntry:\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\nexcept ImportError:\n sys.stderr.write('Warning: The Sphinx \\'sphinx_rtd_theme\\' HTML theme was '+\n 'not found. Make sure you have the theme installed to produce pretty '+\n 'HTML output. Falling back to the default theme.\\n')\n\n html_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n#html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'cocotbdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'cocotb.tex', u'cocotb Documentation',\n u'PotentialVentures', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'cocotb', u'cocotb Documentation',\n [u'PotentialVentures'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'cocotb', u'cocotb Documentation',\n u'PotentialVentures', 'cocotb', 'Coroutine Cosimulation TestBench \\\n environment for efficient verification of RTL using Python.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n# For now show the todos\ntodo_include_todos = True\n\n# -- Extra setup for C documentation with Doxygen and breathe ------------------\n# see also https://breathe.readthedocs.io/en/latest/readthedocs.html\n\nenv = os.environ.copy()\nenv['PATH'] += ':.venv/bin'\nsubprocess.call('doxygen', cwd='..')\nsubprocess.call(['breathe-apidoc', '-o', 'source/generated', 'source/doxygen/_xml', '-f'], env=env, cwd='..')\n\n\nbreathe_projects = { \"cocotb\": \"doxygen/_xml\" }\nbreathe_default_project = \"cocotb\"\nbreathe_domain_by_extension = {\n \"h\" : \"cpp\",\n}\nbreathe_show_define_initializer = True\n\n# -- Extra setup for spelling check --------------------------------------------\n\n# Spelling check needs an additional module that is not installed by default.\n# Add it only if spelling check is requested so docs can be generated without it.\n\nif 'spelling' in sys.argv:\n extensions.append(\"sphinxcontrib.spelling\")\n\n# Spelling language.\nspelling_lang = 'en_US'\ntokenizer_lang = spelling_lang\n\n# Location of word list.\nspelling_word_list_filename = [\"spelling_wordlist.txt\", \"c_symbols.txt\"]\n\nspelling_ignore_pypi_package_names = False\nspelling_ignore_wiki_words = False\nspelling_show_suggestions = True\n\n# -- Extra setup for inheritance_diagram directive which uses graphviz ---------\n\ngraphviz_output_format = 'svg'\n\n# -- Extra setup for towncrier -------------------------------------------------\n# see also https://towncrier.readthedocs.io/en/actual-freaking-docs/\n\nin_progress_notes = subprocess.check_output(['towncrier', '--draft'],\n cwd='../..',\n universal_newlines=True)\nwith open('generated/master-notes.rst', 'w') as f:\n f.write(in_progress_notes)\n", "path": "documentation/source/conf.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n#\n# cocotb documentation build configuration file\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport datetime\nimport os\nimport subprocess\nimport sys\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('../..'))\n\n# Add in-tree extensions to path\nsys.path.insert(0, os.path.abspath('../sphinxext'))\n\nimport cocotb\nfrom distutils.version import LooseVersion\n\nos.environ[\"SPHINX_BUILD\"] = \"1\"\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.imgmath',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.makedomain',\n 'sphinx.ext.inheritance_diagram',\n 'cairosvgconverter',\n 'breathe',\n 'sphinx_issues',\n 'sphinxarg.ext',\n ]\n\nintersphinx_mapping = {'python': ('https://docs.python.org/3', None)}\n\n# Github repo\nissues_github_path = \"cocotb/cocotb\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'cocotb'\ncopyright = u'2014-{0}, PotentialVentures'.format(datetime.datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The full version, including alpha/beta/rc tags.\nrelease = cocotb.__version__\n# The short X.Y version.\nv_major, v_minor = LooseVersion(release).version[:2]\nversion = '{}.{}'.format(v_major, v_minor)\n\nautoclass_content = \"both\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\n# The Read the Docs theme is available from\n# https://github.com/snide/sphinx_rtd_theme\n#\n# Install with\n# - pip install sphinx_rtd_theme\n# or\n# - apt-get install python-sphinx-rtd-theme\n\ntry:\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\nexcept ImportError:\n sys.stderr.write('Warning: The Sphinx \\'sphinx_rtd_theme\\' HTML theme was '+\n 'not found. Make sure you have the theme installed to produce pretty '+\n 'HTML output. Falling back to the default theme.\\n')\n\n html_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n#html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'cocotbdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'cocotb.tex', u'cocotb Documentation',\n u'PotentialVentures', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'cocotb', u'cocotb Documentation',\n [u'PotentialVentures'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'cocotb', u'cocotb Documentation',\n u'PotentialVentures', 'cocotb', 'Coroutine Cosimulation TestBench \\\n environment for efficient verification of RTL using Python.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n# For now show the todos\ntodo_include_todos = True\n\n# -- Extra setup for C documentation with Doxygen and breathe ------------------\n# see also https://breathe.readthedocs.io/en/latest/readthedocs.html\n\nenv = os.environ.copy()\nenv['PATH'] += ':.venv/bin'\nsubprocess.call('doxygen', cwd='..')\nsubprocess.call(['breathe-apidoc', '-o', 'source/generated', 'source/doxygen/_xml', '-f'], env=env, cwd='..')\n\n\nbreathe_projects = { \"cocotb\": \"doxygen/_xml\" }\nbreathe_default_project = \"cocotb\"\nbreathe_domain_by_extension = {\n \"h\" : \"cpp\",\n}\nbreathe_show_define_initializer = True\n\n# -- Extra setup for spelling check --------------------------------------------\n\n# Spelling check needs an additional module that is not installed by default.\n# Add it only if spelling check is requested so docs can be generated without it.\n\nif 'spelling' in sys.argv:\n extensions.append(\"sphinxcontrib.spelling\")\n\n# Spelling language.\nspelling_lang = 'en_US'\ntokenizer_lang = spelling_lang\n\n# Location of word list.\nspelling_word_list_filename = [\"spelling_wordlist.txt\", \"c_symbols.txt\"]\n\nspelling_ignore_pypi_package_names = False\nspelling_ignore_wiki_words = False\nspelling_show_suggestions = True\n\n# -- Extra setup for inheritance_diagram directive which uses graphviz ---------\n\ngraphviz_output_format = 'svg'\n\n# -- Extra setup for towncrier -------------------------------------------------\n# see also https://towncrier.readthedocs.io/en/actual-freaking-docs/\n\nin_progress_notes = subprocess.check_output(['towncrier', '--draft'],\n cwd='../..',\n universal_newlines=True)\nwith open('generated/master-notes.rst', 'w') as f:\n f.write(in_progress_notes)\n", "path": "documentation/source/conf.py" } ]
diff --git a/documentation/source/building.rst b/documentation/source/building.rst index 763b1c270d..5c5bbcb1a8 100644 --- a/documentation/source/building.rst +++ b/documentation/source/building.rst @@ -53,7 +53,7 @@ and .. make:var:: WAVES Set this to 1 to enable wave traces dump for the Aldec Riviera-PRO and Mentor Graphics Questa simulators. - To get wave traces in Icarus Verilog see :ref:`Simulator Support`. + To get wave traces in Icarus Verilog see :ref:`sim-icarus-waveforms`. .. make:var:: VERILOG_SOURCES diff --git a/documentation/source/conf.py b/documentation/source/conf.py index ef46877454..87f2bc9583 100644 --- a/documentation/source/conf.py +++ b/documentation/source/conf.py @@ -42,7 +42,6 @@ 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx', 'sphinxcontrib.makedomain', - 'sphinx.ext.autosectionlabel', 'sphinx.ext.inheritance_diagram', 'cairosvgconverter', 'breathe', diff --git a/documentation/source/coroutines.rst b/documentation/source/coroutines.rst index 1f96bfc133..1314aa5487 100644 --- a/documentation/source/coroutines.rst +++ b/documentation/source/coroutines.rst @@ -1,3 +1,5 @@ +.. _coroutines: + ********** Coroutines ********** diff --git a/documentation/source/extensions.rst b/documentation/source/extensions.rst index 1256e6f26f..661e3ad719 100644 --- a/documentation/source/extensions.rst +++ b/documentation/source/extensions.rst @@ -14,6 +14,8 @@ In cocotb, such functionality can be packaged and distributed as extensions. Technically, cocotb extensions are normal Python packages, and all standard Python packaging and distribution techniques can be used. Additionally, the cocotb community has agreed on a set of conventions to make extensions easier to use and to discover. +.. _extensions-naming-conventions: + Naming conventions ================== @@ -52,7 +54,7 @@ Bus monitors should inherit from the :any:`cocotb.monitors.BusMonitor` class. Packaging extensions ==================== -To package a cocotb extension as Python package follow the :ref:`naming conventions <Naming conventions>`, and the `normal Python packaging rules <https://packaging.python.org/tutorials/packaging-projects/>`_. +To package a cocotb extension as Python package follow the :ref:`extensions-naming-conventions`, and the `normal Python packaging rules <https://packaging.python.org/tutorials/packaging-projects/>`_. Extensions namespaced packages, implemented using the `native namespacing <https://packaging.python.org/guides/packaging-namespace-packages/#native-namespace-packages>`_ approach discussed in :pep:`420`. The module file hierarchy should be as follows (replace ``EXTNAME`` with the name of the extension, e.g. ``spi``). diff --git a/documentation/source/quickstart.rst b/documentation/source/quickstart.rst index 965fed428b..2186c5838f 100644 --- a/documentation/source/quickstart.rst +++ b/documentation/source/quickstart.rst @@ -252,7 +252,7 @@ Use ``sig.setimmediatevalue(new_val)`` to set a new value immediately In addition to regular value assignments (deposits), signals can be forced to a predetermined value or frozen at their current value. To achieve this, -the various actions described in :ref:`Assignment Methods <assignment-methods>` can be used. +the various actions described in :ref:`assignment-methods` can be used. .. 
code-block:: python3 @@ -303,7 +303,7 @@ We can also cast the signal handle directly to an integer: Parallel and sequential execution --------------------------------- -A :keyword:`yield` will run a function (that must be marked as a "coroutine", see :ref:`Coroutines`) +A :keyword:`yield` will run a function (that must be marked as a "coroutine", see :ref:`coroutines`) sequentially, i.e. wait for it to complete. If a coroutine should be run "in the background", i.e. in parallel to other coroutines, the way to do this is to :func:`~cocotb.fork` it. diff --git a/documentation/source/release_notes.rst b/documentation/source/release_notes.rst index 35a2e712c2..b82afffb83 100644 --- a/documentation/source/release_notes.rst +++ b/documentation/source/release_notes.rst @@ -18,7 +18,7 @@ This will likely be the last release to support Python 2.7. New features ------------ -- Initial support for the :ref:`Verilator` simulator (version 4.020 and above). +- Initial support for the :ref:`sim-verilator` simulator (version 4.020 and above). The integration of Verilator into cocotb is not yet as fast or as powerful as it is for other simulators. Please use the latest version of Verilator, and `report bugs <https://github.com/cocotb/cocotb/issues/new>`_ if you experience problems. - New makefile variables :make:var:`COCOTB_HDL_TIMEUNIT` and :make:var:`COCOTB_HDL_TIMEPRECISION` for setting the default time unit and precision that should be assumed for simulation when not specified by modules in the design. (:pr:`1113`) diff --git a/documentation/source/simulator_support.rst b/documentation/source/simulator_support.rst index 520be11b48..020fe18699 100644 --- a/documentation/source/simulator_support.rst +++ b/documentation/source/simulator_support.rst @@ -1,12 +1,19 @@ +.. _simulator-support: + ***************** Simulator Support ***************** This page documents any known quirks and gotchas in the various simulators. + +.. _sim-icarus: + Icarus ====== +.. _sim-icarus-accessing-bits: + Accessing bits in a vector -------------------------- @@ -18,6 +25,8 @@ Accessing bits of a vector doesn't work: See ``access_single_bit`` test in :file:`examples/functionality/tests/test_discovery.py`. +.. _sim-icarus-waveforms: + Waveforms --------- @@ -44,12 +53,17 @@ to the top component as shown in the example below: `endif endmodule +.. _sim-icarus-time: + Time unit and precision ----------------------- Setting the time unit and time precision is not possible from the command-line, and therefore make variables :make:var:`COCOTB_HDL_TIMEUNIT` and :make:var:`COCOTB_HDL_TIMEPRECISION` are ignored. + +.. _sim-verilator: + Verilator ========= @@ -69,20 +83,31 @@ If your design's clocks vary in precision, the performance of the simulation can .. versionadded:: 1.3 + +.. _sim-vcs: + Synopsys VCS ============ +.. _sim-aldec: + Aldec Riviera-PRO ================= The :envvar:`LICENSE_QUEUE` environment variable can be used for this simulator – this setting will be mirrored in the TCL ``license_queue`` variable to control runtime license checkouts. + +.. _sim-questa: + Mentor Questa ============= + +.. _sim-modelsim: + Mentor ModelSim =============== @@ -95,9 +120,20 @@ If you try to run with FLI enabled, you will see a ``vsim-FLI-3155`` error: ModelSim DE and SE (and Questa, of course) supports the FLI. -Cadence Incisive, Cadence Xcelium -================================= +.. _sim-incisive: + +Cadence Incisive +================ + + +.. _sim-xcelium: + +Cadence Xcelium +=============== + + +.. _sim-ghdl: GHDL ====
pymedusa__Medusa-9273
IPT Provider error Hello, I have been having this problem for about a month. Medusa is unable to use IPTorrents to search and download; it always worked perfectly until one day it stopped. I have double-checked the cookie values and they are an exact match. Can anyone help me? Here's the log with the error: 2021-02-09 16:18:43 INFO FORCEDSEARCHQUEUE-MANUAL-364928 :: [2ab9d45] Unable to find manual results for: Snowpiercer - S02E02 - Smolder to Life 2021-02-09 16:18:43 INFO FORCEDSEARCHQUEUE-MANUAL-364928 :: IPTorrents :: [2ab9d45] Performing season pack search for Snowpiercer 2021-02-09 16:18:43 WARNING FORCEDSEARCHQUEUE-MANUAL-364928 :: IPTorrents :: [2ab9d45] Please configure the required cookies for this provider. Check your provider settings 2021-02-09 16:18:43 INFO FORCEDSEARCHQUEUE-MANUAL-364928 :: IPTorrents :: [2ab9d45] Unknown exception in url https://iptorrents.eu Error: Cloudflare IUAM possibility malformed, issue extracing delay value. 2021-02-09 16:18:43 INFO FORCEDSEARCHQUEUE-MANUAL-364928 :: IPTorrents :: [2ab9d45] Performing episode search for Snowpiercer Could it be because it's using iptorrents.eu instead of iptorrents.com?
[ { "content": "# coding=utf-8\n\n\"\"\"Provider code for IPTorrents.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass IPTorrentsProvider(TorrentProvider):\n \"\"\"IPTorrents Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(IPTorrentsProvider, self).__init__('IPTorrents')\n\n # URLs\n self.url = 'https://iptorrents.eu'\n self.urls = {\n 'base_url': self.url,\n 'login': urljoin(self.url, 'torrents'),\n 'search': urljoin(self.url, 't?%s%s&q=%s&qf=#torrents'),\n }\n\n # Proper Strings\n\n # Miscellaneous Options\n self.freeleech = False\n self.enable_cookies = True\n self.cookies = ''\n self.required_cookies = ('uid', 'pass')\n self.categories = '73=&60='\n\n # Cache\n self.cache = tv.Cache(self)\n\n def search(self, search_strings, age=0, ep_obj=None, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n freeleech = '&free=on' if self.freeleech else ''\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n\n # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile\n search_url = self.urls['search'] % (self.categories, freeleech, search_string)\n search_url += ';o=seeders' if mode != 'RSS' else ''\n\n response = self.session.get(search_url)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n data = re.sub(r'(?im)<button.+?<[/]button>', '', response.text, 0)\n\n results += self.parse(data, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n torrent_table = html.find('table', id='torrents')\n torrents = torrent_table('tr') if torrent_table else []\n\n # Continue only if at least one release is found\n if len(torrents) < 2 or html.find(text='No Torrents Found!'):\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n # Skip column headers\n for row in torrents[1:]:\n try:\n table_data = row('td')\n title = table_data[1].find('a').text\n download_url = self.urls['base_url'] + table_data[3].find('a')['href']\n if not all([title, download_url]):\n continue\n\n seeders = int(table_data[7].text)\n leechers = int(table_data[8].text)\n\n # Filter unseeded torrent\n if seeders < self.minseed:\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n ' minimum seeders: {0}. 
Seeders: {1}',\n title, seeders)\n continue\n\n torrent_size = table_data[5].text\n size = convert_size(torrent_size) or -1\n\n pubdate_raw = table_data[1].find('div').get_text().split('|')[-1].strip()\n pubdate = self.parse_pubdate(pubdate_raw, human_time=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n def login(self):\n \"\"\"Login method used for logging in before doing search and torrent downloads.\"\"\"\n return self.cookie_login('sign in')\n\n\nprovider = IPTorrentsProvider()\n", "path": "medusa/providers/torrent/html/iptorrents.py" } ]
[ { "content": "# coding=utf-8\n\n\"\"\"Provider code for IPTorrents.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass IPTorrentsProvider(TorrentProvider):\n \"\"\"IPTorrents Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(IPTorrentsProvider, self).__init__('IPTorrents')\n\n # URLs\n self.url = 'https://iptorrents.eu'\n self.urls = {\n 'base_url': self.url,\n 'login': urljoin(self.url, 'torrents'),\n 'search': urljoin(self.url, 't?%s%s&q=%s&qf=#torrents'),\n }\n\n # Proper Strings\n\n # Miscellaneous Options\n self.freeleech = False\n self.enable_cookies = True\n self.cookies = ''\n self.required_cookies = ('uid', 'pass')\n self.categories = '73=&60='\n self.custom_url = None\n\n # Cache\n self.cache = tv.Cache(self)\n\n def search(self, search_strings, age=0, ep_obj=None, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n freeleech = '&free=on' if self.freeleech else ''\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n\n # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile\n search_url = self.urls['search'] % (self.categories, freeleech, search_string)\n search_url += ';o=seeders' if mode != 'RSS' else ''\n\n response = self.session.get(search_url)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n data = re.sub(r'(?im)<button.+?<[/]button>', '', response.text, 0)\n\n results += self.parse(data, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n torrent_table = html.find('table', id='torrents')\n torrents = torrent_table('tr') if torrent_table else []\n\n # Continue only if at least one release is found\n if len(torrents) < 2 or html.find(text='No Torrents Found!'):\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n # Skip column headers\n for row in torrents[1:]:\n try:\n table_data = row('td')\n title = table_data[1].find('a').text\n download_url = self.urls['base_url'] + table_data[3].find('a')['href']\n if not all([title, download_url]):\n continue\n\n seeders = int(table_data[7].text)\n leechers = int(table_data[8].text)\n\n # Filter unseeded torrent\n if seeders < self.minseed:\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n ' minimum seeders: {0}. 
Seeders: {1}',\n title, seeders)\n continue\n\n torrent_size = table_data[5].text\n size = convert_size(torrent_size) or -1\n\n pubdate_raw = table_data[1].find('div').get_text().split('|')[-1].strip()\n pubdate = self.parse_pubdate(pubdate_raw, human_time=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n def login(self):\n \"\"\"Login method used for logging in before doing search and torrent downloads.\"\"\"\n return self.cookie_login('sign in')\n\n\nprovider = IPTorrentsProvider()\n", "path": "medusa/providers/torrent/html/iptorrents.py" } ]
diff --git a/medusa/providers/torrent/html/iptorrents.py b/medusa/providers/torrent/html/iptorrents.py index 9f16a540b3..1751ea6783 100644 --- a/medusa/providers/torrent/html/iptorrents.py +++ b/medusa/providers/torrent/html/iptorrents.py @@ -42,6 +42,7 @@ def __init__(self): self.cookies = '' self.required_cookies = ('uid', 'pass') self.categories = '73=&60=' + self.custom_url = None # Cache self.cache = tv.Cache(self)
RedHatInsights__insights-core-2085
Dmesg combiner always succeeds The [Dmesg combiner has only optional dependencies](https://github.com/RedHatInsights/insights-core/blob/master/insights/combiners/dmesg.py#L51), which means it always succeeds. This is an anti-pattern.
[ { "content": "\"\"\"\nDmesg\n=====\n\nCombiner for Dmesg information. It uses the results of the following parsers (if they are present):\n:class:`insights.parsers.dmesg.DmesgLineList`,\n:class:`insights.parsers.dmesg_log.DmesgLog`\n\nTypical output of the ``/var/log/dmesg`` file is::\n\n[ 0.000000] Initializing cgroup subsys cpu\n[ 0.000000] Linux version 3.10.0-862.el7.x86_64 ([email protected]) \\\n(gcc version 4.8.5 20150623 (Red Hat 4.8.5-28) (GCC) ) #1 SMP Wed Mar 21 18:14:51 EDT 2018\n[ 2.090905] SELinux: Completing initialization.\n[ 2.090907] SELinux: Setting up existing superblocks.\n[ 2.099684] systemd[1]: Successfully loaded SELinux policy in 82.788ms.\n[ 2.117410] ip_tables: (C) 2000-2006 Netfilter Core Team\n[ 2.117429] systemd[1]: Inserted module 'ip_tables'\n[ 2.376551] systemd-journald[441]: Received request to flush runtime journal from PID 1\n[ 2.716874] cryptd: max_cpu_qlen set to 100\n[ 2.804152] AES CTR mode by8 optimization enabled\n\nTypical output of the ``dmesg`` command is::\n\n[ 2.939498] [TTM] Initializing pool allocator\n[ 2.939502] [TTM] Initializing DMA pool allocator\n[ 2.940800] [drm] fb mappable at 0xFC000000\n[ 2.940947] fbcon: cirrusdrmfb (fb0) is primary device\n[ 2.957375] Console: switching to colour frame buffer device 128x48\n[ 2.959322] cirrus 0000:00:02.0: fb0: cirrusdrmfb frame buffer device\n[ 2.959334] [drm] Initialized cirrus 1.0.0 20110418 for 0000:00:02.0 on minor 0\n[ 3.062459] XFS (vda1): Ending clean mount\n[ 5.048484] ip6_tables: (C) 2000-2006 Netfilter Core Team\n[ 5.102434] Ebtables v2.0 registered\n\n\nExamples:\n >>> dmesg.dmesg_cmd_available\n True\n >>> dmesg.dmesg_log_available\n True\n >>> dmesg.dmesg_log_wrapped\n False\n\"\"\"\n\nfrom insights.core.plugins import combiner\nfrom insights.parsers.dmesg import DmesgLineList\nfrom insights.parsers.dmesg_log import DmesgLog\n\n\n@combiner(optional=[DmesgLineList, DmesgLog])\nclass Dmesg(object):\n \"\"\"\n Combiner for ``dmesg`` command and ``/var/log/dmesg`` file.\n \"\"\"\n\n def __init__(self, dmesg_cmd, dmesg_log):\n if dmesg_cmd is not None:\n self.dmesg_cmd_available = True\n self.dmesg_cmd = dmesg_cmd\n self.dmesg_cmd_wrapped = True if 'Linux version' not in dmesg_cmd else False\n else:\n self.dmesg_cmd_available = False\n\n if dmesg_log is not None:\n self.dmesg_log_available = True\n self.dmesg_log = dmesg_log\n self.dmesg_log_wrapped = True if 'Linux version' not in dmesg_log else False\n else:\n self.dmesg_log_available = False\n", "path": "insights/combiners/dmesg.py" } ]
[ { "content": "\"\"\"\nDmesg\n=====\n\nCombiner for Dmesg information. It uses the results of the following parsers (if they are present):\n:class:`insights.parsers.dmesg.DmesgLineList`,\n:class:`insights.parsers.dmesg_log.DmesgLog`\n\nTypical output of the ``/var/log/dmesg`` file is::\n\n[ 0.000000] Initializing cgroup subsys cpu\n[ 0.000000] Linux version 3.10.0-862.el7.x86_64 ([email protected]) \\\n(gcc version 4.8.5 20150623 (Red Hat 4.8.5-28) (GCC) ) #1 SMP Wed Mar 21 18:14:51 EDT 2018\n[ 2.090905] SELinux: Completing initialization.\n[ 2.090907] SELinux: Setting up existing superblocks.\n[ 2.099684] systemd[1]: Successfully loaded SELinux policy in 82.788ms.\n[ 2.117410] ip_tables: (C) 2000-2006 Netfilter Core Team\n[ 2.117429] systemd[1]: Inserted module 'ip_tables'\n[ 2.376551] systemd-journald[441]: Received request to flush runtime journal from PID 1\n[ 2.716874] cryptd: max_cpu_qlen set to 100\n[ 2.804152] AES CTR mode by8 optimization enabled\n\nTypical output of the ``dmesg`` command is::\n\n[ 2.939498] [TTM] Initializing pool allocator\n[ 2.939502] [TTM] Initializing DMA pool allocator\n[ 2.940800] [drm] fb mappable at 0xFC000000\n[ 2.940947] fbcon: cirrusdrmfb (fb0) is primary device\n[ 2.957375] Console: switching to colour frame buffer device 128x48\n[ 2.959322] cirrus 0000:00:02.0: fb0: cirrusdrmfb frame buffer device\n[ 2.959334] [drm] Initialized cirrus 1.0.0 20110418 for 0000:00:02.0 on minor 0\n[ 3.062459] XFS (vda1): Ending clean mount\n[ 5.048484] ip6_tables: (C) 2000-2006 Netfilter Core Team\n[ 5.102434] Ebtables v2.0 registered\n\n\nExamples:\n >>> dmesg.dmesg_cmd_available\n True\n >>> dmesg.dmesg_log_available\n True\n >>> dmesg.dmesg_log_wrapped\n False\n\"\"\"\n\nfrom insights.core.plugins import combiner\nfrom insights.parsers.dmesg import DmesgLineList\nfrom insights.parsers.dmesg_log import DmesgLog\n\n\n@combiner([DmesgLineList, DmesgLog])\nclass Dmesg(object):\n \"\"\"\n Combiner for ``dmesg`` command and ``/var/log/dmesg`` file.\n \"\"\"\n\n def __init__(self, dmesg_cmd, dmesg_log):\n if dmesg_cmd is not None:\n self.dmesg_cmd_available = True\n self.dmesg_cmd = dmesg_cmd\n self.dmesg_cmd_wrapped = True if 'Linux version' not in dmesg_cmd else False\n else:\n self.dmesg_cmd_available = False\n\n if dmesg_log is not None:\n self.dmesg_log_available = True\n self.dmesg_log = dmesg_log\n self.dmesg_log_wrapped = True if 'Linux version' not in dmesg_log else False\n else:\n self.dmesg_log_available = False\n", "path": "insights/combiners/dmesg.py" } ]
diff --git a/insights/combiners/dmesg.py b/insights/combiners/dmesg.py index 671a26cb5d..8424bb552f 100644 --- a/insights/combiners/dmesg.py +++ b/insights/combiners/dmesg.py @@ -48,7 +48,7 @@ from insights.parsers.dmesg_log import DmesgLog -@combiner(optional=[DmesgLineList, DmesgLog]) +@combiner([DmesgLineList, DmesgLog]) class Dmesg(object): """ Combiner for ``dmesg`` command and ``/var/log/dmesg`` file.
geopandas__geopandas-2398
Drop Python 3.7 We should consider dropping support for Python 3.7. We are roughly following the numpy model (#1457) and numpy itself is 3.8+ now. The same applies to pyproj, which requires 3.8 (and causes some macOS CI failures because of some conda issues). I forgot about Python versions when doing #2358 and only bumped the package versions. @jorisvandenbossche if you're fine with that, I'll update the CI matrix and related things.
[ { "content": "#!/usr/bin/env/python\n\"\"\"Installation script\n\n\"\"\"\n\nimport os\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"GeoPandas is a project to add support for geographic data to\n`pandas`_ objects.\n\nThe goal of GeoPandas is to make working with geospatial data in\npython easier. It combines the capabilities of `pandas`_ and `shapely`_,\nproviding geospatial operations in pandas and a high-level interface\nto multiple geometries to shapely. GeoPandas enables you to easily do\noperations in python that would otherwise require a spatial database\nsuch as PostGIS.\n\n.. _pandas: http://pandas.pydata.org\n.. _shapely: http://shapely.readthedocs.io/en/latest/\n\"\"\"\n\nif os.environ.get(\"READTHEDOCS\", False) == \"True\":\n INSTALL_REQUIRES = []\nelse:\n INSTALL_REQUIRES = [\n \"pandas >= 1.0.0\",\n \"shapely >= 1.7\",\n \"fiona >= 1.8\",\n \"pyproj >= 2.6.1.post1\",\n \"packaging\",\n ]\n\n# get all data dirs in the datasets module\ndata_files = []\n\nfor item in os.listdir(\"geopandas/datasets\"):\n if not item.startswith(\"__\"):\n if os.path.isdir(os.path.join(\"geopandas/datasets/\", item)):\n data_files.append(os.path.join(\"datasets\", item, \"*\"))\n elif item.endswith(\".zip\"):\n data_files.append(os.path.join(\"datasets\", item))\n\ndata_files.append(\"tests/data/*\")\n\n\nsetup(\n name=\"geopandas\",\n version=versioneer.get_version(),\n description=\"Geographic pandas extensions\",\n license=\"BSD\",\n author=\"GeoPandas contributors\",\n author_email=\"[email protected]\",\n url=\"http://geopandas.org\",\n project_urls={\n \"Source\": \"https://github.com/geopandas/geopandas\",\n },\n long_description=LONG_DESCRIPTION,\n packages=[\n \"geopandas\",\n \"geopandas.io\",\n \"geopandas.tools\",\n \"geopandas.datasets\",\n \"geopandas.tests\",\n \"geopandas.tools.tests\",\n ],\n package_data={\"geopandas\": data_files},\n python_requires=\">=3.7\",\n install_requires=INSTALL_REQUIRES,\n cmdclass=versioneer.get_cmdclass(),\n)\n", "path": "setup.py" } ]
[ { "content": "#!/usr/bin/env/python\n\"\"\"Installation script\n\n\"\"\"\n\nimport os\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"GeoPandas is a project to add support for geographic data to\n`pandas`_ objects.\n\nThe goal of GeoPandas is to make working with geospatial data in\npython easier. It combines the capabilities of `pandas`_ and `shapely`_,\nproviding geospatial operations in pandas and a high-level interface\nto multiple geometries to shapely. GeoPandas enables you to easily do\noperations in python that would otherwise require a spatial database\nsuch as PostGIS.\n\n.. _pandas: http://pandas.pydata.org\n.. _shapely: http://shapely.readthedocs.io/en/latest/\n\"\"\"\n\nif os.environ.get(\"READTHEDOCS\", False) == \"True\":\n INSTALL_REQUIRES = []\nelse:\n INSTALL_REQUIRES = [\n \"pandas >= 1.0.0\",\n \"shapely >= 1.7\",\n \"fiona >= 1.8\",\n \"pyproj >= 2.6.1.post1\",\n \"packaging\",\n ]\n\n# get all data dirs in the datasets module\ndata_files = []\n\nfor item in os.listdir(\"geopandas/datasets\"):\n if not item.startswith(\"__\"):\n if os.path.isdir(os.path.join(\"geopandas/datasets/\", item)):\n data_files.append(os.path.join(\"datasets\", item, \"*\"))\n elif item.endswith(\".zip\"):\n data_files.append(os.path.join(\"datasets\", item))\n\ndata_files.append(\"tests/data/*\")\n\n\nsetup(\n name=\"geopandas\",\n version=versioneer.get_version(),\n description=\"Geographic pandas extensions\",\n license=\"BSD\",\n author=\"GeoPandas contributors\",\n author_email=\"[email protected]\",\n url=\"http://geopandas.org\",\n project_urls={\n \"Source\": \"https://github.com/geopandas/geopandas\",\n },\n long_description=LONG_DESCRIPTION,\n packages=[\n \"geopandas\",\n \"geopandas.io\",\n \"geopandas.tools\",\n \"geopandas.datasets\",\n \"geopandas.tests\",\n \"geopandas.tools.tests\",\n ],\n package_data={\"geopandas\": data_files},\n python_requires=\">=3.8\",\n install_requires=INSTALL_REQUIRES,\n cmdclass=versioneer.get_cmdclass(),\n)\n", "path": "setup.py" } ]
diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 6e5e99e9c4..2a0da2bc23 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -17,8 +17,8 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 - uses: pre-commit/[email protected] Test: @@ -35,28 +35,28 @@ jobs: postgis: [false] dev: [false] env: - - ci/envs/37-minimal.yaml - - ci/envs/38-no-optional-deps.yaml - - ci/envs/37-pd11.yaml - - ci/envs/37-latest-defaults.yaml - - ci/envs/37-latest-conda-forge.yaml + - ci/envs/38-minimal.yaml + - ci/envs/39-no-optional-deps.yaml + - ci/envs/38-pd11-defaults.yaml + - ci/envs/38-latest-defaults.yaml - ci/envs/38-latest-conda-forge.yaml + - ci/envs/39-pd12-conda-forge.yaml - ci/envs/39-latest-conda-forge.yaml - ci/envs/310-latest-conda-forge.yaml include: - - env: ci/envs/37-latest-conda-forge.yaml + - env: ci/envs/38-latest-conda-forge.yaml os: macos-latest postgis: false dev: false - - env: ci/envs/38-latest-conda-forge.yaml + - env: ci/envs/39-latest-conda-forge.yaml os: macos-latest postgis: false dev: false - - env: ci/envs/37-latest-conda-forge.yaml + - env: ci/envs/38-latest-conda-forge.yaml os: windows-latest postgis: false dev: false - - env: ci/envs/38-latest-conda-forge.yaml + - env: ci/envs/39-latest-conda-forge.yaml os: windows-latest postgis: false dev: false @@ -65,7 +65,7 @@ jobs: dev: true steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Setup Conda uses: conda-incubator/setup-miniconda@v2 @@ -106,7 +106,7 @@ jobs: pytest -v -r s -n auto --color=yes --cov=geopandas --cov-append --cov-report term-missing --cov-report xml geopandas/ - name: Test with PostGIS - if: contains(matrix.env, '38-latest-conda-forge.yaml') && contains(matrix.os, 'ubuntu') + if: contains(matrix.env, '39-pd12-conda-forge.yaml') && contains(matrix.os, 'ubuntu') env: PGUSER: postgres PGPASSWORD: postgres @@ -118,7 +118,7 @@ jobs: pytest -v -r s --color=yes --cov=geopandas --cov-append --cov-report term-missing --cov-report xml geopandas/io/tests/test_sql.py | tee /dev/stderr | if grep SKIPPED >/dev/null;then echo "TESTS SKIPPED, FAILING" && exit 1;fi - name: Test docstrings - if: contains(matrix.env, '38-latest-conda-forge.yaml') && contains(matrix.os, 'ubuntu') + if: contains(matrix.env, '39-pd12-conda-forge.yaml') && contains(matrix.os, 'ubuntu') env: USE_PYGEOS: 1 run: | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 40fecd6bb6..76bbf2fbf5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,7 +13,6 @@ In general, GeoPandas follows the conventions of the pandas project where applicable. Please read the [contributing guidelines](https://geopandas.readthedocs.io/en/latest/community/contributing.html). - In particular, when submitting a pull request: - Install the requirements for the development environment (one can do this @@ -39,7 +38,7 @@ is a great way to get started if you'd like to make a contribution. Style ----- -- GeoPandas supports Python 3.7+ only. The last version of GeoPandas +- GeoPandas supports Python 3.8+ only. The last version of GeoPandas supporting Python 2 is 0.6. 
- GeoPandas follows [the PEP 8 diff --git a/ci/envs/38-latest-conda-forge.yaml b/ci/envs/38-latest-conda-forge.yaml index 6d2348f1b4..c6ea71260d 100644 --- a/ci/envs/38-latest-conda-forge.yaml +++ b/ci/envs/38-latest-conda-forge.yaml @@ -4,7 +4,7 @@ channels: dependencies: - python=3.8 # required - - pandas=1.2 + - pandas - shapely - fiona - pyproj @@ -23,12 +23,6 @@ dependencies: - xyzservices - scipy - geopy - # installed in tests.yaml, because not available on windows - # - postgis - SQLalchemy - - psycopg2 - libspatialite - - geoalchemy2 - pyarrow - # doctest testing - - pytest-doctestplus diff --git a/ci/envs/37-latest-defaults.yaml b/ci/envs/38-latest-defaults.yaml similarity index 96% rename from ci/envs/37-latest-defaults.yaml rename to ci/envs/38-latest-defaults.yaml index 071d3eac43..a4c4ef7ef6 100644 --- a/ci/envs/37-latest-defaults.yaml +++ b/ci/envs/38-latest-defaults.yaml @@ -2,7 +2,7 @@ name: test channels: - defaults dependencies: - - python=3.7 + - python=3.8 # required - pandas - shapely diff --git a/ci/envs/37-minimal.yaml b/ci/envs/38-minimal.yaml similarity index 96% rename from ci/envs/37-minimal.yaml rename to ci/envs/38-minimal.yaml index 9df7b6df4e..b697c63ca7 100644 --- a/ci/envs/37-minimal.yaml +++ b/ci/envs/38-minimal.yaml @@ -3,7 +3,7 @@ channels: - defaults - conda-forge dependencies: - - python=3.7 + - python=3.8 # required - numpy=1.18 - pandas==1.0.5 diff --git a/ci/envs/37-pd11.yaml b/ci/envs/38-pd11-defaults.yaml similarity index 96% rename from ci/envs/37-pd11.yaml rename to ci/envs/38-pd11-defaults.yaml index a58c7ec556..eb524c2bdf 100644 --- a/ci/envs/37-pd11.yaml +++ b/ci/envs/38-pd11-defaults.yaml @@ -2,7 +2,7 @@ name: test channels: - defaults dependencies: - - python=3.7 + - python=3.8 # required - pandas=1.1 - shapely diff --git a/ci/envs/38-no-optional-deps.yaml b/ci/envs/39-no-optional-deps.yaml similarity index 92% rename from ci/envs/38-no-optional-deps.yaml rename to ci/envs/39-no-optional-deps.yaml index 7261cc6f60..cb6f036a57 100644 --- a/ci/envs/38-no-optional-deps.yaml +++ b/ci/envs/39-no-optional-deps.yaml @@ -2,7 +2,7 @@ name: test channels: - conda-forge dependencies: - - python=3.8 + - python=3.9 # required - pandas - shapely diff --git a/ci/envs/37-latest-conda-forge.yaml b/ci/envs/39-pd12-conda-forge.yaml similarity index 65% rename from ci/envs/37-latest-conda-forge.yaml rename to ci/envs/39-pd12-conda-forge.yaml index dadd2c4038..fc675bf98d 100644 --- a/ci/envs/37-latest-conda-forge.yaml +++ b/ci/envs/39-pd12-conda-forge.yaml @@ -2,9 +2,9 @@ name: test channels: - conda-forge dependencies: - - python=3.7 + - python=3.9 # required - - pandas + - pandas=1.2 - shapely - fiona - pyproj @@ -23,6 +23,12 @@ dependencies: - xyzservices - scipy - geopy + # installed in tests.yaml, because not available on windows + # - postgis - SQLalchemy + - psycopg2 - libspatialite + - geoalchemy2 - pyarrow + # doctest testing + - pytest-doctestplus diff --git a/doc/source/community/contributing.rst b/doc/source/community/contributing.rst index 21444db81e..1eb9520e34 100644 --- a/doc/source/community/contributing.rst +++ b/doc/source/community/contributing.rst @@ -44,7 +44,7 @@ In particular, when submitting a pull request: imports when possible, and explicit relative imports for local imports when necessary in tests. -- GeoPandas supports Python 3.7+ only. The last version of GeoPandas +- GeoPandas supports Python 3.8+ only. The last version of GeoPandas supporting Python 2 is 0.6. 
diff --git a/setup.py b/setup.py index ca1d0ec129..277d6847c3 100644 --- a/setup.py +++ b/setup.py @@ -71,7 +71,7 @@ "geopandas.tools.tests", ], package_data={"geopandas": data_files}, - python_requires=">=3.7", + python_requires=">=3.8", install_requires=INSTALL_REQUIRES, cmdclass=versioneer.get_cmdclass(), )
ibis-project__ibis-7498
bug: sqlite BLOB datatype not handled ### What happened? I'm converting some old databases that had binary output as columns, and I am unable to work with these tables in ibis. I created this failing test to show the problem: ``` from __future__ import annotations import sqlite3 import pandas as pd import pytest from packaging.version import parse as vparse import ibis import ibis.expr.datatypes as dt @pytest.fixture(scope="session") def db(tmp_path_factory): path = str(tmp_path_factory.mktemp("databases") / "formats.db") con = sqlite3.connect(path) con.execute('CREATE TABLE blobs (data BLOB)') blob_data = b'\x00\x01\x02\x03' con.execute('INSERT INTO blobs (data) VALUES (?)', (blob_data,)) con.close() return path def test_insert_blob(db): conn = ibis.sqlite.connect(db) t = conn.table("blobs") assert(t["data"].type() == dt.blob) ``` Fails with: E TypeError: Unable to convert type: BLOB() ### What version of ibis are you using? master branch ### What backend(s) are you using, if any? .sqlite ### Relevant log output ```sh db = '/private/var/folders/0b/z9kzmm317mb6r9nyt5q1889r0000gn/T/****/pytest-13/databases0/formats.db' def test_insert_blob(db): conn = ibis.sqlite.connect(db) > t = conn.table("blobs") ibis/backends/sqlite/tests/test_blob_types.py:24: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ibis/backends/base/sql/alchemy/__init__.py:629: in table schema = self._schema_from_sqla_table( ibis/backends/base/sql/alchemy/__init__.py:555: in _schema_from_sqla_table dtype = cls.compiler.translator_class.get_ibis_type( ibis/backends/base/sql/alchemy/translator.py:81: in get_ibis_type return cls.type_mapper.to_ibis(sqla_type, nullable=nullable) ibis/backends/sqlite/datatypes.py:32: in to_ibis return super().to_ibis(typ, nullable=nullable) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ cls = <class 'ibis.backends.sqlite.datatypes.SqliteType'>, typ = BLOB(), nullable = True @classmethod def to_ibis(cls, typ: sat.TypeEngine, nullable: bool = True) -> dt.DataType: """Convert a SQLAlchemy type to an Ibis type. Parameters ---------- typ SQLAlchemy type to convert. nullable : bool, optional Whether the returned type should be nullable. Returns ------- Ibis type. 
""" if dtype := _from_sqlalchemy_types.get(type(typ)): return dtype(nullable=nullable) elif isinstance(typ, sat.Float): if (float_typ := _FLOAT_PREC_TO_TYPE.get(typ.precision)) is not None: return float_typ(nullable=nullable) return dt.Decimal(typ.precision, typ.scale, nullable=nullable) elif isinstance(typ, sat.Numeric): return dt.Decimal(typ.precision, typ.scale, nullable=nullable) elif isinstance(typ, ArrayType): return dt.Array(cls.to_ibis(typ.value_type), nullable=nullable) elif isinstance(typ, sat.ARRAY): ndim = typ.dimensions if ndim is not None and ndim != 1: raise NotImplementedError("Nested array types not yet supported") return dt.Array(cls.to_ibis(typ.item_type), nullable=nullable) elif isinstance(typ, StructType): fields = {k: cls.to_ibis(v) for k, v in typ.fields.items()} return dt.Struct(fields, nullable=nullable) elif isinstance(typ, MapType): return dt.Map( cls.to_ibis(typ.key_type), cls.to_ibis(typ.value_type), nullable=nullable, ) elif isinstance(typ, sa.DateTime): timezone = "UTC" if typ.timezone else None return dt.Timestamp(timezone, nullable=nullable) elif isinstance(typ, sat.String): return dt.String(nullable=nullable) elif geospatial_supported and isinstance(typ, ga.types._GISType): name = typ.geometry_type.upper() try: return _GEOSPATIAL_TYPES[name](geotype=typ.name, nullable=nullable) except KeyError: raise ValueError(f"Unrecognized geometry type: {name}") else: > raise TypeError(f"Unable to convert type: {typ!r}") E TypeError: Unable to convert type: BLOB() ibis/backends/base/sql/alchemy/datatypes.py:323: TypeError ``` ### Code of Conduct - [X] I agree to follow this project's Code of Conduct
[ { "content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport sqlalchemy as sa\nimport sqlalchemy.types as sat\nimport toolz\nfrom sqlalchemy.ext.compiler import compiles\n\nimport ibis.expr.datatypes as dt\nfrom ibis.backends.base.sql.alchemy.geospatial import geospatial_supported\nfrom ibis.backends.base.sqlglot.datatypes import SqlglotType\nfrom ibis.common.collections import FrozenDict\nfrom ibis.formats import TypeMapper\n\nif TYPE_CHECKING:\n from collections.abc import Mapping\n\nif geospatial_supported:\n import geoalchemy2 as ga\n\n\nclass ArrayType(sat.UserDefinedType):\n def __init__(self, value_type: sat.TypeEngine):\n self.value_type = sat.to_instance(value_type)\n\n def result_processor(self, dialect, coltype) -> None:\n if not coltype.lower().startswith(\"array\"):\n return None\n\n inner_processor = (\n self.value_type.result_processor(dialect, coltype[len(\"array(\") : -1])\n or toolz.identity\n )\n\n return lambda v: v if v is None else list(map(inner_processor, v))\n\n\n@compiles(ArrayType, \"default\")\ndef compiles_array(element, compiler, **kw):\n return f\"ARRAY({compiler.process(element.value_type, **kw)})\"\n\n\nclass StructType(sat.UserDefinedType):\n cache_ok = True\n\n def __init__(self, fields: Mapping[str, sat.TypeEngine]) -> None:\n self.fields = FrozenDict(\n {name: sat.to_instance(typ) for name, typ in fields.items()}\n )\n\n\n@compiles(StructType, \"default\")\ndef compiles_struct(element, compiler, **kw):\n quote = compiler.dialect.identifier_preparer.quote\n content = \", \".join(\n f\"{quote(field)} {compiler.process(typ, **kw)}\"\n for field, typ in element.fields.items()\n )\n return f\"STRUCT({content})\"\n\n\nclass MapType(sat.UserDefinedType):\n def __init__(self, key_type: sat.TypeEngine, value_type: sat.TypeEngine):\n self.key_type = sat.to_instance(key_type)\n self.value_type = sat.to_instance(value_type)\n\n\n@compiles(MapType, \"default\")\ndef compiles_map(element, compiler, **kw):\n key_type = compiler.process(element.key_type, **kw)\n value_type = compiler.process(element.value_type, **kw)\n return f\"MAP({key_type}, {value_type})\"\n\n\nclass UInt64(sat.Integer):\n pass\n\n\nclass UInt32(sat.Integer):\n pass\n\n\nclass UInt16(sat.Integer):\n pass\n\n\nclass UInt8(sat.Integer):\n pass\n\n\n@compiles(UInt64, \"postgresql\")\n@compiles(UInt32, \"postgresql\")\n@compiles(UInt16, \"postgresql\")\n@compiles(UInt8, \"postgresql\")\n@compiles(UInt64, \"mssql\")\n@compiles(UInt32, \"mssql\")\n@compiles(UInt16, \"mssql\")\n@compiles(UInt8, \"mssql\")\n@compiles(UInt64, \"mysql\")\n@compiles(UInt32, \"mysql\")\n@compiles(UInt16, \"mysql\")\n@compiles(UInt8, \"mysql\")\n@compiles(UInt64, \"snowflake\")\n@compiles(UInt32, \"snowflake\")\n@compiles(UInt16, \"snowflake\")\n@compiles(UInt8, \"snowflake\")\n@compiles(UInt64, \"sqlite\")\n@compiles(UInt32, \"sqlite\")\n@compiles(UInt16, \"sqlite\")\n@compiles(UInt8, \"sqlite\")\n@compiles(UInt64, \"trino\")\n@compiles(UInt32, \"trino\")\n@compiles(UInt16, \"trino\")\n@compiles(UInt8, \"trino\")\ndef compile_uint(element, compiler, **kw):\n dialect_name = compiler.dialect.name\n raise TypeError(\n f\"unsigned integers are not supported in the {dialect_name} backend\"\n )\n\n\ntry:\n UUID = sat.UUID\nexcept AttributeError:\n\n class UUID(sat.String):\n pass\n\nelse:\n\n @compiles(UUID, \"default\")\n def compiles_uuid(element, compiler, **kw):\n return \"UUID\"\n\n\nclass Unknown(sa.Text):\n pass\n\n\n_from_sqlalchemy_types = {\n sat.BOOLEAN: dt.Boolean,\n sat.Boolean: 
dt.Boolean,\n sat.BINARY: dt.Binary,\n sat.LargeBinary: dt.Binary,\n sat.DATE: dt.Date,\n sat.Date: dt.Date,\n sat.TEXT: dt.String,\n sat.Text: dt.String,\n sat.TIME: dt.Time,\n sat.Time: dt.Time,\n sat.VARCHAR: dt.String,\n sat.CHAR: dt.String,\n sat.String: dt.String,\n sat.SMALLINT: dt.Int16,\n sat.SmallInteger: dt.Int16,\n sat.INTEGER: dt.Int32,\n sat.Integer: dt.Int32,\n sat.BIGINT: dt.Int64,\n sat.BigInteger: dt.Int64,\n sat.REAL: dt.Float32,\n sat.FLOAT: dt.Float64,\n UInt16: dt.UInt16,\n UInt32: dt.UInt32,\n UInt64: dt.UInt64,\n UInt8: dt.UInt8,\n Unknown: dt.Unknown,\n sat.JSON: dt.JSON,\n UUID: dt.UUID,\n}\n\n_to_sqlalchemy_types = {\n dt.Null: sat.NullType,\n dt.Date: sat.Date,\n dt.Time: sat.Time,\n dt.Boolean: sat.Boolean,\n dt.Binary: sat.LargeBinary,\n dt.String: sat.Text,\n dt.Decimal: sat.Numeric,\n # Mantissa-based\n dt.Float16: sat.REAL,\n dt.Float32: sat.REAL,\n # precision is the number of bits in the mantissa\n # without specifying this, some backends interpret the type as FLOAT, which\n # means float32 (and precision == 24)\n dt.Float64: sat.FLOAT(precision=53),\n dt.Int8: sat.SmallInteger,\n dt.Int16: sat.SmallInteger,\n dt.Int32: sat.Integer,\n dt.Int64: sat.BigInteger,\n dt.UInt8: UInt8,\n dt.UInt16: UInt16,\n dt.UInt32: UInt32,\n dt.UInt64: UInt64,\n dt.JSON: sat.JSON,\n dt.Interval: sat.Interval,\n dt.Unknown: Unknown,\n dt.MACADDR: sat.Text,\n dt.INET: sat.Text,\n dt.UUID: UUID,\n}\n\n_FLOAT_PREC_TO_TYPE = {\n 11: dt.Float16,\n 24: dt.Float32,\n 53: dt.Float64,\n}\n\n_GEOSPATIAL_TYPES = {\n \"POINT\": dt.Point,\n \"LINESTRING\": dt.LineString,\n \"POLYGON\": dt.Polygon,\n \"MULTILINESTRING\": dt.MultiLineString,\n \"MULTIPOINT\": dt.MultiPoint,\n \"MULTIPOLYGON\": dt.MultiPolygon,\n \"GEOMETRY\": dt.Geometry,\n \"GEOGRAPHY\": dt.Geography,\n}\n\n\nclass AlchemyType(TypeMapper):\n @classmethod\n def to_string(cls, dtype: dt.DataType):\n dialect_class = sa.dialects.registry.load(cls.dialect)\n return str(\n sa.types.to_instance(cls.from_ibis(dtype)).compile(dialect=dialect_class())\n )\n\n @classmethod\n def from_string(cls, type_string, nullable=True):\n return SqlglotType.from_string(type_string, nullable=nullable)\n\n @classmethod\n def from_ibis(cls, dtype: dt.DataType) -> sat.TypeEngine:\n \"\"\"Convert an Ibis type to a SQLAlchemy type.\n\n Parameters\n ----------\n dtype\n Ibis type to convert.\n\n Returns\n -------\n SQLAlchemy type.\n \"\"\"\n if dtype.is_decimal():\n return sat.NUMERIC(dtype.precision, dtype.scale)\n elif dtype.is_timestamp():\n return sat.TIMESTAMP(timezone=bool(dtype.timezone))\n elif dtype.is_array():\n return ArrayType(cls.from_ibis(dtype.value_type))\n elif dtype.is_struct():\n fields = {k: cls.from_ibis(v) for k, v in dtype.fields.items()}\n return StructType(fields)\n elif dtype.is_map():\n return MapType(\n cls.from_ibis(dtype.key_type), cls.from_ibis(dtype.value_type)\n )\n elif dtype.is_geospatial():\n if geospatial_supported:\n if dtype.geotype == \"geometry\":\n return ga.Geometry\n elif dtype.geotype == \"geography\":\n return ga.Geography\n else:\n return ga.types._GISType\n else:\n raise TypeError(\"geospatial types are not supported\")\n else:\n return _to_sqlalchemy_types[type(dtype)]\n\n @classmethod\n def to_ibis(cls, typ: sat.TypeEngine, nullable: bool = True) -> dt.DataType:\n \"\"\"Convert a SQLAlchemy type to an Ibis type.\n\n Parameters\n ----------\n typ\n SQLAlchemy type to convert.\n nullable : bool, optional\n Whether the returned type should be nullable.\n\n Returns\n -------\n Ibis type.\n \"\"\"\n if dtype 
:= _from_sqlalchemy_types.get(type(typ)):\n return dtype(nullable=nullable)\n elif isinstance(typ, sat.Float):\n if (float_typ := _FLOAT_PREC_TO_TYPE.get(typ.precision)) is not None:\n return float_typ(nullable=nullable)\n return dt.Decimal(typ.precision, typ.scale, nullable=nullable)\n elif isinstance(typ, sat.Numeric):\n return dt.Decimal(typ.precision, typ.scale, nullable=nullable)\n elif isinstance(typ, ArrayType):\n return dt.Array(cls.to_ibis(typ.value_type), nullable=nullable)\n elif isinstance(typ, sat.ARRAY):\n ndim = typ.dimensions\n if ndim is not None and ndim != 1:\n raise NotImplementedError(\"Nested array types not yet supported\")\n return dt.Array(cls.to_ibis(typ.item_type), nullable=nullable)\n elif isinstance(typ, StructType):\n fields = {k: cls.to_ibis(v) for k, v in typ.fields.items()}\n return dt.Struct(fields, nullable=nullable)\n elif isinstance(typ, MapType):\n return dt.Map(\n cls.to_ibis(typ.key_type),\n cls.to_ibis(typ.value_type),\n nullable=nullable,\n )\n elif isinstance(typ, sa.DateTime):\n timezone = \"UTC\" if typ.timezone else None\n return dt.Timestamp(timezone, nullable=nullable)\n elif isinstance(typ, sat.String):\n return dt.String(nullable=nullable)\n elif geospatial_supported and isinstance(typ, ga.types._GISType):\n name = typ.geometry_type.upper()\n try:\n return _GEOSPATIAL_TYPES[name](geotype=typ.name, nullable=nullable)\n except KeyError:\n raise ValueError(f\"Unrecognized geometry type: {name}\")\n else:\n raise TypeError(f\"Unable to convert type: {typ!r}\")\n", "path": "ibis/backends/base/sql/alchemy/datatypes.py" } ]
[ { "content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport sqlalchemy as sa\nimport sqlalchemy.types as sat\nimport toolz\nfrom sqlalchemy.ext.compiler import compiles\n\nimport ibis.expr.datatypes as dt\nfrom ibis.backends.base.sql.alchemy.geospatial import geospatial_supported\nfrom ibis.backends.base.sqlglot.datatypes import SqlglotType\nfrom ibis.common.collections import FrozenDict\nfrom ibis.formats import TypeMapper\n\nif TYPE_CHECKING:\n from collections.abc import Mapping\n\nif geospatial_supported:\n import geoalchemy2 as ga\n\n\nclass ArrayType(sat.UserDefinedType):\n def __init__(self, value_type: sat.TypeEngine):\n self.value_type = sat.to_instance(value_type)\n\n def result_processor(self, dialect, coltype) -> None:\n if not coltype.lower().startswith(\"array\"):\n return None\n\n inner_processor = (\n self.value_type.result_processor(dialect, coltype[len(\"array(\") : -1])\n or toolz.identity\n )\n\n return lambda v: v if v is None else list(map(inner_processor, v))\n\n\n@compiles(ArrayType, \"default\")\ndef compiles_array(element, compiler, **kw):\n return f\"ARRAY({compiler.process(element.value_type, **kw)})\"\n\n\nclass StructType(sat.UserDefinedType):\n cache_ok = True\n\n def __init__(self, fields: Mapping[str, sat.TypeEngine]) -> None:\n self.fields = FrozenDict(\n {name: sat.to_instance(typ) for name, typ in fields.items()}\n )\n\n\n@compiles(StructType, \"default\")\ndef compiles_struct(element, compiler, **kw):\n quote = compiler.dialect.identifier_preparer.quote\n content = \", \".join(\n f\"{quote(field)} {compiler.process(typ, **kw)}\"\n for field, typ in element.fields.items()\n )\n return f\"STRUCT({content})\"\n\n\nclass MapType(sat.UserDefinedType):\n def __init__(self, key_type: sat.TypeEngine, value_type: sat.TypeEngine):\n self.key_type = sat.to_instance(key_type)\n self.value_type = sat.to_instance(value_type)\n\n\n@compiles(MapType, \"default\")\ndef compiles_map(element, compiler, **kw):\n key_type = compiler.process(element.key_type, **kw)\n value_type = compiler.process(element.value_type, **kw)\n return f\"MAP({key_type}, {value_type})\"\n\n\nclass UInt64(sat.Integer):\n pass\n\n\nclass UInt32(sat.Integer):\n pass\n\n\nclass UInt16(sat.Integer):\n pass\n\n\nclass UInt8(sat.Integer):\n pass\n\n\n@compiles(UInt64, \"postgresql\")\n@compiles(UInt32, \"postgresql\")\n@compiles(UInt16, \"postgresql\")\n@compiles(UInt8, \"postgresql\")\n@compiles(UInt64, \"mssql\")\n@compiles(UInt32, \"mssql\")\n@compiles(UInt16, \"mssql\")\n@compiles(UInt8, \"mssql\")\n@compiles(UInt64, \"mysql\")\n@compiles(UInt32, \"mysql\")\n@compiles(UInt16, \"mysql\")\n@compiles(UInt8, \"mysql\")\n@compiles(UInt64, \"snowflake\")\n@compiles(UInt32, \"snowflake\")\n@compiles(UInt16, \"snowflake\")\n@compiles(UInt8, \"snowflake\")\n@compiles(UInt64, \"sqlite\")\n@compiles(UInt32, \"sqlite\")\n@compiles(UInt16, \"sqlite\")\n@compiles(UInt8, \"sqlite\")\n@compiles(UInt64, \"trino\")\n@compiles(UInt32, \"trino\")\n@compiles(UInt16, \"trino\")\n@compiles(UInt8, \"trino\")\ndef compile_uint(element, compiler, **kw):\n dialect_name = compiler.dialect.name\n raise TypeError(\n f\"unsigned integers are not supported in the {dialect_name} backend\"\n )\n\n\ntry:\n UUID = sat.UUID\nexcept AttributeError:\n\n class UUID(sat.String):\n pass\n\nelse:\n\n @compiles(UUID, \"default\")\n def compiles_uuid(element, compiler, **kw):\n return \"UUID\"\n\n\nclass Unknown(sa.Text):\n pass\n\n\n_from_sqlalchemy_types = {\n sat.BOOLEAN: dt.Boolean,\n sat.Boolean: 
dt.Boolean,\n sat.BINARY: dt.Binary,\n sat.BLOB: dt.Binary,\n sat.LargeBinary: dt.Binary,\n sat.DATE: dt.Date,\n sat.Date: dt.Date,\n sat.TEXT: dt.String,\n sat.Text: dt.String,\n sat.TIME: dt.Time,\n sat.Time: dt.Time,\n sat.VARCHAR: dt.String,\n sat.CHAR: dt.String,\n sat.String: dt.String,\n sat.SMALLINT: dt.Int16,\n sat.SmallInteger: dt.Int16,\n sat.INTEGER: dt.Int32,\n sat.Integer: dt.Int32,\n sat.BIGINT: dt.Int64,\n sat.BigInteger: dt.Int64,\n sat.REAL: dt.Float32,\n sat.FLOAT: dt.Float64,\n UInt16: dt.UInt16,\n UInt32: dt.UInt32,\n UInt64: dt.UInt64,\n UInt8: dt.UInt8,\n Unknown: dt.Unknown,\n sat.JSON: dt.JSON,\n UUID: dt.UUID,\n}\n\n_to_sqlalchemy_types = {\n dt.Null: sat.NullType,\n dt.Date: sat.Date,\n dt.Time: sat.Time,\n dt.Boolean: sat.Boolean,\n dt.Binary: sat.LargeBinary,\n dt.String: sat.Text,\n dt.Decimal: sat.Numeric,\n # Mantissa-based\n dt.Float16: sat.REAL,\n dt.Float32: sat.REAL,\n # precision is the number of bits in the mantissa\n # without specifying this, some backends interpret the type as FLOAT, which\n # means float32 (and precision == 24)\n dt.Float64: sat.FLOAT(precision=53),\n dt.Int8: sat.SmallInteger,\n dt.Int16: sat.SmallInteger,\n dt.Int32: sat.Integer,\n dt.Int64: sat.BigInteger,\n dt.UInt8: UInt8,\n dt.UInt16: UInt16,\n dt.UInt32: UInt32,\n dt.UInt64: UInt64,\n dt.JSON: sat.JSON,\n dt.Interval: sat.Interval,\n dt.Unknown: Unknown,\n dt.MACADDR: sat.Text,\n dt.INET: sat.Text,\n dt.UUID: UUID,\n}\n\n_FLOAT_PREC_TO_TYPE = {\n 11: dt.Float16,\n 24: dt.Float32,\n 53: dt.Float64,\n}\n\n_GEOSPATIAL_TYPES = {\n \"POINT\": dt.Point,\n \"LINESTRING\": dt.LineString,\n \"POLYGON\": dt.Polygon,\n \"MULTILINESTRING\": dt.MultiLineString,\n \"MULTIPOINT\": dt.MultiPoint,\n \"MULTIPOLYGON\": dt.MultiPolygon,\n \"GEOMETRY\": dt.Geometry,\n \"GEOGRAPHY\": dt.Geography,\n}\n\n\nclass AlchemyType(TypeMapper):\n @classmethod\n def to_string(cls, dtype: dt.DataType):\n dialect_class = sa.dialects.registry.load(cls.dialect)\n return str(\n sa.types.to_instance(cls.from_ibis(dtype)).compile(dialect=dialect_class())\n )\n\n @classmethod\n def from_string(cls, type_string, nullable=True):\n return SqlglotType.from_string(type_string, nullable=nullable)\n\n @classmethod\n def from_ibis(cls, dtype: dt.DataType) -> sat.TypeEngine:\n \"\"\"Convert an Ibis type to a SQLAlchemy type.\n\n Parameters\n ----------\n dtype\n Ibis type to convert.\n\n Returns\n -------\n SQLAlchemy type.\n \"\"\"\n if dtype.is_decimal():\n return sat.NUMERIC(dtype.precision, dtype.scale)\n elif dtype.is_timestamp():\n return sat.TIMESTAMP(timezone=bool(dtype.timezone))\n elif dtype.is_array():\n return ArrayType(cls.from_ibis(dtype.value_type))\n elif dtype.is_struct():\n fields = {k: cls.from_ibis(v) for k, v in dtype.fields.items()}\n return StructType(fields)\n elif dtype.is_map():\n return MapType(\n cls.from_ibis(dtype.key_type), cls.from_ibis(dtype.value_type)\n )\n elif dtype.is_geospatial():\n if geospatial_supported:\n if dtype.geotype == \"geometry\":\n return ga.Geometry\n elif dtype.geotype == \"geography\":\n return ga.Geography\n else:\n return ga.types._GISType\n else:\n raise TypeError(\"geospatial types are not supported\")\n else:\n return _to_sqlalchemy_types[type(dtype)]\n\n @classmethod\n def to_ibis(cls, typ: sat.TypeEngine, nullable: bool = True) -> dt.DataType:\n \"\"\"Convert a SQLAlchemy type to an Ibis type.\n\n Parameters\n ----------\n typ\n SQLAlchemy type to convert.\n nullable : bool, optional\n Whether the returned type should be nullable.\n\n Returns\n -------\n Ibis 
type.\n \"\"\"\n if dtype := _from_sqlalchemy_types.get(type(typ)):\n return dtype(nullable=nullable)\n elif isinstance(typ, sat.Float):\n if (float_typ := _FLOAT_PREC_TO_TYPE.get(typ.precision)) is not None:\n return float_typ(nullable=nullable)\n return dt.Decimal(typ.precision, typ.scale, nullable=nullable)\n elif isinstance(typ, sat.Numeric):\n return dt.Decimal(typ.precision, typ.scale, nullable=nullable)\n elif isinstance(typ, ArrayType):\n return dt.Array(cls.to_ibis(typ.value_type), nullable=nullable)\n elif isinstance(typ, sat.ARRAY):\n ndim = typ.dimensions\n if ndim is not None and ndim != 1:\n raise NotImplementedError(\"Nested array types not yet supported\")\n return dt.Array(cls.to_ibis(typ.item_type), nullable=nullable)\n elif isinstance(typ, StructType):\n fields = {k: cls.to_ibis(v) for k, v in typ.fields.items()}\n return dt.Struct(fields, nullable=nullable)\n elif isinstance(typ, MapType):\n return dt.Map(\n cls.to_ibis(typ.key_type),\n cls.to_ibis(typ.value_type),\n nullable=nullable,\n )\n elif isinstance(typ, sa.DateTime):\n timezone = \"UTC\" if typ.timezone else None\n return dt.Timestamp(timezone, nullable=nullable)\n elif isinstance(typ, sat.String):\n return dt.String(nullable=nullable)\n elif geospatial_supported and isinstance(typ, ga.types._GISType):\n name = typ.geometry_type.upper()\n try:\n return _GEOSPATIAL_TYPES[name](geotype=typ.name, nullable=nullable)\n except KeyError:\n raise ValueError(f\"Unrecognized geometry type: {name}\")\n else:\n raise TypeError(f\"Unable to convert type: {typ!r}\")\n", "path": "ibis/backends/base/sql/alchemy/datatypes.py" } ]
diff --git a/ibis/backends/base/sql/alchemy/datatypes.py b/ibis/backends/base/sql/alchemy/datatypes.py index 84e16ca49dfc..214a80956259 100644 --- a/ibis/backends/base/sql/alchemy/datatypes.py +++ b/ibis/backends/base/sql/alchemy/datatypes.py @@ -142,6 +142,7 @@ class Unknown(sa.Text): sat.BOOLEAN: dt.Boolean, sat.Boolean: dt.Boolean, sat.BINARY: dt.Binary, + sat.BLOB: dt.Binary, sat.LargeBinary: dt.Binary, sat.DATE: dt.Date, sat.Date: dt.Date, diff --git a/ibis/backends/sqlite/tests/test_types.py b/ibis/backends/sqlite/tests/test_types.py index ab7813086e9f..8d2fbec6a834 100644 --- a/ibis/backends/sqlite/tests/test_types.py +++ b/ibis/backends/sqlite/tests/test_types.py @@ -39,6 +39,7 @@ def db(tmp_path_factory): con.execute("CREATE TABLE timestamps (ts TIMESTAMP)") con.execute("CREATE TABLE timestamps_tz (ts TIMESTAMP)") con.execute("CREATE TABLE weird (str_col STRING, date_col ITSADATE)") + con.execute("CREATE TABLE blobs (data BLOB)") with con: con.executemany("INSERT INTO timestamps VALUES (?)", [(t,) for t in TIMESTAMPS]) con.executemany( @@ -54,6 +55,7 @@ def db(tmp_path_factory): ("d", "2022-01-04"), ], ) + con.execute("INSERT INTO blobs (data) VALUES (?)", (b"\x00\x01\x02\x03",)) con.close() return path @@ -90,3 +92,9 @@ def test_type_map(db): } ) assert res.equals(sol) + + +def test_read_blob(db): + con = ibis.sqlite.connect(db) + t = con.table("blobs") + assert t["data"].type() == dt.binary
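Taken together, the diff above registers SQLAlchemy's `BLOB` under the Ibis `binary` type and adds a SQLite round-trip test. A minimal standalone sketch of what that test exercises (the database path and table name are illustrative, mirroring the test fixture):

```python
import sqlite3

import ibis
import ibis.expr.datatypes as dt

# Build a throwaway SQLite database with a BLOB column (mirrors the test fixture).
path = "blobs_demo.db"  # illustrative path
con = sqlite3.connect(path)
con.execute("CREATE TABLE blobs (data BLOB)")
con.execute("INSERT INTO blobs (data) VALUES (?)", (b"\x00\x01\x02\x03",))
con.commit()
con.close()

# With sat.BLOB registered in _from_sqlalchemy_types, the column reflects as binary.
t = ibis.sqlite.connect(path).table("blobs")
assert t["data"].type() == dt.binary
```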
elastic__apm-agent-python-1064
Add support for Django 3.2

Django 3.2 is slated for a release in April. Running the test suite, a few problems came up:

- [ ] App label needs to be a valid Python identifier; ours is not (renaming it from `elasticapm.contrib.django` to `elasticapm` should suffice)

Several test failures:

- [ ] `test_broken_500_handler_with_middleware`
- [ ] `test_404_middleware`
- [ ] `test_response_error_id_middleware`
- [ ] `test_django_logging_request_kwarg`
- [ ] `test_django_logging_middleware`
- [ ] `test_capture_body_config_is_dynamic_for_transactions`
- [ ] `test_capture_headers_config_is_dynamic_for_transactions`
- [ ] `test_capture_headers`
- [ ] `test_transaction_name_from_route`

Most of these look similar in nature; I suspect an issue with the middleware. Nothing jumps out in the [release notes](https://docs.djangoproject.com/en/3.2/releases/3.2/), though.
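The first checklist item is the mechanical part of the fix: Django 3.2 validates that `AppConfig.label` is a valid Python identifier, which a dotted label is not. A minimal sketch of the shape of the change in `elasticapm/contrib/django/apps.py`, trimmed to the relevant attributes:

```python
from django.apps import AppConfig


class ElasticAPMConfig(AppConfig):
    # "name" stays the dotted module path of the app package.
    name = "elasticapm.contrib.django"
    # Django 3.2 requires "label" to be a valid Python identifier,
    # so the dotted label becomes a plain one.
    label = "elasticapm"
    verbose_name = "ElasticAPM"
```

Anything that previously looked the app up by the old dotted label (for example `apps.get_app_config(...)` in the test fixtures) has to switch to the new label as well, which is what the rest of the record below does.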
[ { "content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom functools import partial\n\nfrom django.apps import AppConfig\nfrom django.conf import settings as django_settings\n\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.django.client import get_client\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.wsgi import get_current_url\n\nlogger = get_logger(\"elasticapm.traces\")\n\nERROR_DISPATCH_UID = \"elasticapm-exceptions\"\nREQUEST_START_DISPATCH_UID = \"elasticapm-request-start\"\nREQUEST_FINISH_DISPATCH_UID = \"elasticapm-request-stop\"\n\nMIDDLEWARE_NAME = \"elasticapm.contrib.django.middleware.TracingMiddleware\"\n\nTRACEPARENT_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACEPARENT_HEADER_NAME.upper().replace(\"-\", \"_\")\nTRACEPARENT_LEGACY_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACEPARENT_LEGACY_HEADER_NAME.upper().replace(\"-\", \"_\")\nTRACESTATE_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACESTATE_HEADER_NAME.upper().replace(\"-\", \"_\")\n\n\nclass ElasticAPMConfig(AppConfig):\n name = \"elasticapm.contrib.django\"\n label = \"elasticapm.contrib.django\"\n verbose_name = \"ElasticAPM\"\n\n def __init__(self, *args, **kwargs):\n super(ElasticAPMConfig, self).__init__(*args, **kwargs)\n self.client = None\n\n def ready(self):\n self.client = get_client()\n if self.client.config.autoinsert_django_middleware:\n self.insert_middleware(django_settings)\n register_handlers(self.client)\n if self.client.config.instrument and self.client.config.enabled:\n instrument(self.client)\n else:\n self.client.logger.debug(\"Skipping instrumentation. 
INSTRUMENT is set to False.\")\n\n @staticmethod\n def insert_middleware(settings):\n if hasattr(settings, \"MIDDLEWARE\"):\n middleware_list = settings.MIDDLEWARE\n middleware_attr = \"MIDDLEWARE\"\n elif hasattr(settings, \"MIDDLEWARE_CLASSES\"): # can be removed when we drop support for Django 1.x\n middleware_list = settings.MIDDLEWARE_CLASSES\n middleware_attr = \"MIDDLEWARE_CLASSES\"\n else:\n logger.debug(\"Could not find middleware setting, not autoinserting tracing middleware\")\n return\n is_tuple = isinstance(middleware_list, tuple)\n if is_tuple:\n middleware_list = list(middleware_list)\n elif not isinstance(middleware_list, list):\n logger.debug(\"%s setting is not of type list or tuple, not autoinserting tracing middleware\")\n return\n if middleware_list is not None and MIDDLEWARE_NAME not in middleware_list:\n logger.debug(\"Inserting tracing middleware into settings.%s\", middleware_attr)\n middleware_list.insert(0, MIDDLEWARE_NAME)\n if is_tuple:\n middleware_list = tuple(middleware_list)\n if middleware_list:\n setattr(settings, middleware_attr, middleware_list)\n\n\ndef register_handlers(client):\n from django.core.signals import got_request_exception, request_finished, request_started\n\n from elasticapm.contrib.django.handlers import exception_handler\n\n # Connect to Django's internal signal handlers\n got_request_exception.disconnect(dispatch_uid=ERROR_DISPATCH_UID)\n got_request_exception.connect(partial(exception_handler, client), dispatch_uid=ERROR_DISPATCH_UID, weak=False)\n\n request_started.disconnect(dispatch_uid=REQUEST_START_DISPATCH_UID)\n request_started.connect(\n partial(_request_started_handler, client), dispatch_uid=REQUEST_START_DISPATCH_UID, weak=False\n )\n\n request_finished.disconnect(dispatch_uid=REQUEST_FINISH_DISPATCH_UID)\n request_finished.connect(\n lambda sender, **kwargs: client.end_transaction() if _should_start_transaction(client) else None,\n dispatch_uid=REQUEST_FINISH_DISPATCH_UID,\n weak=False,\n )\n\n # If we can import celery, register ourselves as exception handler\n try:\n import celery # noqa F401\n\n from elasticapm.contrib.celery import register_exception_tracking\n\n try:\n register_exception_tracking(client)\n except Exception as e:\n client.logger.exception(\"Failed installing django-celery hook: %s\" % e)\n except ImportError:\n client.logger.debug(\"Not instrumenting Celery, couldn't import\")\n\n\ndef _request_started_handler(client, sender, *args, **kwargs):\n if not _should_start_transaction(client):\n return\n # try to find trace id\n trace_parent = None\n if \"environ\" in kwargs:\n url = get_current_url(kwargs[\"environ\"], strip_querystring=True, path_only=True)\n if client.should_ignore_url(url):\n logger.debug(\"Ignoring request due to %s matching transaction_ignore_urls\")\n return\n trace_parent = TraceParent.from_headers(\n kwargs[\"environ\"],\n TRACEPARENT_HEADER_NAME_WSGI,\n TRACEPARENT_LEGACY_HEADER_NAME_WSGI,\n TRACESTATE_HEADER_NAME_WSGI,\n )\n elif \"scope\" in kwargs:\n scope = kwargs[\"scope\"]\n fake_environ = {\"SCRIPT_NAME\": scope.get(\"root_path\", \"\"), \"PATH_INFO\": scope[\"path\"], \"QUERY_STRING\": \"\"}\n url = get_current_url(fake_environ, strip_querystring=True, path_only=True)\n if client.should_ignore_url(url):\n logger.debug(\"Ignoring request due to %s matching transaction_ignore_urls\")\n return\n if \"headers\" in scope:\n trace_parent = TraceParent.from_headers(scope[\"headers\"])\n client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n\ndef instrument(client):\n 
\"\"\"\n Auto-instruments code to get nice spans\n \"\"\"\n from elasticapm.instrumentation.control import instrument\n\n instrument()\n try:\n import celery # noqa F401\n\n from elasticapm.contrib.celery import register_instrumentation\n\n register_instrumentation(client)\n except ImportError:\n client.logger.debug(\"Not instrumenting Celery, couldn't import\")\n\n\ndef _should_start_transaction(client):\n middleware_attr = \"MIDDLEWARE\" if getattr(django_settings, \"MIDDLEWARE\", None) is not None else \"MIDDLEWARE_CLASSES\"\n middleware = getattr(django_settings, middleware_attr)\n return (\n (not django_settings.DEBUG or client.config.debug)\n and middleware\n and \"elasticapm.contrib.django.middleware.TracingMiddleware\" in middleware\n )\n", "path": "elasticapm/contrib/django/apps.py" } ]
[ { "content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom functools import partial\n\nfrom django.apps import AppConfig\nfrom django.conf import settings as django_settings\n\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.django.client import get_client\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.wsgi import get_current_url\n\nlogger = get_logger(\"elasticapm.traces\")\n\nERROR_DISPATCH_UID = \"elasticapm-exceptions\"\nREQUEST_START_DISPATCH_UID = \"elasticapm-request-start\"\nREQUEST_FINISH_DISPATCH_UID = \"elasticapm-request-stop\"\n\nMIDDLEWARE_NAME = \"elasticapm.contrib.django.middleware.TracingMiddleware\"\n\nTRACEPARENT_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACEPARENT_HEADER_NAME.upper().replace(\"-\", \"_\")\nTRACEPARENT_LEGACY_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACEPARENT_LEGACY_HEADER_NAME.upper().replace(\"-\", \"_\")\nTRACESTATE_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACESTATE_HEADER_NAME.upper().replace(\"-\", \"_\")\n\n\nclass ElasticAPMConfig(AppConfig):\n name = \"elasticapm.contrib.django\"\n label = \"elasticapm\"\n verbose_name = \"ElasticAPM\"\n\n def __init__(self, *args, **kwargs):\n super(ElasticAPMConfig, self).__init__(*args, **kwargs)\n self.client = None\n\n def ready(self):\n self.client = get_client()\n if self.client.config.autoinsert_django_middleware:\n self.insert_middleware(django_settings)\n register_handlers(self.client)\n if self.client.config.instrument and self.client.config.enabled:\n instrument(self.client)\n else:\n self.client.logger.debug(\"Skipping instrumentation. 
INSTRUMENT is set to False.\")\n\n @staticmethod\n def insert_middleware(settings):\n if hasattr(settings, \"MIDDLEWARE\"):\n middleware_list = settings.MIDDLEWARE\n middleware_attr = \"MIDDLEWARE\"\n elif hasattr(settings, \"MIDDLEWARE_CLASSES\"): # can be removed when we drop support for Django 1.x\n middleware_list = settings.MIDDLEWARE_CLASSES\n middleware_attr = \"MIDDLEWARE_CLASSES\"\n else:\n logger.debug(\"Could not find middleware setting, not autoinserting tracing middleware\")\n return\n is_tuple = isinstance(middleware_list, tuple)\n if is_tuple:\n middleware_list = list(middleware_list)\n elif not isinstance(middleware_list, list):\n logger.debug(\"%s setting is not of type list or tuple, not autoinserting tracing middleware\")\n return\n if middleware_list is not None and MIDDLEWARE_NAME not in middleware_list:\n logger.debug(\"Inserting tracing middleware into settings.%s\", middleware_attr)\n middleware_list.insert(0, MIDDLEWARE_NAME)\n if is_tuple:\n middleware_list = tuple(middleware_list)\n if middleware_list:\n setattr(settings, middleware_attr, middleware_list)\n\n\ndef register_handlers(client):\n from django.core.signals import got_request_exception, request_finished, request_started\n\n from elasticapm.contrib.django.handlers import exception_handler\n\n # Connect to Django's internal signal handlers\n got_request_exception.disconnect(dispatch_uid=ERROR_DISPATCH_UID)\n got_request_exception.connect(partial(exception_handler, client), dispatch_uid=ERROR_DISPATCH_UID, weak=False)\n\n request_started.disconnect(dispatch_uid=REQUEST_START_DISPATCH_UID)\n request_started.connect(\n partial(_request_started_handler, client), dispatch_uid=REQUEST_START_DISPATCH_UID, weak=False\n )\n\n request_finished.disconnect(dispatch_uid=REQUEST_FINISH_DISPATCH_UID)\n request_finished.connect(\n lambda sender, **kwargs: client.end_transaction() if _should_start_transaction(client) else None,\n dispatch_uid=REQUEST_FINISH_DISPATCH_UID,\n weak=False,\n )\n\n # If we can import celery, register ourselves as exception handler\n try:\n import celery # noqa F401\n\n from elasticapm.contrib.celery import register_exception_tracking\n\n try:\n register_exception_tracking(client)\n except Exception as e:\n client.logger.exception(\"Failed installing django-celery hook: %s\" % e)\n except ImportError:\n client.logger.debug(\"Not instrumenting Celery, couldn't import\")\n\n\ndef _request_started_handler(client, sender, *args, **kwargs):\n if not _should_start_transaction(client):\n return\n # try to find trace id\n trace_parent = None\n if \"environ\" in kwargs:\n url = get_current_url(kwargs[\"environ\"], strip_querystring=True, path_only=True)\n if client.should_ignore_url(url):\n logger.debug(\"Ignoring request due to %s matching transaction_ignore_urls\")\n return\n trace_parent = TraceParent.from_headers(\n kwargs[\"environ\"],\n TRACEPARENT_HEADER_NAME_WSGI,\n TRACEPARENT_LEGACY_HEADER_NAME_WSGI,\n TRACESTATE_HEADER_NAME_WSGI,\n )\n elif \"scope\" in kwargs:\n scope = kwargs[\"scope\"]\n fake_environ = {\"SCRIPT_NAME\": scope.get(\"root_path\", \"\"), \"PATH_INFO\": scope[\"path\"], \"QUERY_STRING\": \"\"}\n url = get_current_url(fake_environ, strip_querystring=True, path_only=True)\n if client.should_ignore_url(url):\n logger.debug(\"Ignoring request due to %s matching transaction_ignore_urls\")\n return\n if \"headers\" in scope:\n trace_parent = TraceParent.from_headers(scope[\"headers\"])\n client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n\ndef instrument(client):\n 
\"\"\"\n Auto-instruments code to get nice spans\n \"\"\"\n from elasticapm.instrumentation.control import instrument\n\n instrument()\n try:\n import celery # noqa F401\n\n from elasticapm.contrib.celery import register_instrumentation\n\n register_instrumentation(client)\n except ImportError:\n client.logger.debug(\"Not instrumenting Celery, couldn't import\")\n\n\ndef _should_start_transaction(client):\n middleware_attr = \"MIDDLEWARE\" if getattr(django_settings, \"MIDDLEWARE\", None) is not None else \"MIDDLEWARE_CLASSES\"\n middleware = getattr(django_settings, middleware_attr)\n return (\n (not django_settings.DEBUG or client.config.debug)\n and middleware\n and \"elasticapm.contrib.django.middleware.TracingMiddleware\" in middleware\n )\n", "path": "elasticapm/contrib/django/apps.py" } ]
diff --git a/.ci/.jenkins_framework.yml b/.ci/.jenkins_framework.yml index 95ba9afd9..db375c5bc 100644 --- a/.ci/.jenkins_framework.yml +++ b/.ci/.jenkins_framework.yml @@ -5,6 +5,7 @@ FRAMEWORK: - django-1.11 - django-2.0 - django-3.1 + - django-3.2 - flask-0.12 - flask-1.1 - opentracing-newest diff --git a/.ci/.jenkins_framework_full.yml b/.ci/.jenkins_framework_full.yml index 3f1f97a37..26dff1427 100644 --- a/.ci/.jenkins_framework_full.yml +++ b/.ci/.jenkins_framework_full.yml @@ -8,7 +8,8 @@ FRAMEWORK: - django-2.2 - django-3.0 - django-3.1 -# - django-master + - django-3.2 + # - django-master - flask-0.10 - flask-0.11 - flask-0.12 diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 94b466c34..d4f1977f4 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -50,6 +50,7 @@ endif::[] * Fix transaction names for Starlette Mount routes {pull}1037[#1037] * Fix for elastic excepthook arguments {pull}1050[#1050] * Fix issue with remote configuration when resetting config values {pull}1068[#1068] +* Use a label for the elasticapm Django app that is compatible with Django 3.2 validation {pull}1064[#1064] [[release-notes-6.x]] diff --git a/elasticapm/contrib/django/apps.py b/elasticapm/contrib/django/apps.py index 24da893f7..1919b9f56 100644 --- a/elasticapm/contrib/django/apps.py +++ b/elasticapm/contrib/django/apps.py @@ -54,7 +54,7 @@ class ElasticAPMConfig(AppConfig): name = "elasticapm.contrib.django" - label = "elasticapm.contrib.django" + label = "elasticapm" verbose_name = "ElasticAPM" def __init__(self, *args, **kwargs): diff --git a/tests/contrib/django/fixtures.py b/tests/contrib/django/fixtures.py index ddf1e3070..538fda8b8 100644 --- a/tests/contrib/django/fixtures.py +++ b/tests/contrib/django/fixtures.py @@ -57,7 +57,7 @@ def django_elasticapm_client(request): client_config.setdefault("service_name", "app") client_config.setdefault("secret_token", "secret") client_config.setdefault("span_frames_min_duration", -1) - app = apps.get_app_config("elasticapm.contrib.django") + app = apps.get_app_config("elasticapm") old_client = app.client client = TempStoreClient(**client_config) register_handlers(client) @@ -83,7 +83,7 @@ def django_sending_elasticapm_client(request, validating_httpserver): client_config.setdefault("secret_token", "secret") client_config.setdefault("transport_class", "elasticapm.transport.http.Transport") client_config.setdefault("span_frames_min_duration", -1) - app = apps.get_app_config("elasticapm.contrib.django") + app = apps.get_app_config("elasticapm") old_client = app.client client = DjangoClient(**client_config) register_handlers(client) diff --git a/tests/contrib/django/testapp/urls.py b/tests/contrib/django/testapp/urls.py index 1619ed608..8c7255029 100644 --- a/tests/contrib/django/testapp/urls.py +++ b/tests/contrib/django/testapp/urls.py @@ -32,11 +32,16 @@ import django from django.conf import settings -from django.conf.urls import url from django.http import HttpResponse from tests.contrib.django.testapp import views +try: + from django.conf.urls import re_path +except ImportError: + # Django < 2 + from django.conf.urls import url as re_path + def handler500(request): if getattr(settings, "BREAK_THAT_500", False): @@ -45,27 +50,27 @@ def handler500(request): urlpatterns = ( - url(r"^render-heavy-template$", views.render_template_view, name="render-heavy-template"), - url(r"^render-user-template$", views.render_user_view, name="render-user-template"), - url(r"^no-error$", views.no_error, name="elasticapm-no-error"), - 
url(r"^no-error-slash/$", views.no_error, name="elasticapm-no-error-slash"), - url(r"^http-error/(?P<status>[0-9]{3})$", views.http_error, name="elasticapm-http-error"), - url(r"^logging$", views.logging_view, name="elasticapm-logging"), - url(r"^ignored-exception/$", views.ignored_exception, name="elasticapm-ignored-exception"), - url(r"^fake-login$", views.fake_login, name="elasticapm-fake-login"), - url(r"^trigger-500$", views.raise_exc, name="elasticapm-raise-exc"), - url(r"^trigger-500-ioerror$", views.raise_ioerror, name="elasticapm-raise-ioerror"), - url(r"^trigger-500-decorated$", views.decorated_raise_exc, name="elasticapm-raise-exc-decor"), - url(r"^trigger-500-django$", views.django_exc, name="elasticapm-django-exc"), - url(r"^trigger-500-template$", views.template_exc, name="elasticapm-template-exc"), - url(r"^trigger-500-log-request$", views.logging_request_exc, name="elasticapm-log-request-exc"), - url(r"^streaming$", views.streaming_view, name="elasticapm-streaming-view"), - url(r"^name-override$", views.override_transaction_name_view, name="elasticapm-name-override"), + re_path(r"^render-heavy-template$", views.render_template_view, name="render-heavy-template"), + re_path(r"^render-user-template$", views.render_user_view, name="render-user-template"), + re_path(r"^no-error$", views.no_error, name="elasticapm-no-error"), + re_path(r"^no-error-slash/$", views.no_error, name="elasticapm-no-error-slash"), + re_path(r"^http-error/(?P<status>[0-9]{3})$", views.http_error, name="elasticapm-http-error"), + re_path(r"^logging$", views.logging_view, name="elasticapm-logging"), + re_path(r"^ignored-exception/$", views.ignored_exception, name="elasticapm-ignored-exception"), + re_path(r"^fake-login$", views.fake_login, name="elasticapm-fake-login"), + re_path(r"^trigger-500$", views.raise_exc, name="elasticapm-raise-exc"), + re_path(r"^trigger-500-ioerror$", views.raise_ioerror, name="elasticapm-raise-ioerror"), + re_path(r"^trigger-500-decorated$", views.decorated_raise_exc, name="elasticapm-raise-exc-decor"), + re_path(r"^trigger-500-django$", views.django_exc, name="elasticapm-django-exc"), + re_path(r"^trigger-500-template$", views.template_exc, name="elasticapm-template-exc"), + re_path(r"^trigger-500-log-request$", views.logging_request_exc, name="elasticapm-log-request-exc"), + re_path(r"^streaming$", views.streaming_view, name="elasticapm-streaming-view"), + re_path(r"^name-override$", views.override_transaction_name_view, name="elasticapm-name-override"), ) if django.VERSION >= (1, 8): - urlpatterns += (url(r"^render-jinja2-template$", views.render_jinja2_template, name="render-jinja2-template"),) + urlpatterns += (re_path(r"^render-jinja2-template$", views.render_jinja2_template, name="render-jinja2-template"),) if django.VERSION >= (2, 2): from django.urls import path diff --git a/tests/requirements/reqs-django-3.2.txt b/tests/requirements/reqs-django-3.2.txt new file mode 100644 index 000000000..9414b669f --- /dev/null +++ b/tests/requirements/reqs-django-3.2.txt @@ -0,0 +1,2 @@ +Django>=3.2b1,<3.3 +-r reqs-base.txt
elastic__apm-agent-python-1436
[ci] tests.config.tests.test_config_all_upper_case failing

It appears that the `tests.config.tests.test_config_all_upper_case` test is failing [across a number of different platforms on the master branch](https://apm-ci.elastic.co/job/apm-agent-python/job/apm-agent-python-mbp/job/master/593/testReport/):

<img width="684" alt="Screen Shot 2021-12-16 at 9 04 34 AM" src="https://user-images.githubusercontent.com/111616/146331918-5ee9fa87-156b-42b1-8dc5-53ceea0d6df1.png">
[ { "content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport logging\nimport logging.handlers\nimport math\nimport os\nimport re\nimport socket\nimport threading\n\nfrom elasticapm.conf.constants import BASE_SANITIZE_FIELD_NAMES\nfrom elasticapm.utils import compat, starmatch_to_regex\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.threading import IntervalTimer, ThreadManager\n\n__all__ = (\"setup_logging\", \"Config\")\n\n\nlogger = get_logger(\"elasticapm.conf\")\n\nlog_levels_map = {\n \"trace\": 5,\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"warn\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n \"off\": 1000,\n}\nlogfile_set_up = False\n\n\nclass ConfigurationError(ValueError):\n def __init__(self, msg, field_name):\n self.field_name = field_name\n super(ValueError, self).__init__(msg)\n\n\nclass _ConfigValue(object):\n \"\"\"\n Base class for configuration values\n\n dict_key\n String representing the key used for this config value in dict configs.\n env_key\n String representing the key used in environment variables for this\n config value. If not specified, will be set to `\"ELASTIC_APM_\" + dict_key`.\n type\n Type of value stored in this config value.\n validators\n List of validator classes. Must be callables, which will be called with\n a value and the dict_key for the config value. The validator either\n returns the validated value or raises a ConfigurationError if validation\n fails.\n callbacks\n List of functions which will be called when the config value is updated.\n The callbacks must match this signature:\n callback(dict_key, old_value, new_value, config_instance)\n\n Note that callbacks wait until the end of any given `update()` operation\n and are called at this point. This, coupled with the fact that callbacks\n receive the config instance, means that callbacks can utilize multiple\n configuration values (such as is the case for logging). 
This is\n complicated if more than one of the involved config values are\n dynamic, as both would need callbacks and the callback would need to\n be idempotent.\n callbacks_on_default\n Whether the callback should be called on config initialization if the\n default value is used. Default: True\n default\n The default for this config value if not user-configured.\n required\n Whether this config value is required. If a default is specified,\n this is a redundant option (except to ensure that this config value\n is specified if a default were ever to be removed).\n\n Note that _ConfigValues and any inheriting classes must implement __set__\n and __get__. The calling instance will always be a _ConfigBase descendant\n and the __set__ and __get__ calls will access `instance._values[self.dict_key]`\n to get and set values.\n \"\"\"\n\n def __init__(\n self,\n dict_key,\n env_key=None,\n type=compat.text_type,\n validators=None,\n callbacks=None,\n callbacks_on_default=True,\n default=None,\n required=False,\n ):\n self.type = type\n self.dict_key = dict_key\n self.validators = validators\n self.callbacks = callbacks\n self.default = default\n self.required = required\n if env_key is None:\n env_key = \"ELASTIC_APM_\" + dict_key\n self.env_key = env_key\n self.callbacks_on_default = callbacks_on_default\n\n def __get__(self, instance, owner):\n if instance:\n return instance._values.get(self.dict_key, self.default)\n else:\n return self.default\n\n def __set__(self, config_instance, value):\n value = self._validate(config_instance, value)\n self._callback_if_changed(config_instance, value)\n config_instance._values[self.dict_key] = value\n\n def _validate(self, instance, value):\n if value is None and self.required:\n raise ConfigurationError(\n \"Configuration error: value for {} is required.\".format(self.dict_key), self.dict_key\n )\n if self.validators and value is not None:\n for validator in self.validators:\n value = validator(value, self.dict_key)\n if self.type and value is not None:\n try:\n value = self.type(value)\n except ValueError as e:\n raise ConfigurationError(\"{}: {}\".format(self.dict_key, compat.text_type(e)), self.dict_key)\n instance._errors.pop(self.dict_key, None)\n return value\n\n def _callback_if_changed(self, instance, new_value):\n \"\"\"\n If the value changed (checked against instance._values[self.dict_key]),\n then run the callback function (if defined)\n \"\"\"\n old_value = instance._values.get(self.dict_key, self.default)\n if old_value != new_value:\n instance.callbacks_queue.append((self.dict_key, old_value, new_value))\n\n def call_callbacks(self, old_value, new_value, config_instance):\n if not self.callbacks:\n return\n for callback in self.callbacks:\n try:\n callback(self.dict_key, old_value, new_value, config_instance)\n except Exception as e:\n raise ConfigurationError(\n \"Callback {} raised an exception when setting {} to {}: {}\".format(\n callback, self.dict_key, new_value, e\n ),\n self.dict_key,\n )\n\n\nclass _ListConfigValue(_ConfigValue):\n def __init__(self, dict_key, list_separator=\",\", **kwargs):\n self.list_separator = list_separator\n super(_ListConfigValue, self).__init__(dict_key, **kwargs)\n\n def __set__(self, instance, value):\n if isinstance(value, compat.string_types):\n value = value.split(self.list_separator)\n elif value is not None:\n value = list(value)\n if value:\n value = [self.type(item) for item in value]\n self._callback_if_changed(instance, value)\n instance._values[self.dict_key] = value\n\n\nclass 
_DictConfigValue(_ConfigValue):\n def __init__(self, dict_key, item_separator=\",\", keyval_separator=\"=\", **kwargs):\n self.item_separator = item_separator\n self.keyval_separator = keyval_separator\n super(_DictConfigValue, self).__init__(dict_key, **kwargs)\n\n def __set__(self, instance, value):\n if isinstance(value, compat.string_types):\n items = (item.split(self.keyval_separator) for item in value.split(self.item_separator))\n value = {key.strip(): self.type(val.strip()) for key, val in items}\n elif not isinstance(value, dict):\n # TODO: better error handling\n value = None\n self._callback_if_changed(instance, value)\n instance._values[self.dict_key] = value\n\n\nclass _BoolConfigValue(_ConfigValue):\n def __init__(self, dict_key, true_string=\"true\", false_string=\"false\", **kwargs):\n self.true_string = true_string\n self.false_string = false_string\n super(_BoolConfigValue, self).__init__(dict_key, **kwargs)\n\n def __set__(self, instance, value):\n if isinstance(value, compat.string_types):\n if value.lower() == self.true_string:\n value = True\n elif value.lower() == self.false_string:\n value = False\n self._callback_if_changed(instance, value)\n instance._values[self.dict_key] = bool(value)\n\n\nclass RegexValidator(object):\n def __init__(self, regex, verbose_pattern=None):\n self.regex = regex\n self.verbose_pattern = verbose_pattern or regex\n\n def __call__(self, value, field_name):\n value = compat.text_type(value)\n match = re.match(self.regex, value)\n if match:\n return value\n raise ConfigurationError(\"{} does not match pattern {}\".format(value, self.verbose_pattern), field_name)\n\n\nclass UnitValidator(object):\n def __init__(self, regex, verbose_pattern, unit_multipliers):\n self.regex = regex\n self.verbose_pattern = verbose_pattern\n self.unit_multipliers = unit_multipliers\n\n def __call__(self, value, field_name):\n value = compat.text_type(value)\n match = re.match(self.regex, value, re.IGNORECASE)\n if not match:\n raise ConfigurationError(\"{} does not match pattern {}\".format(value, self.verbose_pattern), field_name)\n val, unit = match.groups()\n try:\n val = int(val) * self.unit_multipliers[unit]\n except KeyError:\n raise ConfigurationError(\"{} is not a supported unit\".format(unit), field_name)\n return val\n\n\nclass PrecisionValidator(object):\n \"\"\"\n Forces a float value to `precision` digits of precision.\n\n Rounds half away from zero.\n\n If `minimum` is provided, and the value rounds to 0 (but was not zero to\n begin with), use the minimum instead.\n \"\"\"\n\n def __init__(self, precision=0, minimum=None):\n self.precision = precision\n self.minimum = minimum\n\n def __call__(self, value, field_name):\n try:\n value = float(value)\n except ValueError:\n raise ConfigurationError(\"{} is not a float\".format(value), field_name)\n multiplier = 10 ** self.precision\n rounded = math.floor(value * multiplier + 0.5) / multiplier\n if rounded == 0 and self.minimum and value != 0:\n rounded = self.minimum\n return rounded\n\n\nduration_validator = UnitValidator(\n r\"^((?:-)?\\d+)(us|ms|s|m)$\", r\"\\d+(us|ms|s|m)\", {\"us\": 0.001, \"ms\": 1, \"s\": 1000, \"m\": 60000}\n)\nsize_validator = UnitValidator(\n r\"^(\\d+)(b|kb|mb|gb)$\", r\"\\d+(b|KB|MB|GB)\", {\"b\": 1, \"kb\": 1024, \"mb\": 1024 * 1024, \"gb\": 1024 * 1024 * 1024}\n)\n\n\nclass ExcludeRangeValidator(object):\n def __init__(self, range_start, range_end, range_desc):\n self.range_start = range_start\n self.range_end = range_end\n self.range_desc = range_desc\n\n def 
__call__(self, value, field_name):\n if self.range_start <= value <= self.range_end:\n raise ConfigurationError(\n \"{} cannot be in range: {}\".format(\n value, self.range_desc.format(**{\"range_start\": self.range_start, \"range_end\": self.range_end})\n ),\n field_name,\n )\n return value\n\n\nclass FileIsReadableValidator(object):\n def __call__(self, value, field_name):\n value = os.path.normpath(value)\n if not os.path.exists(value):\n raise ConfigurationError(\"{} does not exist\".format(value), field_name)\n elif not os.path.isfile(value):\n raise ConfigurationError(\"{} is not a file\".format(value), field_name)\n elif not os.access(value, os.R_OK):\n raise ConfigurationError(\"{} is not readable\".format(value), field_name)\n return value\n\n\nclass EnumerationValidator(object):\n \"\"\"\n Validator which ensures that a given config value is chosen from a list\n of valid string options.\n \"\"\"\n\n def __init__(self, valid_values, case_sensitive=False):\n \"\"\"\n valid_values\n List of valid string values for the config value\n case_sensitive\n Whether to compare case when comparing a value to the valid list.\n Defaults to False (case-insensitive)\n \"\"\"\n self.case_sensitive = case_sensitive\n if case_sensitive:\n self.valid_values = {s: s for s in valid_values}\n else:\n self.valid_values = {s.lower(): s for s in valid_values}\n\n def __call__(self, value, field_name):\n if self.case_sensitive:\n ret = self.valid_values.get(value)\n else:\n ret = self.valid_values.get(value.lower())\n if ret is None:\n raise ConfigurationError(\n \"{} is not in the list of valid values: {}\".format(value, list(self.valid_values.values())), field_name\n )\n return ret\n\n\ndef _log_level_callback(dict_key, old_value, new_value, config_instance):\n elasticapm_logger = logging.getLogger(\"elasticapm\")\n elasticapm_logger.setLevel(log_levels_map.get(new_value, 100))\n\n global logfile_set_up\n if not logfile_set_up and config_instance.log_file:\n logfile_set_up = True\n filehandler = logging.handlers.RotatingFileHandler(\n config_instance.log_file, maxBytes=config_instance.log_file_size, backupCount=1\n )\n try:\n import ecs_logging\n\n filehandler.setFormatter(ecs_logging.StdlibFormatter())\n except ImportError:\n pass\n elasticapm_logger.addHandler(filehandler)\n\n\ndef _log_ecs_reformatting_callback(dict_key, old_value, new_value, config_instance):\n \"\"\"\n If ecs_logging is installed and log_ecs_reformatting is set to \"override\", we should\n set the ecs_logging.StdlibFormatter as the formatted for every handler in\n the root logger, and set the default processor for structlog to the\n ecs_logging.StructlogFormatter.\n \"\"\"\n if new_value.lower() == \"override\":\n try:\n import ecs_logging\n except ImportError:\n return\n\n # Stdlib\n root_logger = logging.getLogger()\n formatter = ecs_logging.StdlibFormatter()\n for handler in root_logger.handlers:\n handler.setFormatter(formatter)\n\n # Structlog\n try:\n import structlog\n\n structlog.configure(processors=[ecs_logging.StructlogFormatter()])\n except ImportError:\n pass\n\n\nclass _ConfigBase(object):\n _NO_VALUE = object() # sentinel object\n\n def __init__(self, config_dict=None, env_dict=None, inline_dict=None, copy=False):\n \"\"\"\n config_dict\n Configuration dict as is common for frameworks such as flask and django.\n Keys match the _ConfigValue.dict_key (usually all caps)\n env_dict\n Environment variables dict. 
Keys match the _ConfigValue.env_key\n (usually \"ELASTIC_APM_\" + dict_key)\n inline_dict\n Any config passed in as kwargs to the Client object. Typically\n the keys match the names of the _ConfigValue variables in the Config\n object.\n copy\n Whether this object is being created to copy an existing Config\n object. If True, don't run the initial `update` (which would call\n callbacks if present)\n \"\"\"\n self._values = {}\n self._errors = {}\n self._dict_key_lookup = {}\n self.callbacks_queue = []\n for config_value in self.__class__.__dict__.values():\n if not isinstance(config_value, _ConfigValue):\n continue\n self._dict_key_lookup[config_value.dict_key] = config_value\n if not copy:\n self.update(config_dict, env_dict, inline_dict, initial=True)\n\n def update(self, config_dict=None, env_dict=None, inline_dict=None, initial=False):\n if config_dict is None:\n config_dict = {}\n if env_dict is None:\n env_dict = os.environ\n if inline_dict is None:\n inline_dict = {}\n for field, config_value in compat.iteritems(self.__class__.__dict__):\n if not isinstance(config_value, _ConfigValue):\n continue\n new_value = self._NO_VALUE\n # first check environment\n if config_value.env_key and config_value.env_key in env_dict:\n new_value = env_dict[config_value.env_key]\n # check the inline config\n elif field in inline_dict:\n new_value = inline_dict[field]\n # finally, check config dictionary\n elif config_value.dict_key in config_dict:\n new_value = config_dict[config_value.dict_key]\n # only set if new_value changed. We'll fall back to the field default if not.\n if new_value is not self._NO_VALUE:\n try:\n setattr(self, field, new_value)\n except ConfigurationError as e:\n self._errors[e.field_name] = str(e)\n # handle initial callbacks\n if (\n initial\n and config_value.callbacks_on_default\n and getattr(self, field) is not None\n and getattr(self, field) == config_value.default\n ):\n self.callbacks_queue.append((config_value.dict_key, self._NO_VALUE, config_value.default))\n # if a field has not been provided by any config source, we have to check separately if it is required\n if config_value.required and getattr(self, field) is None:\n self._errors[config_value.dict_key] = \"Configuration error: value for {} is required.\".format(\n config_value.dict_key\n )\n self.call_pending_callbacks()\n\n def call_pending_callbacks(self):\n \"\"\"\n Call callbacks for config options matching list of tuples:\n\n (dict_key, old_value, new_value)\n \"\"\"\n for dict_key, old_value, new_value in self.callbacks_queue:\n self._dict_key_lookup[dict_key].call_callbacks(old_value, new_value, self)\n self.callbacks_queue = []\n\n @property\n def values(self):\n return self._values\n\n @values.setter\n def values(self, values):\n self._values = values\n\n @property\n def errors(self):\n return self._errors\n\n def copy(self):\n c = self.__class__(copy=True)\n c._errors = {}\n c.values = self.values.copy()\n return c\n\n\nclass Config(_ConfigBase):\n service_name = _ConfigValue(\n \"SERVICE_NAME\", validators=[RegexValidator(\"^[a-zA-Z0-9 _-]+$\")], default=\"python_service\", required=True\n )\n service_node_name = _ConfigValue(\"SERVICE_NODE_NAME\")\n environment = _ConfigValue(\"ENVIRONMENT\")\n secret_token = _ConfigValue(\"SECRET_TOKEN\")\n api_key = _ConfigValue(\"API_KEY\")\n debug = _BoolConfigValue(\"DEBUG\", default=False)\n server_url = _ConfigValue(\"SERVER_URL\", default=\"http://localhost:8200\", required=True)\n server_cert = _ConfigValue(\"SERVER_CERT\", 
validators=[FileIsReadableValidator()])\n verify_server_cert = _BoolConfigValue(\"VERIFY_SERVER_CERT\", default=True)\n use_certifi = _BoolConfigValue(\"USE_CERTIFI\", default=True)\n include_paths = _ListConfigValue(\"INCLUDE_PATHS\")\n exclude_paths = _ListConfigValue(\"EXCLUDE_PATHS\", default=compat.get_default_library_patters())\n filter_exception_types = _ListConfigValue(\"FILTER_EXCEPTION_TYPES\")\n server_timeout = _ConfigValue(\n \"SERVER_TIMEOUT\",\n type=float,\n validators=[\n UnitValidator(r\"^((?:-)?\\d+)(ms|s|m)?$\", r\"\\d+(ms|s|m)\", {\"ms\": 0.001, \"s\": 1, \"m\": 60, None: 1000})\n ],\n default=5,\n )\n hostname = _ConfigValue(\"HOSTNAME\", default=socket.gethostname())\n auto_log_stacks = _BoolConfigValue(\"AUTO_LOG_STACKS\", default=True)\n transport_class = _ConfigValue(\"TRANSPORT_CLASS\", default=\"elasticapm.transport.http.Transport\", required=True)\n processors = _ListConfigValue(\n \"PROCESSORS\",\n default=[\n \"elasticapm.processors.sanitize_stacktrace_locals\",\n \"elasticapm.processors.sanitize_http_request_cookies\",\n \"elasticapm.processors.sanitize_http_response_cookies\",\n \"elasticapm.processors.sanitize_http_headers\",\n \"elasticapm.processors.sanitize_http_wsgi_env\",\n \"elasticapm.processors.sanitize_http_request_body\",\n ],\n )\n sanitize_field_names = _ListConfigValue(\n \"SANITIZE_FIELD_NAMES\", type=starmatch_to_regex, default=BASE_SANITIZE_FIELD_NAMES\n )\n metrics_sets = _ListConfigValue(\n \"METRICS_SETS\",\n default=[\n \"elasticapm.metrics.sets.cpu.CPUMetricSet\",\n ],\n )\n metrics_interval = _ConfigValue(\n \"METRICS_INTERVAL\",\n type=int,\n validators=[duration_validator, ExcludeRangeValidator(1, 999, \"{range_start} - {range_end} ms\")],\n default=30000,\n )\n breakdown_metrics = _BoolConfigValue(\"BREAKDOWN_METRICS\", default=True)\n prometheus_metrics = _BoolConfigValue(\"PROMETHEUS_METRICS\", default=False)\n prometheus_metrics_prefix = _ConfigValue(\"PROMETHEUS_METRICS_PREFIX\", default=\"prometheus.metrics.\")\n disable_metrics = _ListConfigValue(\"DISABLE_METRICS\", type=starmatch_to_regex, default=[])\n central_config = _BoolConfigValue(\"CENTRAL_CONFIG\", default=True)\n api_request_size = _ConfigValue(\"API_REQUEST_SIZE\", type=int, validators=[size_validator], default=768 * 1024)\n api_request_time = _ConfigValue(\"API_REQUEST_TIME\", type=int, validators=[duration_validator], default=10 * 1000)\n transaction_sample_rate = _ConfigValue(\n \"TRANSACTION_SAMPLE_RATE\", type=float, validators=[PrecisionValidator(4, 0.0001)], default=1.0\n )\n transaction_max_spans = _ConfigValue(\"TRANSACTION_MAX_SPANS\", type=int, default=500)\n stack_trace_limit = _ConfigValue(\"STACK_TRACE_LIMIT\", type=int, default=500)\n span_frames_min_duration = _ConfigValue(\n \"SPAN_FRAMES_MIN_DURATION\",\n default=5,\n validators=[\n UnitValidator(r\"^((?:-)?\\d+)(ms|s|m)?$\", r\"\\d+(ms|s|m)\", {\"ms\": 1, \"s\": 1000, \"m\": 60000, None: 1})\n ],\n type=int,\n )\n span_compression_enabled = _BoolConfigValue(\"SPAN_COMPRESSION_ENABLED\", default=False)\n span_compression_exact_match_max_duration = _ConfigValue(\n \"SPAN_COMPRESSION_EXACT_MATCH_MAX_DURATION\",\n default=50,\n validators=[duration_validator],\n type=int,\n )\n span_compression_same_kind_max_duration = _ConfigValue(\n \"SPAN_COMPRESSION_SAME_KIND_MAX_DURATION\",\n default=5,\n validators=[duration_validator],\n type=int,\n )\n exit_span_min_duration = _ConfigValue(\n \"exit_span_min_duration\",\n default=1,\n validators=[duration_validator],\n type=float,\n )\n 
collect_local_variables = _ConfigValue(\"COLLECT_LOCAL_VARIABLES\", default=\"errors\")\n source_lines_error_app_frames = _ConfigValue(\"SOURCE_LINES_ERROR_APP_FRAMES\", type=int, default=5)\n source_lines_error_library_frames = _ConfigValue(\"SOURCE_LINES_ERROR_LIBRARY_FRAMES\", type=int, default=5)\n source_lines_span_app_frames = _ConfigValue(\"SOURCE_LINES_SPAN_APP_FRAMES\", type=int, default=0)\n source_lines_span_library_frames = _ConfigValue(\"SOURCE_LINES_SPAN_LIBRARY_FRAMES\", type=int, default=0)\n local_var_max_length = _ConfigValue(\"LOCAL_VAR_MAX_LENGTH\", type=int, default=200)\n local_var_list_max_length = _ConfigValue(\"LOCAL_VAR_LIST_MAX_LENGTH\", type=int, default=10)\n local_var_dict_max_length = _ConfigValue(\"LOCAL_VAR_DICT_MAX_LENGTH\", type=int, default=10)\n capture_body = _ConfigValue(\n \"CAPTURE_BODY\",\n default=\"off\",\n validators=[lambda val, _: {\"errors\": \"error\", \"transactions\": \"transaction\"}.get(val, val)],\n )\n async_mode = _BoolConfigValue(\"ASYNC_MODE\", default=True)\n instrument_django_middleware = _BoolConfigValue(\"INSTRUMENT_DJANGO_MIDDLEWARE\", default=True)\n autoinsert_django_middleware = _BoolConfigValue(\"AUTOINSERT_DJANGO_MIDDLEWARE\", default=True)\n transactions_ignore_patterns = _ListConfigValue(\"TRANSACTIONS_IGNORE_PATTERNS\", default=[])\n transaction_ignore_urls = _ListConfigValue(\"TRANSACTION_IGNORE_URLS\", type=starmatch_to_regex, default=[])\n service_version = _ConfigValue(\"SERVICE_VERSION\")\n framework_name = _ConfigValue(\"FRAMEWORK_NAME\")\n framework_version = _ConfigValue(\"FRAMEWORK_VERSION\")\n global_labels = _DictConfigValue(\"GLOBAL_LABELS\")\n disable_send = _BoolConfigValue(\"DISABLE_SEND\", default=False)\n enabled = _BoolConfigValue(\"ENABLED\", default=True)\n recording = _BoolConfigValue(\"RECORDING\", default=True)\n instrument = _BoolConfigValue(\"INSTRUMENT\", default=True)\n enable_distributed_tracing = _BoolConfigValue(\"ENABLE_DISTRIBUTED_TRACING\", default=True)\n capture_headers = _BoolConfigValue(\"CAPTURE_HEADERS\", default=True)\n django_transaction_name_from_route = _BoolConfigValue(\"DJANGO_TRANSACTION_NAME_FROM_ROUTE\", default=False)\n disable_log_record_factory = _BoolConfigValue(\"DISABLE_LOG_RECORD_FACTORY\", default=False)\n use_elastic_traceparent_header = _BoolConfigValue(\"USE_ELASTIC_TRACEPARENT_HEADER\", default=True)\n use_elastic_excepthook = _BoolConfigValue(\"USE_ELASTIC_EXCEPTHOOK\", default=False)\n cloud_provider = _ConfigValue(\"CLOUD_PROVIDER\", default=True)\n log_level = _ConfigValue(\n \"LOG_LEVEL\",\n validators=[EnumerationValidator([\"trace\", \"debug\", \"info\", \"warning\", \"warn\", \"error\", \"critical\", \"off\"])],\n callbacks=[_log_level_callback],\n )\n log_file = _ConfigValue(\"LOG_FILE\", default=\"\")\n log_file_size = _ConfigValue(\"LOG_FILE_SIZE\", validators=[size_validator], type=int, default=50 * 1024 * 1024)\n log_ecs_reformatting = _ConfigValue(\n \"LOG_ECS_REFORMATTING\",\n validators=[EnumerationValidator([\"off\", \"override\"])],\n callbacks=[_log_ecs_reformatting_callback],\n default=\"off\",\n )\n\n @property\n def is_recording(self):\n if not self.enabled:\n return False\n else:\n return self.recording\n\n\nclass VersionedConfig(ThreadManager):\n \"\"\"\n A thin layer around Config that provides versioning\n \"\"\"\n\n __slots__ = (\n \"_config\",\n \"_version\",\n \"_first_config\",\n \"_first_version\",\n \"_lock\",\n \"transport\",\n \"_update_thread\",\n \"pid\",\n \"start_stop_order\",\n )\n\n def __init__(self, config_object, 
version, transport=None):\n \"\"\"\n Create a new VersionedConfig with an initial Config object\n :param config_object: the initial Config object\n :param version: a version identifier for the configuration\n \"\"\"\n self._config = self._first_config = config_object\n self._version = self._first_version = version\n self.transport = transport\n self._lock = threading.Lock()\n self._update_thread = None\n super(VersionedConfig, self).__init__()\n\n def update(self, version, **config):\n \"\"\"\n Update the configuration version\n :param version: version identifier for the new configuration\n :param config: a key/value map of new configuration\n :return: configuration errors, if any\n \"\"\"\n new_config = self._config.copy()\n\n # pass an empty env dict to ensure the environment doesn't get precedence\n new_config.update(inline_dict=config, env_dict={})\n if not new_config.errors:\n with self._lock:\n self._version = version\n self._config = new_config\n else:\n return new_config.errors\n\n def reset(self):\n \"\"\"\n Reset state to the original configuration\n\n Note that because ConfigurationValues can have callbacks, we need to\n note any differences between the original configuration and the most\n recent configuration and run any callbacks that might exist for those\n values.\n \"\"\"\n callbacks = []\n for key in compat.iterkeys(self._config.values):\n if key in self._first_config.values and self._config.values[key] != self._first_config.values[key]:\n callbacks.append((key, self._config.values[key], self._first_config.values[key]))\n\n with self._lock:\n self._version = self._first_version\n self._config = self._first_config\n\n self._config.callbacks_queue.extend(callbacks)\n self._config.call_pending_callbacks()\n\n @property\n def changed(self):\n return self._config != self._first_config\n\n def __getattr__(self, item):\n return getattr(self._config, item)\n\n def __setattr__(self, name, value):\n if name not in self.__slots__:\n setattr(self._config, name, value)\n else:\n super(VersionedConfig, self).__setattr__(name, value)\n\n @property\n def config_version(self):\n return self._version\n\n def update_config(self):\n if not self.transport:\n logger.warning(\"No transport set for config updates, skipping\")\n return\n logger.debug(\"Checking for new config...\")\n keys = {\"service\": {\"name\": self.service_name}}\n if self.environment:\n keys[\"service\"][\"environment\"] = self.environment\n new_version, new_config, next_run = self.transport.get_config(self.config_version, keys)\n if new_version and new_config:\n errors = self.update(new_version, **new_config)\n if errors:\n logger.error(\"Error applying new configuration: %s\", repr(errors))\n else:\n logger.info(\n \"Applied new remote configuration: %s\",\n \"; \".join(\n \"%s=%s\" % (compat.text_type(k), compat.text_type(v)) for k, v in compat.iteritems(new_config)\n ),\n )\n elif new_version == self.config_version:\n logger.debug(\"Remote config unchanged\")\n elif not new_config and self.changed:\n logger.debug(\"Remote config disappeared, resetting to original\")\n self.reset()\n\n return next_run\n\n def start_thread(self, pid=None):\n self._update_thread = IntervalTimer(\n self.update_config, 1, \"eapm conf updater\", daemon=True, evaluate_function_interval=True\n )\n self._update_thread.start()\n super(VersionedConfig, self).start_thread(pid=pid)\n\n def stop_thread(self):\n if self._update_thread:\n self._update_thread.cancel()\n self._update_thread = None\n\n\ndef setup_logging(handler):\n \"\"\"\n Configures 
logging to pipe to Elastic APM.\n\n For a typical Python install:\n\n >>> from elasticapm.handlers.logging import LoggingHandler\n >>> client = ElasticAPM(...)\n >>> setup_logging(LoggingHandler(client))\n\n Within Django:\n\n >>> from elasticapm.contrib.django.handlers import LoggingHandler\n >>> setup_logging(LoggingHandler())\n\n Returns a boolean based on if logging was configured or not.\n \"\"\"\n # TODO We should probably revisit this. Does it make more sense as\n # a method within the Client class? The Client object could easily\n # pass itself into LoggingHandler and we could eliminate args altogether.\n logger = logging.getLogger()\n if handler.__class__ in map(type, logger.handlers):\n return False\n\n logger.addHandler(handler)\n\n return True\n", "path": "elasticapm/conf/__init__.py" } ]
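The configuration module above does most of its input checking through small validator callables such as `duration_validator` and `size_validator`. A quick, illustrative sketch of how those module-level instances behave (the option names passed as the second argument are only labels used in error messages):

```python
from elasticapm.conf import ConfigurationError, duration_validator, size_validator

# Durations are normalized to milliseconds: "30s" -> 30 * 1000.
assert duration_validator("30s", "METRICS_INTERVAL") == 30000

# Sizes are normalized to bytes: "50mb" -> 50 * 1024 * 1024.
assert size_validator("50mb", "LOG_FILE_SIZE") == 50 * 1024 * 1024

# Values that don't match the expected pattern raise ConfigurationError,
# which carries the offending field name.
try:
    duration_validator("30 seconds", "METRICS_INTERVAL")
except ConfigurationError as exc:
    assert exc.field_name == "METRICS_INTERVAL"
```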
[ { "content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport logging\nimport logging.handlers\nimport math\nimport os\nimport re\nimport socket\nimport threading\n\nfrom elasticapm.conf.constants import BASE_SANITIZE_FIELD_NAMES\nfrom elasticapm.utils import compat, starmatch_to_regex\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.threading import IntervalTimer, ThreadManager\n\n__all__ = (\"setup_logging\", \"Config\")\n\n\nlogger = get_logger(\"elasticapm.conf\")\n\nlog_levels_map = {\n \"trace\": 5,\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"warn\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n \"off\": 1000,\n}\nlogfile_set_up = False\n\n\nclass ConfigurationError(ValueError):\n def __init__(self, msg, field_name):\n self.field_name = field_name\n super(ValueError, self).__init__(msg)\n\n\nclass _ConfigValue(object):\n \"\"\"\n Base class for configuration values\n\n dict_key\n String representing the key used for this config value in dict configs.\n env_key\n String representing the key used in environment variables for this\n config value. If not specified, will be set to `\"ELASTIC_APM_\" + dict_key`.\n type\n Type of value stored in this config value.\n validators\n List of validator classes. Must be callables, which will be called with\n a value and the dict_key for the config value. The validator either\n returns the validated value or raises a ConfigurationError if validation\n fails.\n callbacks\n List of functions which will be called when the config value is updated.\n The callbacks must match this signature:\n callback(dict_key, old_value, new_value, config_instance)\n\n Note that callbacks wait until the end of any given `update()` operation\n and are called at this point. This, coupled with the fact that callbacks\n receive the config instance, means that callbacks can utilize multiple\n configuration values (such as is the case for logging). 
This is\n complicated if more than one of the involved config values are\n dynamic, as both would need callbacks and the callback would need to\n be idempotent.\n callbacks_on_default\n Whether the callback should be called on config initialization if the\n default value is used. Default: True\n default\n The default for this config value if not user-configured.\n required\n Whether this config value is required. If a default is specified,\n this is a redundant option (except to ensure that this config value\n is specified if a default were ever to be removed).\n\n Note that _ConfigValues and any inheriting classes must implement __set__\n and __get__. The calling instance will always be a _ConfigBase descendant\n and the __set__ and __get__ calls will access `instance._values[self.dict_key]`\n to get and set values.\n \"\"\"\n\n def __init__(\n self,\n dict_key,\n env_key=None,\n type=compat.text_type,\n validators=None,\n callbacks=None,\n callbacks_on_default=True,\n default=None,\n required=False,\n ):\n self.type = type\n self.dict_key = dict_key\n self.validators = validators\n self.callbacks = callbacks\n self.default = default\n self.required = required\n if env_key is None:\n env_key = \"ELASTIC_APM_\" + dict_key\n self.env_key = env_key\n self.callbacks_on_default = callbacks_on_default\n\n def __get__(self, instance, owner):\n if instance:\n return instance._values.get(self.dict_key, self.default)\n else:\n return self.default\n\n def __set__(self, config_instance, value):\n value = self._validate(config_instance, value)\n self._callback_if_changed(config_instance, value)\n config_instance._values[self.dict_key] = value\n\n def _validate(self, instance, value):\n if value is None and self.required:\n raise ConfigurationError(\n \"Configuration error: value for {} is required.\".format(self.dict_key), self.dict_key\n )\n if self.validators and value is not None:\n for validator in self.validators:\n value = validator(value, self.dict_key)\n if self.type and value is not None:\n try:\n value = self.type(value)\n except ValueError as e:\n raise ConfigurationError(\"{}: {}\".format(self.dict_key, compat.text_type(e)), self.dict_key)\n instance._errors.pop(self.dict_key, None)\n return value\n\n def _callback_if_changed(self, instance, new_value):\n \"\"\"\n If the value changed (checked against instance._values[self.dict_key]),\n then run the callback function (if defined)\n \"\"\"\n old_value = instance._values.get(self.dict_key, self.default)\n if old_value != new_value:\n instance.callbacks_queue.append((self.dict_key, old_value, new_value))\n\n def call_callbacks(self, old_value, new_value, config_instance):\n if not self.callbacks:\n return\n for callback in self.callbacks:\n try:\n callback(self.dict_key, old_value, new_value, config_instance)\n except Exception as e:\n raise ConfigurationError(\n \"Callback {} raised an exception when setting {} to {}: {}\".format(\n callback, self.dict_key, new_value, e\n ),\n self.dict_key,\n )\n\n\nclass _ListConfigValue(_ConfigValue):\n def __init__(self, dict_key, list_separator=\",\", **kwargs):\n self.list_separator = list_separator\n super(_ListConfigValue, self).__init__(dict_key, **kwargs)\n\n def __set__(self, instance, value):\n if isinstance(value, compat.string_types):\n value = value.split(self.list_separator)\n elif value is not None:\n value = list(value)\n if value:\n value = [self.type(item) for item in value]\n self._callback_if_changed(instance, value)\n instance._values[self.dict_key] = value\n\n\nclass 
_DictConfigValue(_ConfigValue):\n def __init__(self, dict_key, item_separator=\",\", keyval_separator=\"=\", **kwargs):\n self.item_separator = item_separator\n self.keyval_separator = keyval_separator\n super(_DictConfigValue, self).__init__(dict_key, **kwargs)\n\n def __set__(self, instance, value):\n if isinstance(value, compat.string_types):\n items = (item.split(self.keyval_separator) for item in value.split(self.item_separator))\n value = {key.strip(): self.type(val.strip()) for key, val in items}\n elif not isinstance(value, dict):\n # TODO: better error handling\n value = None\n self._callback_if_changed(instance, value)\n instance._values[self.dict_key] = value\n\n\nclass _BoolConfigValue(_ConfigValue):\n def __init__(self, dict_key, true_string=\"true\", false_string=\"false\", **kwargs):\n self.true_string = true_string\n self.false_string = false_string\n super(_BoolConfigValue, self).__init__(dict_key, **kwargs)\n\n def __set__(self, instance, value):\n if isinstance(value, compat.string_types):\n if value.lower() == self.true_string:\n value = True\n elif value.lower() == self.false_string:\n value = False\n self._callback_if_changed(instance, value)\n instance._values[self.dict_key] = bool(value)\n\n\nclass RegexValidator(object):\n def __init__(self, regex, verbose_pattern=None):\n self.regex = regex\n self.verbose_pattern = verbose_pattern or regex\n\n def __call__(self, value, field_name):\n value = compat.text_type(value)\n match = re.match(self.regex, value)\n if match:\n return value\n raise ConfigurationError(\"{} does not match pattern {}\".format(value, self.verbose_pattern), field_name)\n\n\nclass UnitValidator(object):\n def __init__(self, regex, verbose_pattern, unit_multipliers):\n self.regex = regex\n self.verbose_pattern = verbose_pattern\n self.unit_multipliers = unit_multipliers\n\n def __call__(self, value, field_name):\n value = compat.text_type(value)\n match = re.match(self.regex, value, re.IGNORECASE)\n if not match:\n raise ConfigurationError(\"{} does not match pattern {}\".format(value, self.verbose_pattern), field_name)\n val, unit = match.groups()\n try:\n val = int(val) * self.unit_multipliers[unit]\n except KeyError:\n raise ConfigurationError(\"{} is not a supported unit\".format(unit), field_name)\n return val\n\n\nclass PrecisionValidator(object):\n \"\"\"\n Forces a float value to `precision` digits of precision.\n\n Rounds half away from zero.\n\n If `minimum` is provided, and the value rounds to 0 (but was not zero to\n begin with), use the minimum instead.\n \"\"\"\n\n def __init__(self, precision=0, minimum=None):\n self.precision = precision\n self.minimum = minimum\n\n def __call__(self, value, field_name):\n try:\n value = float(value)\n except ValueError:\n raise ConfigurationError(\"{} is not a float\".format(value), field_name)\n multiplier = 10 ** self.precision\n rounded = math.floor(value * multiplier + 0.5) / multiplier\n if rounded == 0 and self.minimum and value != 0:\n rounded = self.minimum\n return rounded\n\n\nduration_validator = UnitValidator(\n r\"^((?:-)?\\d+)(us|ms|s|m)$\", r\"\\d+(us|ms|s|m)\", {\"us\": 0.001, \"ms\": 1, \"s\": 1000, \"m\": 60000}\n)\nsize_validator = UnitValidator(\n r\"^(\\d+)(b|kb|mb|gb)$\", r\"\\d+(b|KB|MB|GB)\", {\"b\": 1, \"kb\": 1024, \"mb\": 1024 * 1024, \"gb\": 1024 * 1024 * 1024}\n)\n\n\nclass ExcludeRangeValidator(object):\n def __init__(self, range_start, range_end, range_desc):\n self.range_start = range_start\n self.range_end = range_end\n self.range_desc = range_desc\n\n def 
__call__(self, value, field_name):\n if self.range_start <= value <= self.range_end:\n raise ConfigurationError(\n \"{} cannot be in range: {}\".format(\n value, self.range_desc.format(**{\"range_start\": self.range_start, \"range_end\": self.range_end})\n ),\n field_name,\n )\n return value\n\n\nclass FileIsReadableValidator(object):\n def __call__(self, value, field_name):\n value = os.path.normpath(value)\n if not os.path.exists(value):\n raise ConfigurationError(\"{} does not exist\".format(value), field_name)\n elif not os.path.isfile(value):\n raise ConfigurationError(\"{} is not a file\".format(value), field_name)\n elif not os.access(value, os.R_OK):\n raise ConfigurationError(\"{} is not readable\".format(value), field_name)\n return value\n\n\nclass EnumerationValidator(object):\n \"\"\"\n Validator which ensures that a given config value is chosen from a list\n of valid string options.\n \"\"\"\n\n def __init__(self, valid_values, case_sensitive=False):\n \"\"\"\n valid_values\n List of valid string values for the config value\n case_sensitive\n Whether to compare case when comparing a value to the valid list.\n Defaults to False (case-insensitive)\n \"\"\"\n self.case_sensitive = case_sensitive\n if case_sensitive:\n self.valid_values = {s: s for s in valid_values}\n else:\n self.valid_values = {s.lower(): s for s in valid_values}\n\n def __call__(self, value, field_name):\n if self.case_sensitive:\n ret = self.valid_values.get(value)\n else:\n ret = self.valid_values.get(value.lower())\n if ret is None:\n raise ConfigurationError(\n \"{} is not in the list of valid values: {}\".format(value, list(self.valid_values.values())), field_name\n )\n return ret\n\n\ndef _log_level_callback(dict_key, old_value, new_value, config_instance):\n elasticapm_logger = logging.getLogger(\"elasticapm\")\n elasticapm_logger.setLevel(log_levels_map.get(new_value, 100))\n\n global logfile_set_up\n if not logfile_set_up and config_instance.log_file:\n logfile_set_up = True\n filehandler = logging.handlers.RotatingFileHandler(\n config_instance.log_file, maxBytes=config_instance.log_file_size, backupCount=1\n )\n try:\n import ecs_logging\n\n filehandler.setFormatter(ecs_logging.StdlibFormatter())\n except ImportError:\n pass\n elasticapm_logger.addHandler(filehandler)\n\n\ndef _log_ecs_reformatting_callback(dict_key, old_value, new_value, config_instance):\n \"\"\"\n If ecs_logging is installed and log_ecs_reformatting is set to \"override\", we should\n set the ecs_logging.StdlibFormatter as the formatted for every handler in\n the root logger, and set the default processor for structlog to the\n ecs_logging.StructlogFormatter.\n \"\"\"\n if new_value.lower() == \"override\":\n try:\n import ecs_logging\n except ImportError:\n return\n\n # Stdlib\n root_logger = logging.getLogger()\n formatter = ecs_logging.StdlibFormatter()\n for handler in root_logger.handlers:\n handler.setFormatter(formatter)\n\n # Structlog\n try:\n import structlog\n\n structlog.configure(processors=[ecs_logging.StructlogFormatter()])\n except ImportError:\n pass\n\n\nclass _ConfigBase(object):\n _NO_VALUE = object() # sentinel object\n\n def __init__(self, config_dict=None, env_dict=None, inline_dict=None, copy=False):\n \"\"\"\n config_dict\n Configuration dict as is common for frameworks such as flask and django.\n Keys match the _ConfigValue.dict_key (usually all caps)\n env_dict\n Environment variables dict. 
Keys match the _ConfigValue.env_key\n (usually \"ELASTIC_APM_\" + dict_key)\n inline_dict\n Any config passed in as kwargs to the Client object. Typically\n the keys match the names of the _ConfigValue variables in the Config\n object.\n copy\n Whether this object is being created to copy an existing Config\n object. If True, don't run the initial `update` (which would call\n callbacks if present)\n \"\"\"\n self._values = {}\n self._errors = {}\n self._dict_key_lookup = {}\n self.callbacks_queue = []\n for config_value in self.__class__.__dict__.values():\n if not isinstance(config_value, _ConfigValue):\n continue\n self._dict_key_lookup[config_value.dict_key] = config_value\n if not copy:\n self.update(config_dict, env_dict, inline_dict, initial=True)\n\n def update(self, config_dict=None, env_dict=None, inline_dict=None, initial=False):\n if config_dict is None:\n config_dict = {}\n if env_dict is None:\n env_dict = os.environ\n if inline_dict is None:\n inline_dict = {}\n for field, config_value in compat.iteritems(self.__class__.__dict__):\n if not isinstance(config_value, _ConfigValue):\n continue\n new_value = self._NO_VALUE\n # first check environment\n if config_value.env_key and config_value.env_key in env_dict:\n new_value = env_dict[config_value.env_key]\n # check the inline config\n elif field in inline_dict:\n new_value = inline_dict[field]\n # finally, check config dictionary\n elif config_value.dict_key in config_dict:\n new_value = config_dict[config_value.dict_key]\n # only set if new_value changed. We'll fall back to the field default if not.\n if new_value is not self._NO_VALUE:\n try:\n setattr(self, field, new_value)\n except ConfigurationError as e:\n self._errors[e.field_name] = str(e)\n # handle initial callbacks\n if (\n initial\n and config_value.callbacks_on_default\n and getattr(self, field) is not None\n and getattr(self, field) == config_value.default\n ):\n self.callbacks_queue.append((config_value.dict_key, self._NO_VALUE, config_value.default))\n # if a field has not been provided by any config source, we have to check separately if it is required\n if config_value.required and getattr(self, field) is None:\n self._errors[config_value.dict_key] = \"Configuration error: value for {} is required.\".format(\n config_value.dict_key\n )\n self.call_pending_callbacks()\n\n def call_pending_callbacks(self):\n \"\"\"\n Call callbacks for config options matching list of tuples:\n\n (dict_key, old_value, new_value)\n \"\"\"\n for dict_key, old_value, new_value in self.callbacks_queue:\n self._dict_key_lookup[dict_key].call_callbacks(old_value, new_value, self)\n self.callbacks_queue = []\n\n @property\n def values(self):\n return self._values\n\n @values.setter\n def values(self, values):\n self._values = values\n\n @property\n def errors(self):\n return self._errors\n\n def copy(self):\n c = self.__class__(copy=True)\n c._errors = {}\n c.values = self.values.copy()\n return c\n\n\nclass Config(_ConfigBase):\n service_name = _ConfigValue(\n \"SERVICE_NAME\", validators=[RegexValidator(\"^[a-zA-Z0-9 _-]+$\")], default=\"python_service\", required=True\n )\n service_node_name = _ConfigValue(\"SERVICE_NODE_NAME\")\n environment = _ConfigValue(\"ENVIRONMENT\")\n secret_token = _ConfigValue(\"SECRET_TOKEN\")\n api_key = _ConfigValue(\"API_KEY\")\n debug = _BoolConfigValue(\"DEBUG\", default=False)\n server_url = _ConfigValue(\"SERVER_URL\", default=\"http://localhost:8200\", required=True)\n server_cert = _ConfigValue(\"SERVER_CERT\", 
validators=[FileIsReadableValidator()])\n verify_server_cert = _BoolConfigValue(\"VERIFY_SERVER_CERT\", default=True)\n use_certifi = _BoolConfigValue(\"USE_CERTIFI\", default=True)\n include_paths = _ListConfigValue(\"INCLUDE_PATHS\")\n exclude_paths = _ListConfigValue(\"EXCLUDE_PATHS\", default=compat.get_default_library_patters())\n filter_exception_types = _ListConfigValue(\"FILTER_EXCEPTION_TYPES\")\n server_timeout = _ConfigValue(\n \"SERVER_TIMEOUT\",\n type=float,\n validators=[\n UnitValidator(r\"^((?:-)?\\d+)(ms|s|m)?$\", r\"\\d+(ms|s|m)\", {\"ms\": 0.001, \"s\": 1, \"m\": 60, None: 1000})\n ],\n default=5,\n )\n hostname = _ConfigValue(\"HOSTNAME\", default=socket.gethostname())\n auto_log_stacks = _BoolConfigValue(\"AUTO_LOG_STACKS\", default=True)\n transport_class = _ConfigValue(\"TRANSPORT_CLASS\", default=\"elasticapm.transport.http.Transport\", required=True)\n processors = _ListConfigValue(\n \"PROCESSORS\",\n default=[\n \"elasticapm.processors.sanitize_stacktrace_locals\",\n \"elasticapm.processors.sanitize_http_request_cookies\",\n \"elasticapm.processors.sanitize_http_response_cookies\",\n \"elasticapm.processors.sanitize_http_headers\",\n \"elasticapm.processors.sanitize_http_wsgi_env\",\n \"elasticapm.processors.sanitize_http_request_body\",\n ],\n )\n sanitize_field_names = _ListConfigValue(\n \"SANITIZE_FIELD_NAMES\", type=starmatch_to_regex, default=BASE_SANITIZE_FIELD_NAMES\n )\n metrics_sets = _ListConfigValue(\n \"METRICS_SETS\",\n default=[\n \"elasticapm.metrics.sets.cpu.CPUMetricSet\",\n ],\n )\n metrics_interval = _ConfigValue(\n \"METRICS_INTERVAL\",\n type=int,\n validators=[duration_validator, ExcludeRangeValidator(1, 999, \"{range_start} - {range_end} ms\")],\n default=30000,\n )\n breakdown_metrics = _BoolConfigValue(\"BREAKDOWN_METRICS\", default=True)\n prometheus_metrics = _BoolConfigValue(\"PROMETHEUS_METRICS\", default=False)\n prometheus_metrics_prefix = _ConfigValue(\"PROMETHEUS_METRICS_PREFIX\", default=\"prometheus.metrics.\")\n disable_metrics = _ListConfigValue(\"DISABLE_METRICS\", type=starmatch_to_regex, default=[])\n central_config = _BoolConfigValue(\"CENTRAL_CONFIG\", default=True)\n api_request_size = _ConfigValue(\"API_REQUEST_SIZE\", type=int, validators=[size_validator], default=768 * 1024)\n api_request_time = _ConfigValue(\"API_REQUEST_TIME\", type=int, validators=[duration_validator], default=10 * 1000)\n transaction_sample_rate = _ConfigValue(\n \"TRANSACTION_SAMPLE_RATE\", type=float, validators=[PrecisionValidator(4, 0.0001)], default=1.0\n )\n transaction_max_spans = _ConfigValue(\"TRANSACTION_MAX_SPANS\", type=int, default=500)\n stack_trace_limit = _ConfigValue(\"STACK_TRACE_LIMIT\", type=int, default=500)\n span_frames_min_duration = _ConfigValue(\n \"SPAN_FRAMES_MIN_DURATION\",\n default=5,\n validators=[\n UnitValidator(r\"^((?:-)?\\d+)(ms|s|m)?$\", r\"\\d+(ms|s|m)\", {\"ms\": 1, \"s\": 1000, \"m\": 60000, None: 1})\n ],\n type=int,\n )\n span_compression_enabled = _BoolConfigValue(\"SPAN_COMPRESSION_ENABLED\", default=False)\n span_compression_exact_match_max_duration = _ConfigValue(\n \"SPAN_COMPRESSION_EXACT_MATCH_MAX_DURATION\",\n default=50,\n validators=[duration_validator],\n type=int,\n )\n span_compression_same_kind_max_duration = _ConfigValue(\n \"SPAN_COMPRESSION_SAME_KIND_MAX_DURATION\",\n default=5,\n validators=[duration_validator],\n type=int,\n )\n exit_span_min_duration = _ConfigValue(\n \"EXIT_SPAN_MIN_DURATION\",\n default=1,\n validators=[duration_validator],\n type=float,\n )\n 
collect_local_variables = _ConfigValue(\"COLLECT_LOCAL_VARIABLES\", default=\"errors\")\n source_lines_error_app_frames = _ConfigValue(\"SOURCE_LINES_ERROR_APP_FRAMES\", type=int, default=5)\n source_lines_error_library_frames = _ConfigValue(\"SOURCE_LINES_ERROR_LIBRARY_FRAMES\", type=int, default=5)\n source_lines_span_app_frames = _ConfigValue(\"SOURCE_LINES_SPAN_APP_FRAMES\", type=int, default=0)\n source_lines_span_library_frames = _ConfigValue(\"SOURCE_LINES_SPAN_LIBRARY_FRAMES\", type=int, default=0)\n local_var_max_length = _ConfigValue(\"LOCAL_VAR_MAX_LENGTH\", type=int, default=200)\n local_var_list_max_length = _ConfigValue(\"LOCAL_VAR_LIST_MAX_LENGTH\", type=int, default=10)\n local_var_dict_max_length = _ConfigValue(\"LOCAL_VAR_DICT_MAX_LENGTH\", type=int, default=10)\n capture_body = _ConfigValue(\n \"CAPTURE_BODY\",\n default=\"off\",\n validators=[lambda val, _: {\"errors\": \"error\", \"transactions\": \"transaction\"}.get(val, val)],\n )\n async_mode = _BoolConfigValue(\"ASYNC_MODE\", default=True)\n instrument_django_middleware = _BoolConfigValue(\"INSTRUMENT_DJANGO_MIDDLEWARE\", default=True)\n autoinsert_django_middleware = _BoolConfigValue(\"AUTOINSERT_DJANGO_MIDDLEWARE\", default=True)\n transactions_ignore_patterns = _ListConfigValue(\"TRANSACTIONS_IGNORE_PATTERNS\", default=[])\n transaction_ignore_urls = _ListConfigValue(\"TRANSACTION_IGNORE_URLS\", type=starmatch_to_regex, default=[])\n service_version = _ConfigValue(\"SERVICE_VERSION\")\n framework_name = _ConfigValue(\"FRAMEWORK_NAME\")\n framework_version = _ConfigValue(\"FRAMEWORK_VERSION\")\n global_labels = _DictConfigValue(\"GLOBAL_LABELS\")\n disable_send = _BoolConfigValue(\"DISABLE_SEND\", default=False)\n enabled = _BoolConfigValue(\"ENABLED\", default=True)\n recording = _BoolConfigValue(\"RECORDING\", default=True)\n instrument = _BoolConfigValue(\"INSTRUMENT\", default=True)\n enable_distributed_tracing = _BoolConfigValue(\"ENABLE_DISTRIBUTED_TRACING\", default=True)\n capture_headers = _BoolConfigValue(\"CAPTURE_HEADERS\", default=True)\n django_transaction_name_from_route = _BoolConfigValue(\"DJANGO_TRANSACTION_NAME_FROM_ROUTE\", default=False)\n disable_log_record_factory = _BoolConfigValue(\"DISABLE_LOG_RECORD_FACTORY\", default=False)\n use_elastic_traceparent_header = _BoolConfigValue(\"USE_ELASTIC_TRACEPARENT_HEADER\", default=True)\n use_elastic_excepthook = _BoolConfigValue(\"USE_ELASTIC_EXCEPTHOOK\", default=False)\n cloud_provider = _ConfigValue(\"CLOUD_PROVIDER\", default=True)\n log_level = _ConfigValue(\n \"LOG_LEVEL\",\n validators=[EnumerationValidator([\"trace\", \"debug\", \"info\", \"warning\", \"warn\", \"error\", \"critical\", \"off\"])],\n callbacks=[_log_level_callback],\n )\n log_file = _ConfigValue(\"LOG_FILE\", default=\"\")\n log_file_size = _ConfigValue(\"LOG_FILE_SIZE\", validators=[size_validator], type=int, default=50 * 1024 * 1024)\n log_ecs_reformatting = _ConfigValue(\n \"LOG_ECS_REFORMATTING\",\n validators=[EnumerationValidator([\"off\", \"override\"])],\n callbacks=[_log_ecs_reformatting_callback],\n default=\"off\",\n )\n\n @property\n def is_recording(self):\n if not self.enabled:\n return False\n else:\n return self.recording\n\n\nclass VersionedConfig(ThreadManager):\n \"\"\"\n A thin layer around Config that provides versioning\n \"\"\"\n\n __slots__ = (\n \"_config\",\n \"_version\",\n \"_first_config\",\n \"_first_version\",\n \"_lock\",\n \"transport\",\n \"_update_thread\",\n \"pid\",\n \"start_stop_order\",\n )\n\n def __init__(self, config_object, 
version, transport=None):\n \"\"\"\n Create a new VersionedConfig with an initial Config object\n :param config_object: the initial Config object\n :param version: a version identifier for the configuration\n \"\"\"\n self._config = self._first_config = config_object\n self._version = self._first_version = version\n self.transport = transport\n self._lock = threading.Lock()\n self._update_thread = None\n super(VersionedConfig, self).__init__()\n\n def update(self, version, **config):\n \"\"\"\n Update the configuration version\n :param version: version identifier for the new configuration\n :param config: a key/value map of new configuration\n :return: configuration errors, if any\n \"\"\"\n new_config = self._config.copy()\n\n # pass an empty env dict to ensure the environment doesn't get precedence\n new_config.update(inline_dict=config, env_dict={})\n if not new_config.errors:\n with self._lock:\n self._version = version\n self._config = new_config\n else:\n return new_config.errors\n\n def reset(self):\n \"\"\"\n Reset state to the original configuration\n\n Note that because ConfigurationValues can have callbacks, we need to\n note any differences between the original configuration and the most\n recent configuration and run any callbacks that might exist for those\n values.\n \"\"\"\n callbacks = []\n for key in compat.iterkeys(self._config.values):\n if key in self._first_config.values and self._config.values[key] != self._first_config.values[key]:\n callbacks.append((key, self._config.values[key], self._first_config.values[key]))\n\n with self._lock:\n self._version = self._first_version\n self._config = self._first_config\n\n self._config.callbacks_queue.extend(callbacks)\n self._config.call_pending_callbacks()\n\n @property\n def changed(self):\n return self._config != self._first_config\n\n def __getattr__(self, item):\n return getattr(self._config, item)\n\n def __setattr__(self, name, value):\n if name not in self.__slots__:\n setattr(self._config, name, value)\n else:\n super(VersionedConfig, self).__setattr__(name, value)\n\n @property\n def config_version(self):\n return self._version\n\n def update_config(self):\n if not self.transport:\n logger.warning(\"No transport set for config updates, skipping\")\n return\n logger.debug(\"Checking for new config...\")\n keys = {\"service\": {\"name\": self.service_name}}\n if self.environment:\n keys[\"service\"][\"environment\"] = self.environment\n new_version, new_config, next_run = self.transport.get_config(self.config_version, keys)\n if new_version and new_config:\n errors = self.update(new_version, **new_config)\n if errors:\n logger.error(\"Error applying new configuration: %s\", repr(errors))\n else:\n logger.info(\n \"Applied new remote configuration: %s\",\n \"; \".join(\n \"%s=%s\" % (compat.text_type(k), compat.text_type(v)) for k, v in compat.iteritems(new_config)\n ),\n )\n elif new_version == self.config_version:\n logger.debug(\"Remote config unchanged\")\n elif not new_config and self.changed:\n logger.debug(\"Remote config disappeared, resetting to original\")\n self.reset()\n\n return next_run\n\n def start_thread(self, pid=None):\n self._update_thread = IntervalTimer(\n self.update_config, 1, \"eapm conf updater\", daemon=True, evaluate_function_interval=True\n )\n self._update_thread.start()\n super(VersionedConfig, self).start_thread(pid=pid)\n\n def stop_thread(self):\n if self._update_thread:\n self._update_thread.cancel()\n self._update_thread = None\n\n\ndef setup_logging(handler):\n \"\"\"\n Configures 
logging to pipe to Elastic APM.\n\n For a typical Python install:\n\n >>> from elasticapm.handlers.logging import LoggingHandler\n >>> client = ElasticAPM(...)\n >>> setup_logging(LoggingHandler(client))\n\n Within Django:\n\n >>> from elasticapm.contrib.django.handlers import LoggingHandler\n >>> setup_logging(LoggingHandler())\n\n Returns a boolean based on if logging was configured or not.\n \"\"\"\n # TODO We should probably revisit this. Does it make more sense as\n # a method within the Client class? The Client object could easily\n # pass itself into LoggingHandler and we could eliminate args altogether.\n logger = logging.getLogger()\n if handler.__class__ in map(type, logger.handlers):\n return False\n\n logger.addHandler(handler)\n\n return True\n", "path": "elasticapm/conf/__init__.py" } ]
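The `setup_logging` helper at the end of the file above guards against attaching the same handler type twice by comparing handler classes already present on the root logger. As an aside, here is a minimal standalone sketch of that pattern using only the stdlib `logging` module (no elasticapm required); the printed results assume a fresh interpreter with no handlers pre-attached:

```python
import logging


def setup_logging(handler):
    """Attach handler to the root logger unless one of its class is already attached."""
    root = logging.getLogger()
    if handler.__class__ in map(type, root.handlers):
        return False  # a handler of this class is already attached
    root.addHandler(handler)
    return True


# The first call attaches the handler, the second call is a no-op.
print(setup_logging(logging.StreamHandler()))  # True
print(setup_logging(logging.StreamHandler()))  # False
```

This duplicate-class check is what makes the repeated `setup_logging(...)` calls in the docstring examples idempotent.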
diff --git a/elasticapm/conf/__init__.py b/elasticapm/conf/__init__.py
index 65de90dca..660539b6e 100644
--- a/elasticapm/conf/__init__.py
+++ b/elasticapm/conf/__init__.py
@@ -594,7 +594,7 @@ class Config(_ConfigBase):
         type=int,
     )
     exit_span_min_duration = _ConfigValue(
-        "exit_span_min_duration",
+        "EXIT_SPAN_MIN_DURATION",
         default=1,
         validators=[duration_validator],
         type=float,
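The diff above only changes the casing of a `dict_key`, but that casing matters: as `_ConfigValue.__init__` shows, the environment-variable name defaults to `"ELASTIC_APM_" + dict_key`, and dict-style config lookups in `update()` use `dict_key` verbatim. Below is a simplified, self-contained sketch of that derivation; the class and method names are illustrative stand-ins, not elasticapm's actual API:

```python
class ConfigValue:
    """Toy stand-in for _ConfigValue, keeping only the key derivation."""

    def __init__(self, dict_key, env_key=None):
        self.dict_key = dict_key
        # Same default as in the file above: prefix the dict key.
        self.env_key = env_key or "ELASTIC_APM_" + dict_key

    def resolve(self, env, config_dict):
        # Environment wins over dict config, mirroring update() above.
        if self.env_key in env:
            return env[self.env_key]
        return config_dict.get(self.dict_key)


env = {"ELASTIC_APM_EXIT_SPAN_MIN_DURATION": "5ms"}
print(ConfigValue("EXIT_SPAN_MIN_DURATION").resolve(env, {}))  # '5ms'
print(ConfigValue("exit_span_min_duration").resolve(env, {}))  # None: lowercase key misses the env var
```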
conda__conda-build-570
AppVeyor: AttributeError: 'module' object has no attribute 'get_pid_list'

https://ci.appveyor.com/project/mpi4py/mpi4py/build/2.0.0a0-17/job/965h1pw9k7476768#L1187

conda info: https://ci.appveyor.com/project/mpi4py/mpi4py/build/2.0.0a0-17/job/965h1pw9k7476768#L1076

Please note that a few lines above I ran: `C:\Anaconda\Scripts\conda.exe install --yes --quiet anaconda-client conda-build jinja2`
[ { "content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport shutil\nfrom os.path import dirname, isdir, isfile, join, exists\n\nimport conda.config as cc\nfrom conda.compat import iteritems\n\nfrom conda_build.config import config\nfrom conda_build import environ\nfrom conda_build import source\nfrom conda_build.utils import _check_call\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nassert sys.platform == 'win32'\n\n\ndef fix_staged_scripts():\n \"\"\"\n Fixes scripts which have been installed unix-style to have a .bat\n helper\n \"\"\"\n scripts_dir = join(config.build_prefix, 'Scripts')\n if not isdir(scripts_dir):\n return\n for fn in os.listdir(scripts_dir):\n # process all the extensionless files\n if not isfile(join(scripts_dir, fn)) or '.' in fn:\n continue\n\n with open(join(scripts_dir, fn)) as f:\n line = f.readline().lower()\n # If it's a #!python script\n if not (line.startswith('#!') and 'python' in line.lower()):\n continue\n print('Adjusting unix-style #! script %s, '\n 'and adding a .bat file for it' % fn)\n # copy it with a .py extension (skipping that first #! line)\n with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:\n fo.write(f.read())\n # now create the .exe file\n shutil.copyfile(join(dirname(__file__),\n 'cli-%d.exe' % (8 * tuple.__itemsize__)),\n join(scripts_dir, fn + '.exe'))\n\n # remove the original script\n os.remove(join(scripts_dir, fn))\n\n\ndef msvc_env_cmd():\n if 'ProgramFiles(x86)' in os.environ:\n program_files = os.environ['ProgramFiles(x86)']\n else:\n program_files = os.environ['ProgramFiles']\n\n localappdata = os.environ.get(\"localappdata\")\n\n if config.PY3K:\n vcvarsall = os.path.join(program_files,\n r'Microsoft Visual Studio 10.0'\n r'\\VC\\vcvarsall.bat')\n else:\n vcvarsall = os.path.join(program_files,\n r'Microsoft Visual Studio 9.0'\n r'\\VC\\vcvarsall.bat')\n\n # Try the Microsoft Visual C++ Compiler for Python 2.7\n if not isfile(vcvarsall) and localappdata and not config.PY3K:\n vcvarsall = os.path.join(localappdata, \"Programs\", \"Common\",\n \"Microsoft\", \"Visual C++ for Python\", \"9.0\", \"vcvarsall.bat\")\n if not isfile(vcvarsall) and program_files and not config.PY3K:\n vcvarsall = os.path.join(program_files, 'Common Files',\n 'Microsoft', 'Visual C++ for Python', \"9.0\", \"vcvarsall.bat\")\n if not isfile(vcvarsall):\n print(\"Warning: Couldn't find Visual Studio: %r\" % vcvarsall)\n return ''\n\n return '''\\\ncall \"%s\" %s\n''' % (vcvarsall, {32: 'x86', 64: 'amd64'}[cc.bits])\n\n\ndef kill_processes():\n if psutil is None:\n return\n for n in psutil.get_pid_list():\n try:\n p = psutil.Process(n)\n if p.name.lower() == 'msbuild.exe':\n print('Terminating:', p.name)\n p.terminate()\n except:\n continue\n\n\ndef build(m):\n env = dict(os.environ)\n env.update(environ.get_dict(m))\n\n for name in 'BIN', 'INC', 'LIB':\n path = env['LIBRARY_' + name]\n if not isdir(path):\n os.makedirs(path)\n\n src_dir = source.get_dir()\n bld_bat = join(m.path, 'bld.bat')\n if exists(bld_bat):\n with open(bld_bat) as fi:\n data = fi.read()\n with open(join(src_dir, 'bld.bat'), 'w') as fo:\n fo.write(msvc_env_cmd())\n for kv in iteritems(env):\n fo.write('set \"%s=%s\"\\n' % kv)\n # more debuggable with echo on\n fo.write('@echo on\\n')\n fo.write(\"REM ===== end generated header =====\\n\")\n fo.write(data)\n\n cmd = [os.environ['COMSPEC'], '/c', 'call', 'bld.bat']\n _check_call(cmd, cwd=src_dir)\n kill_processes()\n fix_staged_scripts()\n", "path": 
"conda_build/windows.py" } ]
[ { "content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport shutil\nfrom os.path import dirname, isdir, isfile, join, exists\n\nimport conda.config as cc\nfrom conda.compat import iteritems\n\nfrom conda_build.config import config\nfrom conda_build import environ\nfrom conda_build import source\nfrom conda_build.utils import _check_call\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nassert sys.platform == 'win32'\n\n\ndef fix_staged_scripts():\n \"\"\"\n Fixes scripts which have been installed unix-style to have a .bat\n helper\n \"\"\"\n scripts_dir = join(config.build_prefix, 'Scripts')\n if not isdir(scripts_dir):\n return\n for fn in os.listdir(scripts_dir):\n # process all the extensionless files\n if not isfile(join(scripts_dir, fn)) or '.' in fn:\n continue\n\n with open(join(scripts_dir, fn)) as f:\n line = f.readline().lower()\n # If it's a #!python script\n if not (line.startswith('#!') and 'python' in line.lower()):\n continue\n print('Adjusting unix-style #! script %s, '\n 'and adding a .bat file for it' % fn)\n # copy it with a .py extension (skipping that first #! line)\n with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:\n fo.write(f.read())\n # now create the .exe file\n shutil.copyfile(join(dirname(__file__),\n 'cli-%d.exe' % (8 * tuple.__itemsize__)),\n join(scripts_dir, fn + '.exe'))\n\n # remove the original script\n os.remove(join(scripts_dir, fn))\n\n\ndef msvc_env_cmd():\n if 'ProgramFiles(x86)' in os.environ:\n program_files = os.environ['ProgramFiles(x86)']\n else:\n program_files = os.environ['ProgramFiles']\n\n localappdata = os.environ.get(\"localappdata\")\n\n if config.PY3K:\n vcvarsall = os.path.join(program_files,\n r'Microsoft Visual Studio 10.0'\n r'\\VC\\vcvarsall.bat')\n else:\n vcvarsall = os.path.join(program_files,\n r'Microsoft Visual Studio 9.0'\n r'\\VC\\vcvarsall.bat')\n\n # Try the Microsoft Visual C++ Compiler for Python 2.7\n if not isfile(vcvarsall) and localappdata and not config.PY3K:\n vcvarsall = os.path.join(localappdata, \"Programs\", \"Common\",\n \"Microsoft\", \"Visual C++ for Python\", \"9.0\", \"vcvarsall.bat\")\n if not isfile(vcvarsall) and program_files and not config.PY3K:\n vcvarsall = os.path.join(program_files, 'Common Files',\n 'Microsoft', 'Visual C++ for Python', \"9.0\", \"vcvarsall.bat\")\n if not isfile(vcvarsall):\n print(\"Warning: Couldn't find Visual Studio: %r\" % vcvarsall)\n return ''\n\n return '''\\\ncall \"%s\" %s\n''' % (vcvarsall, {32: 'x86', 64: 'amd64'}[cc.bits])\n\n\ndef kill_processes():\n if psutil is None:\n return\n for n in psutil.pids():\n try:\n p = psutil.Process(n)\n if p.name.lower() == 'msbuild.exe':\n print('Terminating:', p.name)\n p.terminate()\n except:\n continue\n\n\ndef build(m):\n env = dict(os.environ)\n env.update(environ.get_dict(m))\n\n for name in 'BIN', 'INC', 'LIB':\n path = env['LIBRARY_' + name]\n if not isdir(path):\n os.makedirs(path)\n\n src_dir = source.get_dir()\n bld_bat = join(m.path, 'bld.bat')\n if exists(bld_bat):\n with open(bld_bat) as fi:\n data = fi.read()\n with open(join(src_dir, 'bld.bat'), 'w') as fo:\n fo.write(msvc_env_cmd())\n for kv in iteritems(env):\n fo.write('set \"%s=%s\"\\n' % kv)\n # more debuggable with echo on\n fo.write('@echo on\\n')\n fo.write(\"REM ===== end generated header =====\\n\")\n fo.write(data)\n\n cmd = [os.environ['COMSPEC'], '/c', 'call', 'bld.bat']\n _check_call(cmd, cwd=src_dir)\n kill_processes()\n fix_staged_scripts()\n", "path": 
"conda_build/windows.py" } ]
diff --git a/conda_build/windows.py b/conda_build/windows.py
index 31621b82da..74592b709a 100644
--- a/conda_build/windows.py
+++ b/conda_build/windows.py
@@ -89,7 +89,7 @@ def msvc_env_cmd():
 def kill_processes():
     if psutil is None:
         return
-    for n in psutil.get_pid_list():
+    for n in psutil.pids():
         try:
             p = psutil.Process(n)
             if p.name.lower() == 'msbuild.exe':
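For reference on the fix above: `psutil.get_pid_list()` was renamed to `psutil.pids()` in psutil 2.0 and the old alias was later removed, which is what the AppVeyor traceback hit. The sketch below is not conda-build's code; it shows a more modern way to walk processes with `psutil.process_iter()`, which handles PIDs that disappear mid-loop (the `attrs` argument needs psutil >= 5.3):

```python
import psutil


def terminate_by_name(fragment):
    """Terminate every process whose name contains `fragment` (case-insensitive)."""
    for proc in psutil.process_iter(attrs=["name"]):
        name = (proc.info.get("name") or "").lower()
        if fragment.lower() in name:
            try:
                print("Terminating:", name)
                proc.terminate()
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue


# Roughly what conda-build's kill_processes() does after a Windows build:
# terminate_by_name("msbuild")
```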
ibis-project__ibis-3630
bug(duckdb): duckdb backend should add in CAST for some bind parameters

DuckDB casts bind parameters `?` to strings, which leads to binder errors with some queries.

If we have a small tpch dataset:

```python
import duckdb
con = duckdb.connect("tpch.ddb")
con.execute("CALL dbgen(sf=0.1)")

import ibis
con = ibis.duckdb.connect("tpch.ddb")
t = con.table('orders')
expr = t.aggregate(high_line_count=(t.o_orderpriority.case().when('1-URGENT', 1).else_(0).end().sum()))
expr.execute()
```

raises

```
RuntimeError: Binder Error: No function matches the given name and argument types 'sum(VARCHAR)'. You might need to add explicit type casts.
    Candidate functions:
    sum(DECIMAL) -> DECIMAL
    sum(SMALLINT) -> HUGEINT
    sum(INTEGER) -> HUGEINT
    sum(BIGINT) -> HUGEINT
    sum(HUGEINT) -> HUGEINT
    sum(DOUBLE) -> DOUBLE

LINE 1: SELECT sum(CASE WHEN (t0.o_orderpriority = ?) ...
```

because our generated SQL doesn't have explicit casts:

```
print(expr.compile())

SELECT sum(CASE WHEN (t0.o_orderpriority = ?) THEN ? ELSE ? END) AS high_line_count
FROM orders AS t0
```

we want to generate

```
SELECT sum(CASE WHEN (t0.o_orderpriority = ?) THEN cast(? as INTEGER) ELSE cast(? as INTEGER) END) AS high_line_count
FROM orders as t0
```
[ { "content": "import collections\nimport operator\n\nimport numpy as np\nimport sqlalchemy as sa\n\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nfrom ibis.backends.base.sql.alchemy import to_sqla_type, unary\n\nfrom ..base.sql.alchemy.registry import _geospatial_functions, _table_column\nfrom ..postgres.registry import fixed_arity, operation_registry\n\noperation_registry = {\n op: operation_registry[op]\n # duckdb does not support geospatial operations, but shares most of the\n # remaining postgres rules\n for op in operation_registry.keys() - _geospatial_functions.keys()\n}\n\n\ndef _round(t, expr):\n arg, digits = expr.op().args\n sa_arg = t.translate(arg)\n\n if digits is None:\n return sa.func.round(sa_arg)\n\n return sa.func.round(sa_arg, t.translate(digits))\n\n\n_LOG_BASE_FUNCS = {\n 2: sa.func.log2,\n 10: sa.func.log,\n}\n\n\ndef _generic_log(arg, base):\n return sa.func.ln(arg) / sa.func.ln(base)\n\n\ndef _log(t, expr):\n arg, base = expr.op().args\n sa_arg = t.translate(arg)\n if base is not None:\n sa_base = t.translate(base)\n try:\n base_value = sa_base.value\n except AttributeError:\n return _generic_log(sa_arg, sa_base)\n else:\n func = _LOG_BASE_FUNCS.get(base_value, _generic_log)\n return func(sa_arg)\n return sa.func.ln(sa_arg)\n\n\ndef _timestamp_from_unix(t, expr):\n op = expr.op()\n arg, unit = op.args\n arg = t.translate(arg)\n\n if unit in {\"us\", \"ns\"}:\n raise ValueError(f\"`{unit}` unit is not supported!\")\n\n if unit == \"ms\":\n return sa.func.epoch_ms(arg)\n elif unit == \"s\":\n return sa.func.to_timestamp(arg)\n\n\ndef _literal(_, expr):\n dtype = expr.type()\n sqla_type = to_sqla_type(dtype)\n op = expr.op()\n value = op.value\n\n if isinstance(dtype, dt.Interval):\n return sa.text(f\"INTERVAL '{value} {dtype.resolution}'\")\n elif isinstance(dtype, dt.Set) or (\n isinstance(value, collections.abc.Sequence)\n and not isinstance(value, str)\n ):\n return sa.cast(sa.func.list_value(*value), sqla_type)\n elif isinstance(value, np.ndarray):\n return sa.cast(sa.func.list_value(*value.tolist()), sqla_type)\n elif isinstance(value, collections.abc.Mapping):\n if isinstance(dtype, dt.Struct):\n placeholders = \", \".join(\n f\"{key!r}: :v{i}\" for i, key in enumerate(value.keys())\n )\n return sa.text(f\"{{{placeholders}}}\").bindparams(\n *(\n sa.bindparam(f\"v{i:d}\", val)\n for i, val in enumerate(value.values())\n )\n )\n raise NotImplementedError(\n f\"Ibis dtype `{dtype}` with mapping type \"\n f\"`{type(value).__name__}` isn't yet supported with the duckdb \"\n \"backend\"\n )\n return sa.literal(value)\n\n\ndef _array_column(t, expr):\n (arg,) = expr.op().args\n sqla_type = to_sqla_type(expr.type())\n return sa.cast(sa.func.list_value(*map(t.translate, arg)), sqla_type)\n\n\ndef _struct_field(t, expr):\n op = expr.op()\n return sa.func.struct_extract(\n t.translate(op.arg),\n sa.text(repr(op.field)),\n type_=to_sqla_type(expr.type()),\n )\n\n\ndef _regex_extract(t, expr):\n string, pattern, index = map(t.translate, expr.op().args)\n result = sa.case(\n [\n (\n sa.func.regexp_matches(string, pattern),\n sa.func.regexp_extract(\n string,\n pattern,\n # DuckDB requires the index to be a constant so we compile\n # the value and inline it using sa.text\n sa.text(\n str(\n (index + 1).compile(\n compile_kwargs=dict(literal_binds=True)\n )\n )\n ),\n ),\n )\n ],\n else_=\"\",\n )\n return result\n\n\noperation_registry.update(\n {\n ops.ArrayColumn: _array_column,\n ops.ArrayConcat: fixed_arity('array_concat', 2),\n ops.ArrayIndex: 
fixed_arity('list_element', 2),\n ops.DayOfWeekName: unary(sa.func.dayname),\n ops.Literal: _literal,\n ops.Log2: unary(sa.func.log2),\n ops.Ln: unary(sa.func.ln),\n ops.Log: _log,\n # TODO: map operations, but DuckDB's maps are multimaps\n ops.Modulus: fixed_arity(operator.mod, 2),\n ops.Round: _round,\n ops.StructField: _struct_field,\n ops.TableColumn: _table_column,\n ops.TimestampDiff: fixed_arity('age', 2),\n ops.TimestampFromUNIX: _timestamp_from_unix,\n ops.Translate: fixed_arity('replace', 3),\n ops.TimestampNow: fixed_arity('now', 0),\n ops.RegexExtract: _regex_extract,\n ops.RegexReplace: fixed_arity(\"regexp_replace\", 3),\n }\n)\n", "path": "ibis/backends/duckdb/registry.py" } ]
[ { "content": "import collections\nimport operator\n\nimport numpy as np\nimport sqlalchemy as sa\n\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nfrom ibis.backends.base.sql.alchemy import to_sqla_type, unary\n\nfrom ..base.sql.alchemy.registry import _geospatial_functions, _table_column\nfrom ..postgres.registry import fixed_arity, operation_registry\n\noperation_registry = {\n op: operation_registry[op]\n # duckdb does not support geospatial operations, but shares most of the\n # remaining postgres rules\n for op in operation_registry.keys() - _geospatial_functions.keys()\n}\n\n\ndef _round(t, expr):\n arg, digits = expr.op().args\n sa_arg = t.translate(arg)\n\n if digits is None:\n return sa.func.round(sa_arg)\n\n return sa.func.round(sa_arg, t.translate(digits))\n\n\n_LOG_BASE_FUNCS = {\n 2: sa.func.log2,\n 10: sa.func.log,\n}\n\n\ndef _generic_log(arg, base):\n return sa.func.ln(arg) / sa.func.ln(base)\n\n\ndef _log(t, expr):\n arg, base = expr.op().args\n sa_arg = t.translate(arg)\n if base is not None:\n sa_base = t.translate(base)\n try:\n base_value = sa_base.value\n except AttributeError:\n return _generic_log(sa_arg, sa_base)\n else:\n func = _LOG_BASE_FUNCS.get(base_value, _generic_log)\n return func(sa_arg)\n return sa.func.ln(sa_arg)\n\n\ndef _timestamp_from_unix(t, expr):\n op = expr.op()\n arg, unit = op.args\n arg = t.translate(arg)\n\n if unit in {\"us\", \"ns\"}:\n raise ValueError(f\"`{unit}` unit is not supported!\")\n\n if unit == \"ms\":\n return sa.func.epoch_ms(arg)\n elif unit == \"s\":\n return sa.func.to_timestamp(arg)\n\n\ndef _literal(_, expr):\n dtype = expr.type()\n sqla_type = to_sqla_type(dtype)\n op = expr.op()\n value = op.value\n\n if isinstance(dtype, dt.Interval):\n return sa.text(f\"INTERVAL '{value} {dtype.resolution}'\")\n elif isinstance(dtype, dt.Set) or (\n isinstance(value, collections.abc.Sequence)\n and not isinstance(value, str)\n ):\n return sa.cast(sa.func.list_value(*value), sqla_type)\n elif isinstance(value, np.ndarray):\n return sa.cast(sa.func.list_value(*value.tolist()), sqla_type)\n elif isinstance(value, collections.abc.Mapping):\n if isinstance(dtype, dt.Struct):\n placeholders = \", \".join(\n f\"{key!r}: :v{i}\" for i, key in enumerate(value.keys())\n )\n return sa.text(f\"{{{placeholders}}}\").bindparams(\n *(\n sa.bindparam(f\"v{i:d}\", val)\n for i, val in enumerate(value.values())\n )\n )\n raise NotImplementedError(\n f\"Ibis dtype `{dtype}` with mapping type \"\n f\"`{type(value).__name__}` isn't yet supported with the duckdb \"\n \"backend\"\n )\n return sa.cast(sa.literal(value), sqla_type)\n\n\ndef _array_column(t, expr):\n (arg,) = expr.op().args\n sqla_type = to_sqla_type(expr.type())\n return sa.cast(sa.func.list_value(*map(t.translate, arg)), sqla_type)\n\n\ndef _struct_field(t, expr):\n op = expr.op()\n return sa.func.struct_extract(\n t.translate(op.arg),\n sa.text(repr(op.field)),\n type_=to_sqla_type(expr.type()),\n )\n\n\ndef _regex_extract(t, expr):\n string, pattern, index = map(t.translate, expr.op().args)\n result = sa.case(\n [\n (\n sa.func.regexp_matches(string, pattern),\n sa.func.regexp_extract(\n string,\n pattern,\n # DuckDB requires the index to be a constant so we compile\n # the value and inline it using sa.text\n sa.text(\n str(\n (index + 1).compile(\n compile_kwargs=dict(literal_binds=True)\n )\n )\n ),\n ),\n )\n ],\n else_=\"\",\n )\n return result\n\n\noperation_registry.update(\n {\n ops.ArrayColumn: _array_column,\n ops.ArrayConcat: fixed_arity('array_concat', 
2),\n ops.ArrayIndex: fixed_arity('list_element', 2),\n ops.DayOfWeekName: unary(sa.func.dayname),\n ops.Literal: _literal,\n ops.Log2: unary(sa.func.log2),\n ops.Ln: unary(sa.func.ln),\n ops.Log: _log,\n # TODO: map operations, but DuckDB's maps are multimaps\n ops.Modulus: fixed_arity(operator.mod, 2),\n ops.Round: _round,\n ops.StructField: _struct_field,\n ops.TableColumn: _table_column,\n ops.TimestampDiff: fixed_arity('age', 2),\n ops.TimestampFromUNIX: _timestamp_from_unix,\n ops.Translate: fixed_arity('replace', 3),\n ops.TimestampNow: fixed_arity('now', 0),\n ops.RegexExtract: _regex_extract,\n ops.RegexReplace: fixed_arity(\"regexp_replace\", 3),\n }\n)\n", "path": "ibis/backends/duckdb/registry.py" } ]
diff --git a/ibis/backends/duckdb/registry.py b/ibis/backends/duckdb/registry.py
index d6b5f4032785..a3cbbaf4edbb 100644
--- a/ibis/backends/duckdb/registry.py
+++ b/ibis/backends/duckdb/registry.py
@@ -99,7 +99,7 @@ def _literal(_, expr):
             f"`{type(value).__name__}` isn't yet supported with the duckdb "
             "backend"
         )
-    return sa.literal(value)
+    return sa.cast(sa.literal(value), sqla_type)
 
 
 def _array_column(t, expr):
diff --git a/ibis/backends/tests/test_aggregation.py b/ibis/backends/tests/test_aggregation.py
index 36d9c7bff5d6..3b08455181f7 100644
--- a/ibis/backends/tests/test_aggregation.py
+++ b/ibis/backends/tests/test_aggregation.py
@@ -461,3 +461,14 @@ def collect_udf(v):
     )
 
     backend.assert_frame_equal(result, expected, check_like=True)
+
+
[email protected](["datafusion", "pyspark"])
+def test_binds_are_cast(alltypes):
+    expr = alltypes.aggregate(
+        high_line_count=(
+            alltypes.string_col.case().when('1-URGENT', 1).else_(0).end().sum()
+        )
+    )
+
+    expr.execute()
diff --git a/ibis/backends/tests/test_generic.py b/ibis/backends/tests/test_generic.py
index 86f3e0aff62f..0c59dc884824 100644
--- a/ibis/backends/tests/test_generic.py
+++ b/ibis/backends/tests/test_generic.py
@@ -111,7 +111,7 @@ def test_coalesce(backend, con, expr, expected):
         # False
         assert result == decimal.Decimal(str(expected))
     else:
-        assert result == expected
+        assert result == pytest.approx(expected)
 
 
 # TODO(dask) - identicalTo - #2553
diff --git a/ibis/backends/tests/test_temporal.py b/ibis/backends/tests/test_temporal.py
index 9e2a03f20e62..a49aaa48e4de 100644
--- a/ibis/backends/tests/test_temporal.py
+++ b/ibis/backends/tests/test_temporal.py
@@ -362,7 +362,7 @@ def convert_to_offset(x):
             lambda t, be: t.timestamp_col.date() - ibis.date(date_value),
             lambda t, be: t.timestamp_col.dt.floor('d') - date_value,
             id='date-subtract-date',
-            marks=pytest.mark.notimpl(["duckdb", 'pyspark']),
+            marks=pytest.mark.notimpl(["pyspark"]),
         ),
     ],
 )
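The core of the fix above is wrapping `sa.literal(value)` in `sa.cast(..., sqla_type)`. Here is a minimal SQLAlchemy sketch, independent of ibis and using SQLAlchemy 1.4-style `select()`, showing how that changes the compiled SQL; the default dialect renders named parameters (`:param_1`) where the DuckDB driver in the issue renders `?`, and exact labels may differ by SQLAlchemy version, but the added `CAST` is what lets DuckDB bind `sum(INTEGER)` instead of failing on `sum(VARCHAR)`:

```python
import sqlalchemy as sa

# A bare literal compiles to an untyped bind parameter...
plain = sa.select(sa.func.sum(sa.literal(1)))
# ...while wrapping it in a cast emits an explicit CAST around the parameter.
casted = sa.select(sa.func.sum(sa.cast(sa.literal(1), sa.Integer())))

print(plain.compile())   # roughly: SELECT sum(:param_1) AS sum_1
print(casted.compile())  # roughly: SELECT sum(CAST(:param_1 AS INTEGER)) AS sum_1
```

In the patched `_literal`, this wrapping is applied uniformly, so scalar literals of every supported type reach DuckDB with an explicit target type.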
ansible__molecule-3446
remnants of ansible-lint in molecule docs

<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Do not report bugs before reproducing them with the code of the main branch! -->
<!--- Please also check https://molecule.readthedocs.io/en/latest/faq.html --->
<!--- Please use https://groups.google.com/forum/#!forum/molecule-users for usage questions -->

# Issue Type

- Bug report

# Molecule and Ansible details

Note: the python was installed via conda, but the python packages are installed using pip, not conda.

```
ansible [core 2.12.2]
python version = 3.9.10 | packaged by conda-forge | (main, Feb 1 2022, 21:25:34) [Clang 11.1.0 ]
jinja version = 3.0.3
libyaml = True
ansible python module location = /opt/homebrew/Caskroom/miniconda/base/envs/tmptest/lib/python3.9/site-packages/ansible

molecule 3.6.1 using python 3.9
    ansible:2.12.2
    delegated:3.6.1 from molecule
```

Molecule installation method (one of):

- pip

Ansible installation method (one of):

- pip

Detail any linters or test runners used: ansible-lint

# Desired Behavior

Assuming ansible-lint was removed for good reason and shouldn't be added back into the `molecule[lint]` extra, I think the following would make it clearer that ansible-lint is never installed by molecule:

* add a note here that ansible-lint is not installed by molecule, even if you installed the `[lint]` extra: https://molecule.readthedocs.io/en/latest/configuration.html?highlight=ansible-lint#lint
* remove the misleading comment in setup.cfg: https://github.com/ansible-community/molecule/blob/c33c205b570cd95d599d16afa8772fabba51dd40/setup.cfg#L112
* change to the actual default (yamllint): https://github.com/ansible-community/molecule/blob/c7ae6a27bed9ba6423d6dfe11d8e0d5c54da094f/src/molecule/command/init/scenario.py#L165

# Actual Behaviour

I can't say for sure (I don't know the project history well enough), but I *think* ansible-lint was once available through the `molecule[lint]` extra, was subsequently promoted to a regular dependency, and was eventually removed from molecule's dependencies altogether. It appears a few spots in the docs got missed and may mislead a new user (see: me) into thinking ansible-lint will be installed with `pip install molecule[lint]`, when in fact it is not.
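One of the spots the issue flags is a `--lint-name` option whose choices and default are `yamllint` while the help string still says `(ansible-lint)`. As an illustration only (this is not molecule's code), a small click sketch of one way to keep help text and defaults from drifting apart is to let click render the default itself:

```python
import click


@click.command()
@click.option(
    "--lint-name",
    type=click.Choice(["yamllint"]),
    default="yamllint",
    show_default=True,  # click appends "[default: yamllint]" to --help output
    help="Name of lint to initialize.",
)
def scenario(lint_name):
    click.echo(f"lint: {lint_name}")


if __name__ == "__main__":
    scenario()
```

The fix applied in the after-file below takes the simpler route of correcting the hard-coded string to `(yamllint)`.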
[ { "content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\"\"\"Base class used by init scenario command.\"\"\"\n\nimport logging\nimport os\nfrom typing import Dict\n\nimport click\n\nfrom molecule import api, config, util\nfrom molecule.command import base as command_base\nfrom molecule.command.init import base\nfrom molecule.config import DEFAULT_DRIVER\n\nLOG = logging.getLogger(__name__)\n\n\nclass Scenario(base.Base):\n \"\"\"\n Scenario Class.\n\n .. program:: molecule init scenario bar --role-name foo\n\n .. option:: molecule init scenario bar --role-name foo\n\n Initialize a new scenario. In order to customise the role, please refer\n to the `init role` command.\n\n .. program:: cd foo; molecule init scenario bar --role-name foo\n\n .. option:: cd foo; molecule init scenario bar --role-name foo\n\n Initialize an existing role with Molecule:\n\n .. program:: cd foo; molecule init scenario bar --role-name foo\n\n .. option:: cd foo; molecule init scenario bar --role-name foo\n\n Initialize a new scenario using a local *cookiecutter* template for the\n driver configuration.\n \"\"\" # noqa\n\n def __init__(self, command_args: Dict[str, str]):\n \"\"\"Construct Scenario.\"\"\"\n self._command_args = command_args\n\n def execute(self):\n \"\"\"\n Execute the actions necessary to perform a `molecule init scenario` and \\\n returns None.\n\n :return: None\n \"\"\"\n scenario_name = self._command_args[\"scenario_name\"]\n role_name = os.getcwd().split(os.sep)[-1]\n role_directory = util.abs_path(os.path.join(os.getcwd(), os.pardir))\n\n msg = f\"Initializing new scenario {scenario_name}...\"\n LOG.info(msg)\n molecule_directory = config.molecule_directory(\n os.path.join(role_directory, role_name)\n )\n scenario_directory = os.path.join(molecule_directory, scenario_name)\n\n if os.path.isdir(scenario_directory):\n msg = (\n f\"The directory molecule/{scenario_name} exists. 
\"\n \"Cannot create new scenario.\"\n )\n util.sysexit_with_message(msg)\n\n driver_template = api.drivers()[\n self._command_args[\"driver_name\"]\n ].template_dir()\n if \"driver_template\" in self._command_args:\n self._validate_template_dir(self._command_args[\"driver_template\"])\n cli_driver_template = f\"{self._command_args['driver_template']}/{self._command_args['driver_name']}\"\n if os.path.isdir(cli_driver_template):\n driver_template = cli_driver_template\n else:\n LOG.warning(\n \"Driver not found in custom template directory(%s), \"\n \"using the default template instead\",\n cli_driver_template,\n )\n scenario_base_directory = os.path.join(role_directory, role_name)\n templates = [\n driver_template,\n api.verifiers()[self._command_args[\"verifier_name\"]].template_dir(),\n ]\n self._process_templates(\"molecule\", self._command_args, role_directory)\n for template in templates:\n self._process_templates(\n template, self._command_args, scenario_base_directory\n )\n\n role_directory = os.path.join(role_directory, role_name)\n msg = f\"Initialized scenario in {scenario_directory} successfully.\"\n LOG.info(msg)\n\n\ndef _role_exists(ctx, param, value: str): # pragma: no cover\n # if role name was not mentioned we assume that current directory is the\n # one hosting the role and determining the role name.\n if not value:\n value = os.path.basename(os.getcwd())\n\n role_directory = os.path.join(os.pardir, value)\n if not os.path.exists(role_directory):\n msg = f\"The role '{value}' not found. \" \"Please choose the proper role name.\"\n util.sysexit_with_message(msg)\n return value\n\n\ndef _default_scenario_exists(ctx, param, value: str): # pragma: no cover\n if value == command_base.MOLECULE_DEFAULT_SCENARIO_NAME:\n return value\n\n default_scenario_directory = os.path.join(\n \"molecule\", command_base.MOLECULE_DEFAULT_SCENARIO_NAME\n )\n if not os.path.exists(default_scenario_directory):\n msg = f\"The default scenario not found. Please create a scenario named '{command_base.MOLECULE_DEFAULT_SCENARIO_NAME}' first.\"\n util.sysexit_with_message(msg)\n return value\n\n\n@command_base.click_command_ex()\[email protected]_context\[email protected](\n \"--dependency-name\",\n type=click.Choice([\"galaxy\"]),\n default=\"galaxy\",\n help=\"Name of dependency to initialize. (galaxy)\",\n)\[email protected](\n \"--driver-name\",\n \"-d\",\n type=click.Choice([str(s) for s in api.drivers()]),\n default=DEFAULT_DRIVER,\n help=f\"Name of driver to initialize. ({DEFAULT_DRIVER})\",\n)\[email protected](\n \"--lint-name\",\n type=click.Choice([\"yamllint\"]),\n default=\"yamllint\",\n help=\"Name of lint to initialize. (ansible-lint)\",\n)\[email protected](\n \"--provisioner-name\",\n type=click.Choice([\"ansible\"]),\n default=\"ansible\",\n help=\"Name of provisioner to initialize. (ansible)\",\n)\[email protected](\n \"--role-name\",\n \"-r\",\n required=False,\n callback=_role_exists,\n help=\"Name of the role to create.\",\n)\[email protected](\n \"scenario-name\",\n default=command_base.MOLECULE_DEFAULT_SCENARIO_NAME,\n required=False,\n callback=_default_scenario_exists,\n)\[email protected](\n \"--verifier-name\",\n type=click.Choice([str(s) for s in api.verifiers()]),\n default=\"ansible\",\n help=\"Name of verifier to initialize. 
(ansible)\",\n)\ndef scenario(\n ctx,\n dependency_name,\n driver_name,\n lint_name,\n provisioner_name,\n role_name,\n scenario_name,\n verifier_name,\n): # pragma: no cover\n \"\"\"Initialize a new scenario for use with Molecule.\n\n If name is not specified the 'default' value will be used.\n \"\"\"\n command_args = {\n \"dependency_name\": dependency_name,\n \"driver_name\": driver_name,\n \"lint_name\": lint_name,\n \"provisioner_name\": provisioner_name,\n \"role_name\": role_name,\n \"scenario_name\": scenario_name,\n \"subcommand\": __name__,\n \"verifier_name\": verifier_name,\n }\n\n s = Scenario(command_args)\n s.execute()\n", "path": "src/molecule/command/init/scenario.py" } ]
[ { "content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\"\"\"Base class used by init scenario command.\"\"\"\n\nimport logging\nimport os\nfrom typing import Dict\n\nimport click\n\nfrom molecule import api, config, util\nfrom molecule.command import base as command_base\nfrom molecule.command.init import base\nfrom molecule.config import DEFAULT_DRIVER\n\nLOG = logging.getLogger(__name__)\n\n\nclass Scenario(base.Base):\n \"\"\"\n Scenario Class.\n\n .. program:: molecule init scenario bar --role-name foo\n\n .. option:: molecule init scenario bar --role-name foo\n\n Initialize a new scenario. In order to customise the role, please refer\n to the `init role` command.\n\n .. program:: cd foo; molecule init scenario bar --role-name foo\n\n .. option:: cd foo; molecule init scenario bar --role-name foo\n\n Initialize an existing role with Molecule:\n\n .. program:: cd foo; molecule init scenario bar --role-name foo\n\n .. option:: cd foo; molecule init scenario bar --role-name foo\n\n Initialize a new scenario using a local *cookiecutter* template for the\n driver configuration.\n \"\"\" # noqa\n\n def __init__(self, command_args: Dict[str, str]):\n \"\"\"Construct Scenario.\"\"\"\n self._command_args = command_args\n\n def execute(self):\n \"\"\"\n Execute the actions necessary to perform a `molecule init scenario` and \\\n returns None.\n\n :return: None\n \"\"\"\n scenario_name = self._command_args[\"scenario_name\"]\n role_name = os.getcwd().split(os.sep)[-1]\n role_directory = util.abs_path(os.path.join(os.getcwd(), os.pardir))\n\n msg = f\"Initializing new scenario {scenario_name}...\"\n LOG.info(msg)\n molecule_directory = config.molecule_directory(\n os.path.join(role_directory, role_name)\n )\n scenario_directory = os.path.join(molecule_directory, scenario_name)\n\n if os.path.isdir(scenario_directory):\n msg = (\n f\"The directory molecule/{scenario_name} exists. 
\"\n \"Cannot create new scenario.\"\n )\n util.sysexit_with_message(msg)\n\n driver_template = api.drivers()[\n self._command_args[\"driver_name\"]\n ].template_dir()\n if \"driver_template\" in self._command_args:\n self._validate_template_dir(self._command_args[\"driver_template\"])\n cli_driver_template = f\"{self._command_args['driver_template']}/{self._command_args['driver_name']}\"\n if os.path.isdir(cli_driver_template):\n driver_template = cli_driver_template\n else:\n LOG.warning(\n \"Driver not found in custom template directory(%s), \"\n \"using the default template instead\",\n cli_driver_template,\n )\n scenario_base_directory = os.path.join(role_directory, role_name)\n templates = [\n driver_template,\n api.verifiers()[self._command_args[\"verifier_name\"]].template_dir(),\n ]\n self._process_templates(\"molecule\", self._command_args, role_directory)\n for template in templates:\n self._process_templates(\n template, self._command_args, scenario_base_directory\n )\n\n role_directory = os.path.join(role_directory, role_name)\n msg = f\"Initialized scenario in {scenario_directory} successfully.\"\n LOG.info(msg)\n\n\ndef _role_exists(ctx, param, value: str): # pragma: no cover\n # if role name was not mentioned we assume that current directory is the\n # one hosting the role and determining the role name.\n if not value:\n value = os.path.basename(os.getcwd())\n\n role_directory = os.path.join(os.pardir, value)\n if not os.path.exists(role_directory):\n msg = f\"The role '{value}' not found. \" \"Please choose the proper role name.\"\n util.sysexit_with_message(msg)\n return value\n\n\ndef _default_scenario_exists(ctx, param, value: str): # pragma: no cover\n if value == command_base.MOLECULE_DEFAULT_SCENARIO_NAME:\n return value\n\n default_scenario_directory = os.path.join(\n \"molecule\", command_base.MOLECULE_DEFAULT_SCENARIO_NAME\n )\n if not os.path.exists(default_scenario_directory):\n msg = f\"The default scenario not found. Please create a scenario named '{command_base.MOLECULE_DEFAULT_SCENARIO_NAME}' first.\"\n util.sysexit_with_message(msg)\n return value\n\n\n@command_base.click_command_ex()\[email protected]_context\[email protected](\n \"--dependency-name\",\n type=click.Choice([\"galaxy\"]),\n default=\"galaxy\",\n help=\"Name of dependency to initialize. (galaxy)\",\n)\[email protected](\n \"--driver-name\",\n \"-d\",\n type=click.Choice([str(s) for s in api.drivers()]),\n default=DEFAULT_DRIVER,\n help=f\"Name of driver to initialize. ({DEFAULT_DRIVER})\",\n)\[email protected](\n \"--lint-name\",\n type=click.Choice([\"yamllint\"]),\n default=\"yamllint\",\n help=\"Name of lint to initialize. (yamllint)\",\n)\[email protected](\n \"--provisioner-name\",\n type=click.Choice([\"ansible\"]),\n default=\"ansible\",\n help=\"Name of provisioner to initialize. (ansible)\",\n)\[email protected](\n \"--role-name\",\n \"-r\",\n required=False,\n callback=_role_exists,\n help=\"Name of the role to create.\",\n)\[email protected](\n \"scenario-name\",\n default=command_base.MOLECULE_DEFAULT_SCENARIO_NAME,\n required=False,\n callback=_default_scenario_exists,\n)\[email protected](\n \"--verifier-name\",\n type=click.Choice([str(s) for s in api.verifiers()]),\n default=\"ansible\",\n help=\"Name of verifier to initialize. 
(ansible)\",\n)\ndef scenario(\n ctx,\n dependency_name,\n driver_name,\n lint_name,\n provisioner_name,\n role_name,\n scenario_name,\n verifier_name,\n): # pragma: no cover\n \"\"\"Initialize a new scenario for use with Molecule.\n\n If name is not specified the 'default' value will be used.\n \"\"\"\n command_args = {\n \"dependency_name\": dependency_name,\n \"driver_name\": driver_name,\n \"lint_name\": lint_name,\n \"provisioner_name\": provisioner_name,\n \"role_name\": role_name,\n \"scenario_name\": scenario_name,\n \"subcommand\": __name__,\n \"verifier_name\": verifier_name,\n }\n\n s = Scenario(command_args)\n s.execute()\n", "path": "src/molecule/command/init/scenario.py" } ]
diff --git a/docs/configuration.rst b/docs/configuration.rst index dfa22f6c87..511a3e8344 100644 --- a/docs/configuration.rst +++ b/docs/configuration.rst @@ -14,7 +14,8 @@ In order to help Ansible find used modules and roles, molecule will perform a prerun set of actions. These involve installing dependencies from ``requirements.yml`` specified at project level, install a standalone role or a collection. The destination is ``project_dir/.cache`` and the code itself -is reused from ansible-lint, which has to do the same actions. +was reused from ansible-lint, which has to do the same actions. +(Note: ansible-lint is not included with molecule.) This assures that when you include a role inside molecule playbooks, Ansible will be able to find that role, and that the include is exactly the same as @@ -138,6 +139,9 @@ Molecule was able to use up to three linters and while it was aimed to flexible about them, it ended up creating more confusions to the users. We decided to maximize flexibility by just calling an external shell command. +Note: ansible-lint is not included with molecule. The ``molecule[lint]`` extra +does not install ansible-lint. + .. code-block:: yaml lint: | diff --git a/docs/getting-started.rst b/docs/getting-started.rst index 63e77ff901..77e97f7187 100644 --- a/docs/getting-started.rst +++ b/docs/getting-started.rst @@ -122,7 +122,8 @@ keys represent the high level components that Molecule provides. These are: the driver to delegate the task of creating instances. * The :ref:`lint` command. Molecule can call external commands to ensure - that best practices are encouraged. + that best practices are encouraged. Note: `ansible-lint` is not included with + molecule or molecule[lint]. * The :ref:`platforms` definitions. Molecule relies on this to know which instances to create, name and to which group each instance belongs. If you diff --git a/docs/installation.rst b/docs/installation.rst index 1d88ef9e1a..6d24642c9c 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -94,7 +94,14 @@ Install Molecule: .. code-block:: bash - $ python3 -m pip install --user "molecule[lint]" + $ python3 -m pip install --user "molecule" + +Molecule does not include ansible-lint (nor does the lint extra), but +is easily installed separately: + +.. code-block:: bash + + $ python3 -m pip install --user "molecule ansible-lint" Molecule uses the "delegated" driver by default. Other drivers can be installed separately from PyPI, such as the molecule-docker driver. @@ -103,7 +110,7 @@ command would look like this: .. code-block:: bash - $ python3 -m pip install --user "molecule[docker,lint]" + $ python3 -m pip install --user "molecule[docker]" Other drivers, such as ``molecule-podman``, ``molecule-vagrant``, ``molecule-azure`` or ``molecule-hetzner`` are also available. diff --git a/setup.cfg b/setup.cfg index e278acf093..1020174040 100644 --- a/setup.cfg +++ b/setup.cfg @@ -105,7 +105,6 @@ test = pytest-xdist >= 2.1.0 pytest >= 6.1.2 lint = - # ansible-lint is now a core dependency, duplicating it here would confuse pip flake8 >= 3.8.4 pre-commit >= 2.10.1 yamllint diff --git a/src/molecule/command/init/scenario.py b/src/molecule/command/init/scenario.py index cc8cba6d6f..32a1d0d79b 100644 --- a/src/molecule/command/init/scenario.py +++ b/src/molecule/command/init/scenario.py @@ -162,7 +162,7 @@ def _default_scenario_exists(ctx, param, value: str): # pragma: no cover "--lint-name", type=click.Choice(["yamllint"]), default="yamllint", - help="Name of lint to initialize. 
(ansible-lint)", + help="Name of lint to initialize. (yamllint)", ) @click.option( "--provisioner-name",
scipy__scipy-9996
lsq_linear hangs/infinite loop with 'trf' method <!-- Thank you for taking the time to report a SciPy issue. Please describe the issue in detail, and for bug reports fill in the fields below. You can delete the sections that don't apply to your issue. You can view the final output by clicking the preview button above. --> I have found several cases where scipy.optimize.lsq_linear with non-negative bounds (i.e. (0, numpy.Inf)) hangs, seemingly stuck in an infinite loop in some C code (LAPACK?) that can't be terminated via ctrl+c. It ran for at least two days the first time I noticed it. The non-default 'bvls' method and scipy.optimize.nnls() both work on the same data, one example of which I have attached: [x.txt](https://github.com/scipy/scipy/files/3010094/x.txt) [y.txt](https://github.com/scipy/scipy/files/3010095/y.txt) ### Reproducing code example: <!-- If you place your code between the triple backticks below, it will be marked as a code block automatically --> ``` import numpy as np; import scipy.optimize as spopt x = np.loadtxt('x.txt') y = np.loadtxt('y.txt') print(spopt.nnls(x,y)) print(spopt.lsq_linear(x, y, bounds=(0, np.Inf), method='bvls')) print(spopt.lsq_linear(x, y, bounds=(0, np.Inf), method='trf', verbose=2)) ``` ### Output: <!-- If any, paste the *full* error message inside a code block as above (starting from line Traceback) --> ``` In [1]: import numpy as np; import scipy.optimize as spopt ...: x = np.loadtxt('x.txt') ...: y = np.loadtxt('y.txt') ...: print(spopt.nnls(x,y)) ...: print(spopt.lsq_linear(x, y, bounds=(0, np.Inf), method='bvls')) ...: print(spopt.lsq_linear(x, y, bounds=(0, np.Inf), method='trf', verbose=2)) ...: (array([ 2.09932938, 0. , 0. , 14.74758632]), 1.1295995521670104) active_mask: array([ 0., -1., -1., 0.]) cost: 0.6379975741279486 fun: array([-0.003566 , -0.00431135, -0.00317054, ..., 0.00151165, 0.00256816, 0.00488628]) message: 'The first-order optimality measure is less than `tol`.' nit: 3 optimality: 4.209012793594848e-15 status: 1 success: True x: array([ 2.09932938, 0. , 0. , 14.74758632]) Iteration Cost Cost reduction Step norm Optimality 0 5.9926e+01 6.86e+01 1 9.5818e+00 5.03e+01 2.39e+00 1.62e+01 2 1.5210e+00 8.06e+00 1.07e+00 3.26e+00 3 8.3612e-01 6.85e-01 3.31e-01 3.94e-01 4 7.9232e-01 4.38e-02 6.33e-01 7.22e-02 5 6.9727e-01 9.51e-02 9.96e+00 4.75e-02 6 6.9645e-01 8.16e-04 1.43e-02 7.09e-02 /software/lsstsw/stack_20181012/python/miniconda3-4.5.4/envs/lsst-scipipe/lib/python3.6/site-packages/scipy/optimize/_lsq/common.py:321: RuntimeWarning: invalid value encountered in add y = a * t**2 + b * t + c /software/lsstsw/stack_20181012/python/miniconda3-4.5.4/envs/lsst-scipipe/lib/python3.6/site-packages/scipy/optimize/_lsq/common.py:362: RuntimeWarning: invalid value encountered in double_scalars return 0.5 * q + l 7 inf nan inf inf ``` ### Scipy/Numpy/Python version information: <!-- You can simply run the following and paste the result in a code block --> ``` 1.1.0 1.14.5 sys.version_info(major=3, minor=6, micro=6, releaselevel='final', serial=0) ``` I have also reproduced this on a different machine with 1.0.0 and 1.2.1.
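The two RuntimeWarnings in the verbose output point at `minimize_quadratic_1d` in `scipy/optimize/_lsq/common.py`: once a candidate step along an unbounded direction becomes infinite, evaluating `a * t**2 + b * t + c` for a concave quadratic adds `-inf` to `+inf` and yields `nan`, which then poisons the cost and step norm (the final `7 inf nan inf inf` row in the iteration table). The snippet below reproduces just that arithmetic; the coefficients are illustrative and not taken from the attached data:

```
import numpy as np

a, b, c = -1.0, 0.2, 10.0    # concave 1-d quadratic (a < 0)
t = np.array([0.0, np.inf])  # candidate points, one bound is infinite

y = a * t**2 + b * t + c     # -inf + inf -> nan ("invalid value encountered in add")
print(y)                     # [10. nan]
print(np.argmin(y))          # 1 -- the nan entry wins, so nan propagates further
```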
[ { "content": "\"\"\"Functions used by least-squares algorithms.\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom math import copysign\n\nimport numpy as np\nfrom numpy.linalg import norm\n\nfrom scipy.linalg import cho_factor, cho_solve, LinAlgError\nfrom scipy.sparse import issparse\nfrom scipy.sparse.linalg import LinearOperator, aslinearoperator\n\n\nEPS = np.finfo(float).eps\n\n\n# Functions related to a trust-region problem.\n\n\ndef intersect_trust_region(x, s, Delta):\n \"\"\"Find the intersection of a line with the boundary of a trust region.\n \n This function solves the quadratic equation with respect to t\n ||(x + s*t)||**2 = Delta**2.\n \n Returns\n -------\n t_neg, t_pos : tuple of float\n Negative and positive roots.\n \n Raises\n ------\n ValueError\n If `s` is zero or `x` is not within the trust region.\n \"\"\"\n a = np.dot(s, s)\n if a == 0:\n raise ValueError(\"`s` is zero.\")\n\n b = np.dot(x, s)\n\n c = np.dot(x, x) - Delta**2\n if c > 0:\n raise ValueError(\"`x` is not within the trust region.\")\n\n d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant.\n\n # Computations below avoid loss of significance, see \"Numerical Recipes\".\n q = -(b + copysign(d, b))\n t1 = q / a\n t2 = c / q\n\n if t1 < t2:\n return t1, t2\n else:\n return t2, t1\n\n\ndef solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,\n rtol=0.01, max_iter=10):\n \"\"\"Solve a trust-region problem arising in least-squares minimization.\n \n This function implements a method described by J. J. More [1]_ and used\n in MINPACK, but it relies on a single SVD of Jacobian instead of series\n of Cholesky decompositions. Before running this function, compute:\n ``U, s, VT = svd(J, full_matrices=False)``.\n \n Parameters\n ----------\n n : int\n Number of variables.\n m : int\n Number of residuals.\n uf : ndarray\n Computed as U.T.dot(f).\n s : ndarray\n Singular values of J.\n V : ndarray\n Transpose of VT.\n Delta : float\n Radius of a trust region.\n initial_alpha : float, optional\n Initial guess for alpha, which might be available from a previous\n iteration. If None, determined automatically.\n rtol : float, optional\n Stopping tolerance for the root-finding procedure. Namely, the\n solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.\n max_iter : int, optional\n Maximum allowed number of iterations for the root-finding procedure.\n \n Returns\n -------\n p : ndarray, shape (n,)\n Found solution of a trust-region problem.\n alpha : float\n Positive value such that (J.T*J + alpha*I)*p = -J.T*f.\n Sometimes called Levenberg-Marquardt parameter.\n n_iter : int\n Number of iterations made by root-finding procedure. Zero means\n that Gauss-Newton step was selected as the solution.\n \n References\n ----------\n .. [1] More, J. J., \"The Levenberg-Marquardt Algorithm: Implementation\n and Theory,\" Numerical Analysis, ed. G. A. Watson, Lecture Notes\n in Mathematics 630, Springer Verlag, pp. 105-116, 1977.\n \"\"\"\n def phi_and_derivative(alpha, suf, s, Delta):\n \"\"\"Function of which to find zero.\n \n It is defined as \"norm of regularized (by alpha) least-squares\n solution minus `Delta`\". 
Refer to [1]_.\n \"\"\"\n denom = s**2 + alpha\n p_norm = norm(suf / denom)\n phi = p_norm - Delta\n phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm\n return phi, phi_prime\n\n suf = s * uf\n\n # Check if J has full rank and try Gauss-Newton step.\n if m >= n:\n threshold = EPS * m * s[0]\n full_rank = s[-1] > threshold\n else:\n full_rank = False\n\n if full_rank:\n p = -V.dot(uf / s)\n if norm(p) <= Delta:\n return p, 0.0, 0\n\n alpha_upper = norm(suf) / Delta\n\n if full_rank:\n phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)\n alpha_lower = -phi / phi_prime\n else:\n alpha_lower = 0.0\n\n if initial_alpha is None or not full_rank and initial_alpha == 0:\n alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)\n else:\n alpha = initial_alpha\n\n for it in range(max_iter):\n if alpha < alpha_lower or alpha > alpha_upper:\n alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)\n\n phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)\n\n if phi < 0:\n alpha_upper = alpha\n\n ratio = phi / phi_prime\n alpha_lower = max(alpha_lower, alpha - ratio)\n alpha -= (phi + Delta) * ratio / Delta\n\n if np.abs(phi) < rtol * Delta:\n break\n\n p = -V.dot(suf / (s**2 + alpha))\n\n # Make the norm of p equal to Delta, p is changed only slightly during\n # this. It is done to prevent p lie outside the trust region (which can\n # cause problems later).\n p *= Delta / norm(p)\n\n return p, alpha, it + 1\n\n\ndef solve_trust_region_2d(B, g, Delta):\n \"\"\"Solve a general trust-region problem in 2 dimensions.\n \n The problem is reformulated as a 4-th order algebraic equation,\n the solution of which is found by numpy.roots.\n \n Parameters\n ----------\n B : ndarray, shape (2, 2)\n Symmetric matrix, defines a quadratic term of the function.\n g : ndarray, shape (2,)\n Defines a linear term of the function.\n Delta : float\n Radius of a trust region.\n \n Returns\n -------\n p : ndarray, shape (2,)\n Found solution.\n newton_step : bool\n Whether the returned solution is the Newton step which lies within\n the trust region.\n \"\"\"\n try:\n R, lower = cho_factor(B)\n p = -cho_solve((R, lower), g)\n if np.dot(p, p) <= Delta**2:\n return p, True\n except LinAlgError:\n pass\n\n a = B[0, 0] * Delta**2\n b = B[0, 1] * Delta**2\n c = B[1, 1] * Delta**2\n\n d = g[0] * Delta\n f = g[1] * Delta\n\n coeffs = np.array(\n [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])\n t = np.roots(coeffs) # Can handle leading zeros.\n t = np.real(t[np.isreal(t)])\n\n p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))\n value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)\n i = np.argmin(value)\n p = p[:, i]\n\n return p, False\n\n\ndef update_tr_radius(Delta, actual_reduction, predicted_reduction,\n step_norm, bound_hit):\n \"\"\"Update the radius of a trust region based on the cost reduction.\n\n Returns\n -------\n Delta : float\n New radius.\n ratio : float\n Ratio between actual and predicted reductions.\n \"\"\"\n if predicted_reduction > 0:\n ratio = actual_reduction / predicted_reduction\n elif predicted_reduction == actual_reduction == 0:\n ratio = 1\n else:\n ratio = 0\n\n if ratio < 0.25:\n Delta = 0.25 * step_norm\n elif ratio > 0.75 and bound_hit:\n Delta *= 2.0\n\n return Delta, ratio\n\n\n# Construction and minimization of quadratic functions.\n\n\ndef build_quadratic_1d(J, g, s, diag=None, s0=None):\n \"\"\"Parameterize a multivariate quadratic function along a line.\n \n The resulting univariate quadratic function is given as follows:\n ::\n 
f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +\n g.T * (s0 + s*t)\n \n Parameters\n ----------\n J : ndarray, sparse matrix or LinearOperator shape (m, n)\n Jacobian matrix, affects the quadratic term.\n g : ndarray, shape (n,)\n Gradient, defines the linear term.\n s : ndarray, shape (n,)\n Direction vector of a line.\n diag : None or ndarray with shape (n,), optional\n Addition diagonal part, affects the quadratic term.\n If None, assumed to be 0.\n s0 : None or ndarray with shape (n,), optional\n Initial point. If None, assumed to be 0.\n \n Returns\n -------\n a : float\n Coefficient for t**2.\n b : float\n Coefficient for t.\n c : float\n Free term. Returned only if `s0` is provided.\n \"\"\"\n v = J.dot(s)\n a = np.dot(v, v)\n if diag is not None:\n a += np.dot(s * diag, s)\n a *= 0.5\n\n b = np.dot(g, s)\n\n if s0 is not None:\n u = J.dot(s0)\n b += np.dot(u, v)\n c = 0.5 * np.dot(u, u) + np.dot(g, s0)\n if diag is not None:\n b += np.dot(s0 * diag, s)\n c += 0.5 * np.dot(s0 * diag, s0)\n return a, b, c\n else:\n return a, b\n\n\ndef minimize_quadratic_1d(a, b, lb, ub, c=0):\n \"\"\"Minimize a 1-d quadratic function subject to bounds.\n \n The free term `c` is 0 by default. Bounds must be finite.\n \n Returns\n -------\n t : float\n Minimum point.\n y : float\n Minimum value.\n \"\"\"\n t = [lb, ub]\n if a != 0:\n extremum = -0.5 * b / a\n if lb < extremum < ub:\n t.append(extremum)\n t = np.asarray(t)\n y = a * t**2 + b * t + c\n min_index = np.argmin(y)\n return t[min_index], y[min_index]\n\n\ndef evaluate_quadratic(J, g, s, diag=None):\n \"\"\"Compute values of a quadratic function arising in least squares.\n \n The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.\n \n Parameters\n ----------\n J : ndarray, sparse matrix or LinearOperator, shape (m, n)\n Jacobian matrix, affects the quadratic term.\n g : ndarray, shape (n,)\n Gradient, defines the linear term.\n s : ndarray, shape (k, n) or (n,)\n Array containing steps as rows.\n diag : ndarray, shape (n,), optional\n Addition diagonal part, affects the quadratic term.\n If None, assumed to be 0.\n \n Returns\n -------\n values : ndarray with shape (k,) or float\n Values of the function. If `s` was 2-dimensional then ndarray is\n returned, otherwise float is returned.\n \"\"\"\n if s.ndim == 1:\n Js = J.dot(s)\n q = np.dot(Js, Js)\n if diag is not None:\n q += np.dot(s * diag, s)\n else:\n Js = J.dot(s.T)\n q = np.sum(Js**2, axis=0)\n if diag is not None:\n q += np.sum(diag * s**2, axis=1)\n\n l = np.dot(s, g)\n\n return 0.5 * q + l\n\n\n# Utility functions to work with bound constraints.\n\n\ndef in_bounds(x, lb, ub):\n \"\"\"Check if a point lies within bounds.\"\"\"\n return np.all((x >= lb) & (x <= ub))\n\n\ndef step_size_to_bound(x, s, lb, ub):\n \"\"\"Compute a min_step size required to reach a bound.\n \n The function computes a positive scalar t, such that x + s * t is on\n the bound.\n \n Returns\n -------\n step : float\n Computed step. 
Non-negative value.\n hits : ndarray of int with shape of x\n Each element indicates whether a corresponding variable reaches the\n bound:\n \n * 0 - the bound was not hit.\n * -1 - the lower bound was hit.\n * 1 - the upper bound was hit.\n \"\"\"\n non_zero = np.nonzero(s)\n s_non_zero = s[non_zero]\n steps = np.empty_like(x)\n steps.fill(np.inf)\n with np.errstate(over='ignore'):\n steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,\n (ub - x)[non_zero] / s_non_zero)\n min_step = np.min(steps)\n return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)\n\n\ndef find_active_constraints(x, lb, ub, rtol=1e-10):\n \"\"\"Determine which constraints are active in a given point.\n \n The threshold is computed using `rtol` and the absolute value of the\n closest bound.\n \n Returns\n -------\n active : ndarray of int with shape of x\n Each component shows whether the corresponding constraint is active:\n \n * 0 - a constraint is not active.\n * -1 - a lower bound is active.\n * 1 - a upper bound is active.\n \"\"\"\n active = np.zeros_like(x, dtype=int)\n\n if rtol == 0:\n active[x <= lb] = -1\n active[x >= ub] = 1\n return active\n\n lower_dist = x - lb\n upper_dist = ub - x\n\n lower_threshold = rtol * np.maximum(1, np.abs(lb))\n upper_threshold = rtol * np.maximum(1, np.abs(ub))\n\n lower_active = (np.isfinite(lb) &\n (lower_dist <= np.minimum(upper_dist, lower_threshold)))\n active[lower_active] = -1\n\n upper_active = (np.isfinite(ub) &\n (upper_dist <= np.minimum(lower_dist, upper_threshold)))\n active[upper_active] = 1\n\n return active\n\n\ndef make_strictly_feasible(x, lb, ub, rstep=1e-10):\n \"\"\"Shift a point to the interior of a feasible region.\n \n Each element of the returned vector is at least at a relative distance\n `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.\n \"\"\"\n x_new = x.copy()\n\n active = find_active_constraints(x, lb, ub, rstep)\n lower_mask = np.equal(active, -1)\n upper_mask = np.equal(active, 1)\n\n if rstep == 0:\n x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])\n x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])\n else:\n x_new[lower_mask] = (lb[lower_mask] +\n rstep * np.maximum(1, np.abs(lb[lower_mask])))\n x_new[upper_mask] = (ub[upper_mask] -\n rstep * np.maximum(1, np.abs(ub[upper_mask])))\n\n tight_bounds = (x_new < lb) | (x_new > ub)\n x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])\n\n return x_new\n\n \ndef CL_scaling_vector(x, g, lb, ub):\n \"\"\"Compute Coleman-Li scaling vector and its derivatives.\n \n Components of a vector v are defined as follows:\n ::\n | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf\n v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf\n | 1, otherwise\n \n According to this definition v[i] >= 0 for all i. It differs from the\n definition in paper [1]_ (eq. (2.2)), where the absolute value of v is\n used. Both definitions are equivalent down the line.\n Derivatives of v with respect to x take value 1, -1 or 0 depending on a\n case.\n \n Returns\n -------\n v : ndarray with shape of x\n Scaling vector.\n dv : ndarray with shape of x\n Derivatives of v[i] with respect to x[i], diagonal elements of v's\n Jacobian.\n \n References\n ----------\n .. [1] M.A. Branch, T.F. Coleman, and Y. Li, \"A Subspace, Interior,\n and Conjugate Gradient Method for Large-Scale Bound-Constrained\n Minimization Problems,\" SIAM Journal on Scientific Computing,\n Vol. 
21, Number 1, pp 1-23, 1999.\n \"\"\"\n v = np.ones_like(x)\n dv = np.zeros_like(x)\n\n mask = (g < 0) & np.isfinite(ub)\n v[mask] = ub[mask] - x[mask]\n dv[mask] = -1\n\n mask = (g > 0) & np.isfinite(lb)\n v[mask] = x[mask] - lb[mask]\n dv[mask] = 1\n\n return v, dv\n\n\ndef reflective_transformation(y, lb, ub):\n \"\"\"Compute reflective transformation and its gradient.\"\"\"\n if in_bounds(y, lb, ub):\n return y, np.ones_like(y)\n\n lb_finite = np.isfinite(lb)\n ub_finite = np.isfinite(ub)\n\n x = y.copy()\n g_negative = np.zeros_like(y, dtype=bool)\n\n mask = lb_finite & ~ub_finite\n x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])\n g_negative[mask] = y[mask] < lb[mask]\n\n mask = ~lb_finite & ub_finite\n x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])\n g_negative[mask] = y[mask] > ub[mask]\n\n mask = lb_finite & ub_finite\n d = ub - lb\n t = np.remainder(y[mask] - lb[mask], 2 * d[mask])\n x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)\n g_negative[mask] = t > d[mask]\n\n g = np.ones_like(y)\n g[g_negative] = -1\n\n return x, g\n\n\n# Functions to display algorithm's progress.\n\n\ndef print_header_nonlinear():\n print(\"{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}{5:^15}\"\n .format(\"Iteration\", \"Total nfev\", \"Cost\", \"Cost reduction\",\n \"Step norm\", \"Optimality\"))\n\n\ndef print_iteration_nonlinear(iteration, nfev, cost, cost_reduction,\n step_norm, optimality):\n if cost_reduction is None:\n cost_reduction = \" \" * 15\n else:\n cost_reduction = \"{0:^15.2e}\".format(cost_reduction)\n\n if step_norm is None:\n step_norm = \" \" * 15\n else:\n step_norm = \"{0:^15.2e}\".format(step_norm)\n\n print(\"{0:^15}{1:^15}{2:^15.4e}{3}{4}{5:^15.2e}\"\n .format(iteration, nfev, cost, cost_reduction,\n step_norm, optimality))\n\n\ndef print_header_linear():\n print(\"{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}\"\n .format(\"Iteration\", \"Cost\", \"Cost reduction\", \"Step norm\",\n \"Optimality\"))\n\n\ndef print_iteration_linear(iteration, cost, cost_reduction, step_norm,\n optimality):\n if cost_reduction is None:\n cost_reduction = \" \" * 15\n else:\n cost_reduction = \"{0:^15.2e}\".format(cost_reduction)\n\n if step_norm is None:\n step_norm = \" \" * 15\n else:\n step_norm = \"{0:^15.2e}\".format(step_norm)\n\n print(\"{0:^15}{1:^15.4e}{2}{3}{4:^15.2e}\".format(\n iteration, cost, cost_reduction, step_norm, optimality))\n\n\n# Simple helper functions.\n\n\ndef compute_grad(J, f):\n \"\"\"Compute gradient of the least-squares cost function.\"\"\"\n if isinstance(J, LinearOperator):\n return J.rmatvec(f)\n else:\n return J.T.dot(f)\n\n\ndef compute_jac_scale(J, scale_inv_old=None):\n \"\"\"Compute variables scale based on the Jacobian matrix.\"\"\"\n if issparse(J):\n scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5\n else:\n scale_inv = np.sum(J**2, axis=0)**0.5\n\n if scale_inv_old is None:\n scale_inv[scale_inv == 0] = 1\n else:\n scale_inv = np.maximum(scale_inv, scale_inv_old)\n\n return 1 / scale_inv, scale_inv\n\n\ndef left_multiplied_operator(J, d):\n \"\"\"Return diag(d) J as LinearOperator.\"\"\"\n J = aslinearoperator(J)\n\n def matvec(x):\n return d * J.matvec(x)\n\n def matmat(X):\n return d[:, np.newaxis] * J.matmat(X)\n\n def rmatvec(x):\n return J.rmatvec(x.ravel() * d)\n\n return LinearOperator(J.shape, matvec=matvec, matmat=matmat,\n rmatvec=rmatvec)\n\n\ndef right_multiplied_operator(J, d):\n \"\"\"Return J diag(d) as LinearOperator.\"\"\"\n J = aslinearoperator(J)\n\n def matvec(x):\n return J.matvec(np.ravel(x) * d)\n\n def matmat(X):\n return 
J.matmat(X * d[:, np.newaxis])\n\n def rmatvec(x):\n return d * J.rmatvec(x)\n\n return LinearOperator(J.shape, matvec=matvec, matmat=matmat,\n rmatvec=rmatvec)\n\n\ndef regularized_lsq_operator(J, diag):\n \"\"\"Return a matrix arising in regularized least squares as LinearOperator.\n \n The matrix is\n [ J ]\n [ D ]\n where D is diagonal matrix with elements from `diag`.\n \"\"\"\n J = aslinearoperator(J)\n m, n = J.shape\n\n def matvec(x):\n return np.hstack((J.matvec(x), diag * x))\n\n def rmatvec(x):\n x1 = x[:m]\n x2 = x[m:]\n return J.rmatvec(x1) + diag * x2\n\n return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)\n\n\ndef right_multiply(J, d, copy=True):\n \"\"\"Compute J diag(d).\n \n If `copy` is False, `J` is modified in place (unless being LinearOperator).\n \"\"\"\n if copy and not isinstance(J, LinearOperator):\n J = J.copy()\n\n if issparse(J):\n J.data *= d.take(J.indices, mode='clip') # scikit-learn recipe.\n elif isinstance(J, LinearOperator):\n J = right_multiplied_operator(J, d)\n else:\n J *= d\n\n return J\n\n\ndef left_multiply(J, d, copy=True):\n \"\"\"Compute diag(d) J.\n \n If `copy` is False, `J` is modified in place (unless being LinearOperator).\n \"\"\"\n if copy and not isinstance(J, LinearOperator):\n J = J.copy()\n\n if issparse(J):\n J.data *= np.repeat(d, np.diff(J.indptr)) # scikit-learn recipe.\n elif isinstance(J, LinearOperator):\n J = left_multiplied_operator(J, d)\n else:\n J *= d[:, np.newaxis]\n\n return J\n\n\ndef check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):\n \"\"\"Check termination condition for nonlinear least squares.\"\"\"\n ftol_satisfied = dF < ftol * F and ratio > 0.25\n xtol_satisfied = dx_norm < xtol * (xtol + x_norm)\n\n if ftol_satisfied and xtol_satisfied:\n return 4\n elif ftol_satisfied:\n return 2\n elif xtol_satisfied:\n return 3\n else:\n return None\n\n\ndef scale_for_robust_loss_function(J, f, rho):\n \"\"\"Scale Jacobian and residuals for a robust loss function.\n \n Arrays are modified in place.\n \"\"\"\n J_scale = rho[1] + 2 * rho[2] * f**2\n J_scale[J_scale < EPS] = EPS\n J_scale **= 0.5\n\n f *= rho[1] / J_scale\n\n return left_multiply(J, J_scale, copy=False), f\n", "path": "scipy/optimize/_lsq/common.py" } ]
[ { "content": "\"\"\"Functions used by least-squares algorithms.\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom math import copysign\n\nimport numpy as np\nfrom numpy.linalg import norm\n\nfrom scipy.linalg import cho_factor, cho_solve, LinAlgError\nfrom scipy.sparse import issparse\nfrom scipy.sparse.linalg import LinearOperator, aslinearoperator\n\n\nEPS = np.finfo(float).eps\n\n\n# Functions related to a trust-region problem.\n\n\ndef intersect_trust_region(x, s, Delta):\n \"\"\"Find the intersection of a line with the boundary of a trust region.\n \n This function solves the quadratic equation with respect to t\n ||(x + s*t)||**2 = Delta**2.\n \n Returns\n -------\n t_neg, t_pos : tuple of float\n Negative and positive roots.\n \n Raises\n ------\n ValueError\n If `s` is zero or `x` is not within the trust region.\n \"\"\"\n a = np.dot(s, s)\n if a == 0:\n raise ValueError(\"`s` is zero.\")\n\n b = np.dot(x, s)\n\n c = np.dot(x, x) - Delta**2\n if c > 0:\n raise ValueError(\"`x` is not within the trust region.\")\n\n d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant.\n\n # Computations below avoid loss of significance, see \"Numerical Recipes\".\n q = -(b + copysign(d, b))\n t1 = q / a\n t2 = c / q\n\n if t1 < t2:\n return t1, t2\n else:\n return t2, t1\n\n\ndef solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,\n rtol=0.01, max_iter=10):\n \"\"\"Solve a trust-region problem arising in least-squares minimization.\n \n This function implements a method described by J. J. More [1]_ and used\n in MINPACK, but it relies on a single SVD of Jacobian instead of series\n of Cholesky decompositions. Before running this function, compute:\n ``U, s, VT = svd(J, full_matrices=False)``.\n \n Parameters\n ----------\n n : int\n Number of variables.\n m : int\n Number of residuals.\n uf : ndarray\n Computed as U.T.dot(f).\n s : ndarray\n Singular values of J.\n V : ndarray\n Transpose of VT.\n Delta : float\n Radius of a trust region.\n initial_alpha : float, optional\n Initial guess for alpha, which might be available from a previous\n iteration. If None, determined automatically.\n rtol : float, optional\n Stopping tolerance for the root-finding procedure. Namely, the\n solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.\n max_iter : int, optional\n Maximum allowed number of iterations for the root-finding procedure.\n \n Returns\n -------\n p : ndarray, shape (n,)\n Found solution of a trust-region problem.\n alpha : float\n Positive value such that (J.T*J + alpha*I)*p = -J.T*f.\n Sometimes called Levenberg-Marquardt parameter.\n n_iter : int\n Number of iterations made by root-finding procedure. Zero means\n that Gauss-Newton step was selected as the solution.\n \n References\n ----------\n .. [1] More, J. J., \"The Levenberg-Marquardt Algorithm: Implementation\n and Theory,\" Numerical Analysis, ed. G. A. Watson, Lecture Notes\n in Mathematics 630, Springer Verlag, pp. 105-116, 1977.\n \"\"\"\n def phi_and_derivative(alpha, suf, s, Delta):\n \"\"\"Function of which to find zero.\n \n It is defined as \"norm of regularized (by alpha) least-squares\n solution minus `Delta`\". 
Refer to [1]_.\n \"\"\"\n denom = s**2 + alpha\n p_norm = norm(suf / denom)\n phi = p_norm - Delta\n phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm\n return phi, phi_prime\n\n suf = s * uf\n\n # Check if J has full rank and try Gauss-Newton step.\n if m >= n:\n threshold = EPS * m * s[0]\n full_rank = s[-1] > threshold\n else:\n full_rank = False\n\n if full_rank:\n p = -V.dot(uf / s)\n if norm(p) <= Delta:\n return p, 0.0, 0\n\n alpha_upper = norm(suf) / Delta\n\n if full_rank:\n phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)\n alpha_lower = -phi / phi_prime\n else:\n alpha_lower = 0.0\n\n if initial_alpha is None or not full_rank and initial_alpha == 0:\n alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)\n else:\n alpha = initial_alpha\n\n for it in range(max_iter):\n if alpha < alpha_lower or alpha > alpha_upper:\n alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)\n\n phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)\n\n if phi < 0:\n alpha_upper = alpha\n\n ratio = phi / phi_prime\n alpha_lower = max(alpha_lower, alpha - ratio)\n alpha -= (phi + Delta) * ratio / Delta\n\n if np.abs(phi) < rtol * Delta:\n break\n\n p = -V.dot(suf / (s**2 + alpha))\n\n # Make the norm of p equal to Delta, p is changed only slightly during\n # this. It is done to prevent p lie outside the trust region (which can\n # cause problems later).\n p *= Delta / norm(p)\n\n return p, alpha, it + 1\n\n\ndef solve_trust_region_2d(B, g, Delta):\n \"\"\"Solve a general trust-region problem in 2 dimensions.\n \n The problem is reformulated as a 4-th order algebraic equation,\n the solution of which is found by numpy.roots.\n \n Parameters\n ----------\n B : ndarray, shape (2, 2)\n Symmetric matrix, defines a quadratic term of the function.\n g : ndarray, shape (2,)\n Defines a linear term of the function.\n Delta : float\n Radius of a trust region.\n \n Returns\n -------\n p : ndarray, shape (2,)\n Found solution.\n newton_step : bool\n Whether the returned solution is the Newton step which lies within\n the trust region.\n \"\"\"\n try:\n R, lower = cho_factor(B)\n p = -cho_solve((R, lower), g)\n if np.dot(p, p) <= Delta**2:\n return p, True\n except LinAlgError:\n pass\n\n a = B[0, 0] * Delta**2\n b = B[0, 1] * Delta**2\n c = B[1, 1] * Delta**2\n\n d = g[0] * Delta\n f = g[1] * Delta\n\n coeffs = np.array(\n [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])\n t = np.roots(coeffs) # Can handle leading zeros.\n t = np.real(t[np.isreal(t)])\n\n p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))\n value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)\n i = np.argmin(value)\n p = p[:, i]\n\n return p, False\n\n\ndef update_tr_radius(Delta, actual_reduction, predicted_reduction,\n step_norm, bound_hit):\n \"\"\"Update the radius of a trust region based on the cost reduction.\n\n Returns\n -------\n Delta : float\n New radius.\n ratio : float\n Ratio between actual and predicted reductions.\n \"\"\"\n if predicted_reduction > 0:\n ratio = actual_reduction / predicted_reduction\n elif predicted_reduction == actual_reduction == 0:\n ratio = 1\n else:\n ratio = 0\n\n if ratio < 0.25:\n Delta = 0.25 * step_norm\n elif ratio > 0.75 and bound_hit:\n Delta *= 2.0\n\n return Delta, ratio\n\n\n# Construction and minimization of quadratic functions.\n\n\ndef build_quadratic_1d(J, g, s, diag=None, s0=None):\n \"\"\"Parameterize a multivariate quadratic function along a line.\n \n The resulting univariate quadratic function is given as follows:\n ::\n 
f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +\n g.T * (s0 + s*t)\n \n Parameters\n ----------\n J : ndarray, sparse matrix or LinearOperator shape (m, n)\n Jacobian matrix, affects the quadratic term.\n g : ndarray, shape (n,)\n Gradient, defines the linear term.\n s : ndarray, shape (n,)\n Direction vector of a line.\n diag : None or ndarray with shape (n,), optional\n Addition diagonal part, affects the quadratic term.\n If None, assumed to be 0.\n s0 : None or ndarray with shape (n,), optional\n Initial point. If None, assumed to be 0.\n \n Returns\n -------\n a : float\n Coefficient for t**2.\n b : float\n Coefficient for t.\n c : float\n Free term. Returned only if `s0` is provided.\n \"\"\"\n v = J.dot(s)\n a = np.dot(v, v)\n if diag is not None:\n a += np.dot(s * diag, s)\n a *= 0.5\n\n b = np.dot(g, s)\n\n if s0 is not None:\n u = J.dot(s0)\n b += np.dot(u, v)\n c = 0.5 * np.dot(u, u) + np.dot(g, s0)\n if diag is not None:\n b += np.dot(s0 * diag, s)\n c += 0.5 * np.dot(s0 * diag, s0)\n return a, b, c\n else:\n return a, b\n\n\ndef minimize_quadratic_1d(a, b, lb, ub, c=0):\n \"\"\"Minimize a 1-d quadratic function subject to bounds.\n \n The free term `c` is 0 by default. Bounds must be finite.\n \n Returns\n -------\n t : float\n Minimum point.\n y : float\n Minimum value.\n \"\"\"\n t = [lb, ub]\n if a != 0:\n extremum = -0.5 * b / a\n if lb < extremum < ub:\n t.append(extremum)\n t = np.asarray(t)\n y = t * (a * t + b) + c\n min_index = np.argmin(y)\n return t[min_index], y[min_index]\n\n\ndef evaluate_quadratic(J, g, s, diag=None):\n \"\"\"Compute values of a quadratic function arising in least squares.\n \n The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.\n \n Parameters\n ----------\n J : ndarray, sparse matrix or LinearOperator, shape (m, n)\n Jacobian matrix, affects the quadratic term.\n g : ndarray, shape (n,)\n Gradient, defines the linear term.\n s : ndarray, shape (k, n) or (n,)\n Array containing steps as rows.\n diag : ndarray, shape (n,), optional\n Addition diagonal part, affects the quadratic term.\n If None, assumed to be 0.\n \n Returns\n -------\n values : ndarray with shape (k,) or float\n Values of the function. If `s` was 2-dimensional then ndarray is\n returned, otherwise float is returned.\n \"\"\"\n if s.ndim == 1:\n Js = J.dot(s)\n q = np.dot(Js, Js)\n if diag is not None:\n q += np.dot(s * diag, s)\n else:\n Js = J.dot(s.T)\n q = np.sum(Js**2, axis=0)\n if diag is not None:\n q += np.sum(diag * s**2, axis=1)\n\n l = np.dot(s, g)\n\n return 0.5 * q + l\n\n\n# Utility functions to work with bound constraints.\n\n\ndef in_bounds(x, lb, ub):\n \"\"\"Check if a point lies within bounds.\"\"\"\n return np.all((x >= lb) & (x <= ub))\n\n\ndef step_size_to_bound(x, s, lb, ub):\n \"\"\"Compute a min_step size required to reach a bound.\n \n The function computes a positive scalar t, such that x + s * t is on\n the bound.\n \n Returns\n -------\n step : float\n Computed step. 
Non-negative value.\n hits : ndarray of int with shape of x\n Each element indicates whether a corresponding variable reaches the\n bound:\n \n * 0 - the bound was not hit.\n * -1 - the lower bound was hit.\n * 1 - the upper bound was hit.\n \"\"\"\n non_zero = np.nonzero(s)\n s_non_zero = s[non_zero]\n steps = np.empty_like(x)\n steps.fill(np.inf)\n with np.errstate(over='ignore'):\n steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,\n (ub - x)[non_zero] / s_non_zero)\n min_step = np.min(steps)\n return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)\n\n\ndef find_active_constraints(x, lb, ub, rtol=1e-10):\n \"\"\"Determine which constraints are active in a given point.\n \n The threshold is computed using `rtol` and the absolute value of the\n closest bound.\n \n Returns\n -------\n active : ndarray of int with shape of x\n Each component shows whether the corresponding constraint is active:\n \n * 0 - a constraint is not active.\n * -1 - a lower bound is active.\n * 1 - a upper bound is active.\n \"\"\"\n active = np.zeros_like(x, dtype=int)\n\n if rtol == 0:\n active[x <= lb] = -1\n active[x >= ub] = 1\n return active\n\n lower_dist = x - lb\n upper_dist = ub - x\n\n lower_threshold = rtol * np.maximum(1, np.abs(lb))\n upper_threshold = rtol * np.maximum(1, np.abs(ub))\n\n lower_active = (np.isfinite(lb) &\n (lower_dist <= np.minimum(upper_dist, lower_threshold)))\n active[lower_active] = -1\n\n upper_active = (np.isfinite(ub) &\n (upper_dist <= np.minimum(lower_dist, upper_threshold)))\n active[upper_active] = 1\n\n return active\n\n\ndef make_strictly_feasible(x, lb, ub, rstep=1e-10):\n \"\"\"Shift a point to the interior of a feasible region.\n \n Each element of the returned vector is at least at a relative distance\n `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.\n \"\"\"\n x_new = x.copy()\n\n active = find_active_constraints(x, lb, ub, rstep)\n lower_mask = np.equal(active, -1)\n upper_mask = np.equal(active, 1)\n\n if rstep == 0:\n x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])\n x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])\n else:\n x_new[lower_mask] = (lb[lower_mask] +\n rstep * np.maximum(1, np.abs(lb[lower_mask])))\n x_new[upper_mask] = (ub[upper_mask] -\n rstep * np.maximum(1, np.abs(ub[upper_mask])))\n\n tight_bounds = (x_new < lb) | (x_new > ub)\n x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])\n\n return x_new\n\n \ndef CL_scaling_vector(x, g, lb, ub):\n \"\"\"Compute Coleman-Li scaling vector and its derivatives.\n \n Components of a vector v are defined as follows:\n ::\n | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf\n v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf\n | 1, otherwise\n \n According to this definition v[i] >= 0 for all i. It differs from the\n definition in paper [1]_ (eq. (2.2)), where the absolute value of v is\n used. Both definitions are equivalent down the line.\n Derivatives of v with respect to x take value 1, -1 or 0 depending on a\n case.\n \n Returns\n -------\n v : ndarray with shape of x\n Scaling vector.\n dv : ndarray with shape of x\n Derivatives of v[i] with respect to x[i], diagonal elements of v's\n Jacobian.\n \n References\n ----------\n .. [1] M.A. Branch, T.F. Coleman, and Y. Li, \"A Subspace, Interior,\n and Conjugate Gradient Method for Large-Scale Bound-Constrained\n Minimization Problems,\" SIAM Journal on Scientific Computing,\n Vol. 
21, Number 1, pp 1-23, 1999.\n \"\"\"\n v = np.ones_like(x)\n dv = np.zeros_like(x)\n\n mask = (g < 0) & np.isfinite(ub)\n v[mask] = ub[mask] - x[mask]\n dv[mask] = -1\n\n mask = (g > 0) & np.isfinite(lb)\n v[mask] = x[mask] - lb[mask]\n dv[mask] = 1\n\n return v, dv\n\n\ndef reflective_transformation(y, lb, ub):\n \"\"\"Compute reflective transformation and its gradient.\"\"\"\n if in_bounds(y, lb, ub):\n return y, np.ones_like(y)\n\n lb_finite = np.isfinite(lb)\n ub_finite = np.isfinite(ub)\n\n x = y.copy()\n g_negative = np.zeros_like(y, dtype=bool)\n\n mask = lb_finite & ~ub_finite\n x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])\n g_negative[mask] = y[mask] < lb[mask]\n\n mask = ~lb_finite & ub_finite\n x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])\n g_negative[mask] = y[mask] > ub[mask]\n\n mask = lb_finite & ub_finite\n d = ub - lb\n t = np.remainder(y[mask] - lb[mask], 2 * d[mask])\n x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)\n g_negative[mask] = t > d[mask]\n\n g = np.ones_like(y)\n g[g_negative] = -1\n\n return x, g\n\n\n# Functions to display algorithm's progress.\n\n\ndef print_header_nonlinear():\n print(\"{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}{5:^15}\"\n .format(\"Iteration\", \"Total nfev\", \"Cost\", \"Cost reduction\",\n \"Step norm\", \"Optimality\"))\n\n\ndef print_iteration_nonlinear(iteration, nfev, cost, cost_reduction,\n step_norm, optimality):\n if cost_reduction is None:\n cost_reduction = \" \" * 15\n else:\n cost_reduction = \"{0:^15.2e}\".format(cost_reduction)\n\n if step_norm is None:\n step_norm = \" \" * 15\n else:\n step_norm = \"{0:^15.2e}\".format(step_norm)\n\n print(\"{0:^15}{1:^15}{2:^15.4e}{3}{4}{5:^15.2e}\"\n .format(iteration, nfev, cost, cost_reduction,\n step_norm, optimality))\n\n\ndef print_header_linear():\n print(\"{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}\"\n .format(\"Iteration\", \"Cost\", \"Cost reduction\", \"Step norm\",\n \"Optimality\"))\n\n\ndef print_iteration_linear(iteration, cost, cost_reduction, step_norm,\n optimality):\n if cost_reduction is None:\n cost_reduction = \" \" * 15\n else:\n cost_reduction = \"{0:^15.2e}\".format(cost_reduction)\n\n if step_norm is None:\n step_norm = \" \" * 15\n else:\n step_norm = \"{0:^15.2e}\".format(step_norm)\n\n print(\"{0:^15}{1:^15.4e}{2}{3}{4:^15.2e}\".format(\n iteration, cost, cost_reduction, step_norm, optimality))\n\n\n# Simple helper functions.\n\n\ndef compute_grad(J, f):\n \"\"\"Compute gradient of the least-squares cost function.\"\"\"\n if isinstance(J, LinearOperator):\n return J.rmatvec(f)\n else:\n return J.T.dot(f)\n\n\ndef compute_jac_scale(J, scale_inv_old=None):\n \"\"\"Compute variables scale based on the Jacobian matrix.\"\"\"\n if issparse(J):\n scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5\n else:\n scale_inv = np.sum(J**2, axis=0)**0.5\n\n if scale_inv_old is None:\n scale_inv[scale_inv == 0] = 1\n else:\n scale_inv = np.maximum(scale_inv, scale_inv_old)\n\n return 1 / scale_inv, scale_inv\n\n\ndef left_multiplied_operator(J, d):\n \"\"\"Return diag(d) J as LinearOperator.\"\"\"\n J = aslinearoperator(J)\n\n def matvec(x):\n return d * J.matvec(x)\n\n def matmat(X):\n return d[:, np.newaxis] * J.matmat(X)\n\n def rmatvec(x):\n return J.rmatvec(x.ravel() * d)\n\n return LinearOperator(J.shape, matvec=matvec, matmat=matmat,\n rmatvec=rmatvec)\n\n\ndef right_multiplied_operator(J, d):\n \"\"\"Return J diag(d) as LinearOperator.\"\"\"\n J = aslinearoperator(J)\n\n def matvec(x):\n return J.matvec(np.ravel(x) * d)\n\n def matmat(X):\n return 
J.matmat(X * d[:, np.newaxis])\n\n def rmatvec(x):\n return d * J.rmatvec(x)\n\n return LinearOperator(J.shape, matvec=matvec, matmat=matmat,\n rmatvec=rmatvec)\n\n\ndef regularized_lsq_operator(J, diag):\n \"\"\"Return a matrix arising in regularized least squares as LinearOperator.\n \n The matrix is\n [ J ]\n [ D ]\n where D is diagonal matrix with elements from `diag`.\n \"\"\"\n J = aslinearoperator(J)\n m, n = J.shape\n\n def matvec(x):\n return np.hstack((J.matvec(x), diag * x))\n\n def rmatvec(x):\n x1 = x[:m]\n x2 = x[m:]\n return J.rmatvec(x1) + diag * x2\n\n return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)\n\n\ndef right_multiply(J, d, copy=True):\n \"\"\"Compute J diag(d).\n \n If `copy` is False, `J` is modified in place (unless being LinearOperator).\n \"\"\"\n if copy and not isinstance(J, LinearOperator):\n J = J.copy()\n\n if issparse(J):\n J.data *= d.take(J.indices, mode='clip') # scikit-learn recipe.\n elif isinstance(J, LinearOperator):\n J = right_multiplied_operator(J, d)\n else:\n J *= d\n\n return J\n\n\ndef left_multiply(J, d, copy=True):\n \"\"\"Compute diag(d) J.\n \n If `copy` is False, `J` is modified in place (unless being LinearOperator).\n \"\"\"\n if copy and not isinstance(J, LinearOperator):\n J = J.copy()\n\n if issparse(J):\n J.data *= np.repeat(d, np.diff(J.indptr)) # scikit-learn recipe.\n elif isinstance(J, LinearOperator):\n J = left_multiplied_operator(J, d)\n else:\n J *= d[:, np.newaxis]\n\n return J\n\n\ndef check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):\n \"\"\"Check termination condition for nonlinear least squares.\"\"\"\n ftol_satisfied = dF < ftol * F and ratio > 0.25\n xtol_satisfied = dx_norm < xtol * (xtol + x_norm)\n\n if ftol_satisfied and xtol_satisfied:\n return 4\n elif ftol_satisfied:\n return 2\n elif xtol_satisfied:\n return 3\n else:\n return None\n\n\ndef scale_for_robust_loss_function(J, f, rho):\n \"\"\"Scale Jacobian and residuals for a robust loss function.\n \n Arrays are modified in place.\n \"\"\"\n J_scale = rho[1] + 2 * rho[2] * f**2\n J_scale[J_scale < EPS] = EPS\n J_scale **= 0.5\n\n f *= rho[1] / J_scale\n\n return left_multiply(J, J_scale, copy=False), f\n", "path": "scipy/optimize/_lsq/common.py" } ]
diff --git a/scipy/optimize/_lsq/common.py b/scipy/optimize/_lsq/common.py index 65444646ef4e..3bde207034a0 100644 --- a/scipy/optimize/_lsq/common.py +++ b/scipy/optimize/_lsq/common.py @@ -319,7 +319,7 @@ def minimize_quadratic_1d(a, b, lb, ub, c=0): if lb < extremum < ub: t.append(extremum) t = np.asarray(t) - y = a * t**2 + b * t + c + y = t * (a * t + b) + c min_index = np.argmin(y) return t[min_index], y[min_index] diff --git a/scipy/optimize/tests/test_lsq_common.py b/scipy/optimize/tests/test_lsq_common.py index bda69c9ec99a..35ee2f96d05b 100644 --- a/scipy/optimize/tests/test_lsq_common.py +++ b/scipy/optimize/tests/test_lsq_common.py @@ -156,20 +156,45 @@ def test_minimize_quadratic_1d(self): t, y = minimize_quadratic_1d(a, b, 1, 2) assert_equal(t, 1) - assert_equal(y, a * t**2 + b * t) + assert_allclose(y, a * t**2 + b * t, rtol=1e-15) t, y = minimize_quadratic_1d(a, b, -2, -1) assert_equal(t, -1) - assert_equal(y, a * t**2 + b * t) + assert_allclose(y, a * t**2 + b * t, rtol=1e-15) t, y = minimize_quadratic_1d(a, b, -1, 1) assert_equal(t, 0.1) - assert_equal(y, a * t**2 + b * t) + assert_allclose(y, a * t**2 + b * t, rtol=1e-15) c = 10 t, y = minimize_quadratic_1d(a, b, -1, 1, c=c) assert_equal(t, 0.1) - assert_equal(y, a * t**2 + b * t + c) + assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf, c=c) + assert_equal(t, 0.1) + assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, 0, np.inf, c=c) + assert_equal(t, 0.1) + assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) + + t, y = minimize_quadratic_1d(a, b, -np.inf, 0, c=c) + assert_equal(t, 0) + assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) + + a = -1 + b = 0.2 + t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf) + assert_equal(y, -np.inf) + + t, y = minimize_quadratic_1d(a, b, 0, np.inf) + assert_equal(t, np.inf) + assert_equal(y, -np.inf) + + t, y = minimize_quadratic_1d(a, b, -np.inf, 0) + assert_equal(t, -np.inf) + assert_equal(y, -np.inf) def test_evaluate_quadratic(self): s = np.array([1.0, -1.0])
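The one-line change to `minimize_quadratic_1d` rewrites the polynomial in nested (Horner) form. With an infinite bound, the old expression adds `-inf` and `+inf`, whereas the new one multiplies `inf` by `-inf` and cleanly returns `-inf`, so the minimum over the candidate points stays well defined, which is exactly what the added tests with `a = -1`, `b = 0.2` and infinite bounds assert. A quick check of the two evaluation orders:

```
import numpy as np

a, b, c = -1.0, 0.2, 10.0    # the concave case from the new tests
t = np.array([0.0, np.inf])

old = a * t**2 + b * t + c   # -inf + inf -> nan for the infinite candidate
new = t * (a * t + b) + c    # inf * (-inf) -> -inf, no nan

print(old)                   # [10. nan]
print(new)                   # [ 10. -inf]
print(np.argmin(new))        # 1 -- the unbounded descent direction is identified
```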
iterative__dvc-562
dvc repro --force does not work
It looks like the flag is simply ignored: `dvc repro --force` still skips stages that report no changes, exactly like a plain `dvc repro`.
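The files below show where the flag gets lost: `Project._reproduce_stage` returns early whenever `stage.changed()` is false, without consulting `force`, so the option is parsed but has no effect. The fix folds `force` into that early-return guard. A tiny self-contained sketch of the guard before and after (condensed from the method shown below):

```
def should_skip_before(changed: bool, force: bool) -> bool:
    # old guard: `if not stages[node].changed(): return []`
    return not changed

def should_skip_after(changed: bool, force: bool) -> bool:
    # new guard: `if not stages[node].changed() and not force: return []`
    return not changed and not force

for changed in (False, True):
    for force in (False, True):
        print(f"changed={changed!s:<5} force={force!s:<5} "
              f"skip_before={should_skip_before(changed, force)!s:<5} "
              f"skip_after={should_skip_after(changed, force)}")
```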
[ { "content": "import os\nimport stat\nimport networkx as nx\n\nimport dvc.cloud.base as cloud\n\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\nfrom dvc.stage import Stage, Output\nfrom dvc.config import Config\nfrom dvc.state import State\nfrom dvc.lock import Lock\nfrom dvc.scm import SCM\nfrom dvc.cache import Cache\nfrom dvc.cloud.data_cloud import DataCloud\nfrom dvc.system import System\n\n\nclass StageNotFoundError(DvcException):\n def __init__(self, path):\n msg = 'Stage file {} does not exist'.format(path)\n super(StageNotFoundError, self).__init__(msg)\n\n\nclass ReproductionError(DvcException):\n def __init__(self, dvc_file_name, ex):\n msg = 'Failed to reproduce \\'{}\\''.format(dvc_file_name)\n super(ReproductionError, self).__init__(msg, cause=ex)\n\n\nclass Project(object):\n DVC_DIR = '.dvc'\n\n def __init__(self, root_dir):\n self.root_dir = os.path.abspath(os.path.realpath(root_dir))\n self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR)\n\n self.scm = SCM(self.root_dir)\n self.lock = Lock(self.dvc_dir)\n self.cache = Cache(self.dvc_dir)\n self.state = State(self.root_dir, self.dvc_dir)\n self.config = Config(self.dvc_dir)\n self.logger = Logger(self.config._config)\n self.cloud = DataCloud(self.cache, self.config._config)\n\n @staticmethod\n def init(root_dir=os.curdir):\n \"\"\"\n Initiate dvc project in directory.\n\n Args:\n root_dir: Path to project's root directory.\n\n Returns:\n Project instance.\n\n Raises:\n KeyError: Raises an exception.\n \"\"\"\n root_dir = os.path.abspath(root_dir)\n dvc_dir = os.path.join(root_dir, Project.DVC_DIR)\n os.mkdir(dvc_dir)\n\n config = Config.init(dvc_dir)\n cache = Cache.init(dvc_dir)\n state = State.init(root_dir, dvc_dir)\n lock = Lock(dvc_dir)\n\n scm = SCM(root_dir)\n scm.ignore_list([cache.cache_dir,\n state.state_file,\n lock.lock_file])\n\n ignore_file = os.path.join(dvc_dir, scm.ignore_file())\n scm.add([config.config_file, ignore_file])\n\n return Project(root_dir)\n\n def to_dvc_path(self, path):\n return os.path.relpath(path, self.root_dir)\n\n def add(self, fname):\n out = os.path.basename(fname)\n stage_fname = out + Stage.STAGE_FILE_SUFFIX\n cwd = os.path.dirname(os.path.abspath(fname))\n stage = Stage.loads(project=self,\n cmd=None,\n deps=[],\n outs=[out],\n fname=stage_fname,\n cwd=cwd)\n\n stage.save()\n stage.dump()\n return stage\n\n def remove(self, target):\n if not Stage.is_stage_file(target):\n raise StageNotFoundError(target)\n\n stage = Stage.load(self, target)\n for out in stage.outs:\n out.remove()\n\n return stage\n\n def run(self,\n cmd=None,\n deps=[],\n outs=[],\n outs_no_cache=[],\n fname=Stage.STAGE_FILE,\n cwd=os.curdir,\n no_exec=False):\n stage = Stage.loads(project=self,\n fname=fname,\n cmd=cmd,\n cwd=cwd,\n outs=outs,\n outs_no_cache=outs_no_cache,\n deps=deps)\n if not no_exec:\n stage.run()\n stage.dump()\n return stage\n\n def _reproduce_stage(self, stages, node, force):\n if not stages[node].changed():\n return []\n\n stages[node].reproduce(force=force)\n stages[node].dump()\n return [stages[node]]\n\n def reproduce(self, target, recursive=True, force=False):\n stages = nx.get_node_attributes(self.graph(), 'stage')\n node = os.path.relpath(os.path.abspath(target), self.root_dir)\n if node not in stages:\n raise StageNotFoundError(target)\n\n if recursive:\n return self._reproduce_stages(stages, node, force)\n\n return self._reproduce_stage(stages, node, force)\n\n def _reproduce_stages(self, stages, node, force):\n result = []\n for n in 
nx.dfs_postorder_nodes(self.graph(), node):\n try:\n result += self._reproduce_stage(stages, n, force)\n except Exception as ex:\n raise ReproductionError(stages[n].relpath, ex)\n return result\n\n def _remove_untracked_hardlinks(self):\n untracked = self.scm.untracked_files()\n cache = dict((System.inode(c), c) for c in self.cache.all())\n for file in untracked:\n inode = System.inode(file)\n if inode not in cache.keys():\n continue\n\n Logger.info(u'Remove \\'{}\\''.format(file))\n os.remove(file)\n\n dir = os.path.dirname(file)\n if len(dir) != 0 and not os.listdir(dir):\n Logger.info(u'Remove empty directory \\'{}\\''.format(dir))\n os.removedirs(dir)\n\n def checkout(self):\n self._remove_untracked_hardlinks()\n for stage in self.stages():\n stage.checkout()\n\n def _used_cache(self, target=None):\n cache_set = set()\n\n if target:\n stages = [Stage.load(self, target)]\n else:\n stages = self.stages()\n\n for stage in stages:\n for out in stage.outs:\n if not out.use_cache:\n continue\n cache_set |= set([out.cache])\n if out.is_dir_cache(out.cache) and os.path.isfile(out.cache):\n dir_cache = out.dir_cache()\n cache_set |= set(dir_cache.values())\n\n return list(cache_set)\n\n def gc(self):\n clist = self._used_cache()\n for cache in self.cache.all():\n if cache in clist:\n continue\n os.unlink(cache)\n self.logger.info(u'\\'{}\\' was removed'.format(self.to_dvc_path(cache)))\n\n def push(self, target=None, jobs=1):\n return self.cloud.push(self._used_cache(target), jobs)\n\n def fetch(self, target=None, jobs=1):\n return self.cloud.pull(self._used_cache(target), jobs)\n\n def pull(self, target=None, jobs=1):\n ret = self.fetch(target, jobs)\n self.checkout()\n return ret\n\n def _local_status(self, target=None):\n status = {}\n\n if target:\n stages = [Stage.load(self, target)]\n else:\n stages = self.stages()\n\n for stage in self.stages():\n status.update(stage.status())\n\n return status\n\n def _cloud_status(self, target=None, jobs=1):\n status = {}\n for target, ret in self.cloud.status(self._used_cache(target), jobs):\n if ret == cloud.STATUS_UNKNOWN or ret == cloud.STATUS_OK:\n continue\n\n prefix_map = {\n cloud.STATUS_DELETED: 'deleted',\n cloud.STATUS_MODIFIED: 'modified',\n cloud.STATUS_NEW: 'new',\n }\n\n path = os.path.relpath(target, self.cache.cache_dir)\n\n status[path] = prefix_map[ret]\n\n return status\n\n def status(self, target=None, jobs=1, cloud=False):\n if cloud:\n return self._cloud_status(target, jobs)\n return self._local_status(target)\n\n def graph(self):\n G = nx.DiGraph()\n\n for stage in self.stages():\n node = os.path.relpath(stage.path, self.root_dir)\n G.add_node(node, stage=stage)\n for dep in stage.deps:\n dep_stage = dep.stage()\n if not dep_stage:\n continue\n dep_node = os.path.relpath(dep_stage.path, self.root_dir)\n G.add_node(dep_node, stage=dep_stage)\n G.add_edge(node, dep_node)\n\n return G\n\n def stages(self):\n stages = []\n for root, dirs, files in os.walk(self.root_dir):\n for fname in files:\n path = os.path.join(root, fname)\n if not Stage.is_stage_file(path):\n continue\n stages.append(Stage.load(self, path))\n return stages\n\n def outs(self):\n outs = []\n for stage in self.stages():\n outs += stage.outs\n return outs\n", "path": "dvc/project.py" } ]
[ { "content": "import os\nimport stat\nimport networkx as nx\n\nimport dvc.cloud.base as cloud\n\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\nfrom dvc.stage import Stage, Output\nfrom dvc.config import Config\nfrom dvc.state import State\nfrom dvc.lock import Lock\nfrom dvc.scm import SCM\nfrom dvc.cache import Cache\nfrom dvc.cloud.data_cloud import DataCloud\nfrom dvc.system import System\n\n\nclass StageNotFoundError(DvcException):\n def __init__(self, path):\n msg = 'Stage file {} does not exist'.format(path)\n super(StageNotFoundError, self).__init__(msg)\n\n\nclass ReproductionError(DvcException):\n def __init__(self, dvc_file_name, ex):\n msg = 'Failed to reproduce \\'{}\\''.format(dvc_file_name)\n super(ReproductionError, self).__init__(msg, cause=ex)\n\n\nclass Project(object):\n DVC_DIR = '.dvc'\n\n def __init__(self, root_dir):\n self.root_dir = os.path.abspath(os.path.realpath(root_dir))\n self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR)\n\n self.scm = SCM(self.root_dir)\n self.lock = Lock(self.dvc_dir)\n self.cache = Cache(self.dvc_dir)\n self.state = State(self.root_dir, self.dvc_dir)\n self.config = Config(self.dvc_dir)\n self.logger = Logger(self.config._config)\n self.cloud = DataCloud(self.cache, self.config._config)\n\n @staticmethod\n def init(root_dir=os.curdir):\n \"\"\"\n Initiate dvc project in directory.\n\n Args:\n root_dir: Path to project's root directory.\n\n Returns:\n Project instance.\n\n Raises:\n KeyError: Raises an exception.\n \"\"\"\n root_dir = os.path.abspath(root_dir)\n dvc_dir = os.path.join(root_dir, Project.DVC_DIR)\n os.mkdir(dvc_dir)\n\n config = Config.init(dvc_dir)\n cache = Cache.init(dvc_dir)\n state = State.init(root_dir, dvc_dir)\n lock = Lock(dvc_dir)\n\n scm = SCM(root_dir)\n scm.ignore_list([cache.cache_dir,\n state.state_file,\n lock.lock_file])\n\n ignore_file = os.path.join(dvc_dir, scm.ignore_file())\n scm.add([config.config_file, ignore_file])\n\n return Project(root_dir)\n\n def to_dvc_path(self, path):\n return os.path.relpath(path, self.root_dir)\n\n def add(self, fname):\n out = os.path.basename(fname)\n stage_fname = out + Stage.STAGE_FILE_SUFFIX\n cwd = os.path.dirname(os.path.abspath(fname))\n stage = Stage.loads(project=self,\n cmd=None,\n deps=[],\n outs=[out],\n fname=stage_fname,\n cwd=cwd)\n\n stage.save()\n stage.dump()\n return stage\n\n def remove(self, target):\n if not Stage.is_stage_file(target):\n raise StageNotFoundError(target)\n\n stage = Stage.load(self, target)\n for out in stage.outs:\n out.remove()\n\n return stage\n\n def run(self,\n cmd=None,\n deps=[],\n outs=[],\n outs_no_cache=[],\n fname=Stage.STAGE_FILE,\n cwd=os.curdir,\n no_exec=False):\n stage = Stage.loads(project=self,\n fname=fname,\n cmd=cmd,\n cwd=cwd,\n outs=outs,\n outs_no_cache=outs_no_cache,\n deps=deps)\n if not no_exec:\n stage.run()\n stage.dump()\n return stage\n\n def _reproduce_stage(self, stages, node, force):\n if not stages[node].changed() and not force:\n return []\n\n stages[node].reproduce(force=force)\n stages[node].dump()\n return [stages[node]]\n\n def reproduce(self, target, recursive=True, force=False):\n stages = nx.get_node_attributes(self.graph(), 'stage')\n node = os.path.relpath(os.path.abspath(target), self.root_dir)\n if node not in stages:\n raise StageNotFoundError(target)\n\n if recursive:\n return self._reproduce_stages(stages, node, force)\n\n return self._reproduce_stage(stages, node, force)\n\n def _reproduce_stages(self, stages, node, force):\n result = []\n for n in 
nx.dfs_postorder_nodes(self.graph(), node):\n try:\n result += self._reproduce_stage(stages, n, force)\n except Exception as ex:\n raise ReproductionError(stages[n].relpath, ex)\n return result\n\n def _remove_untracked_hardlinks(self):\n untracked = self.scm.untracked_files()\n cache = dict((System.inode(c), c) for c in self.cache.all())\n for file in untracked:\n inode = System.inode(file)\n if inode not in cache.keys():\n continue\n\n Logger.info(u'Remove \\'{}\\''.format(file))\n os.remove(file)\n\n dir = os.path.dirname(file)\n if len(dir) != 0 and not os.listdir(dir):\n Logger.info(u'Remove empty directory \\'{}\\''.format(dir))\n os.removedirs(dir)\n\n def checkout(self):\n self._remove_untracked_hardlinks()\n for stage in self.stages():\n stage.checkout()\n\n def _used_cache(self, target=None):\n cache_set = set()\n\n if target:\n stages = [Stage.load(self, target)]\n else:\n stages = self.stages()\n\n for stage in stages:\n for out in stage.outs:\n if not out.use_cache:\n continue\n cache_set |= set([out.cache])\n if out.is_dir_cache(out.cache) and os.path.isfile(out.cache):\n dir_cache = out.dir_cache()\n cache_set |= set(dir_cache.values())\n\n return list(cache_set)\n\n def gc(self):\n clist = self._used_cache()\n for cache in self.cache.all():\n if cache in clist:\n continue\n os.unlink(cache)\n self.logger.info(u'\\'{}\\' was removed'.format(self.to_dvc_path(cache)))\n\n def push(self, target=None, jobs=1):\n return self.cloud.push(self._used_cache(target), jobs)\n\n def fetch(self, target=None, jobs=1):\n return self.cloud.pull(self._used_cache(target), jobs)\n\n def pull(self, target=None, jobs=1):\n ret = self.fetch(target, jobs)\n self.checkout()\n return ret\n\n def _local_status(self, target=None):\n status = {}\n\n if target:\n stages = [Stage.load(self, target)]\n else:\n stages = self.stages()\n\n for stage in self.stages():\n status.update(stage.status())\n\n return status\n\n def _cloud_status(self, target=None, jobs=1):\n status = {}\n for target, ret in self.cloud.status(self._used_cache(target), jobs):\n if ret == cloud.STATUS_UNKNOWN or ret == cloud.STATUS_OK:\n continue\n\n prefix_map = {\n cloud.STATUS_DELETED: 'deleted',\n cloud.STATUS_MODIFIED: 'modified',\n cloud.STATUS_NEW: 'new',\n }\n\n path = os.path.relpath(target, self.cache.cache_dir)\n\n status[path] = prefix_map[ret]\n\n return status\n\n def status(self, target=None, jobs=1, cloud=False):\n if cloud:\n return self._cloud_status(target, jobs)\n return self._local_status(target)\n\n def graph(self):\n G = nx.DiGraph()\n\n for stage in self.stages():\n node = os.path.relpath(stage.path, self.root_dir)\n G.add_node(node, stage=stage)\n for dep in stage.deps:\n dep_stage = dep.stage()\n if not dep_stage:\n continue\n dep_node = os.path.relpath(dep_stage.path, self.root_dir)\n G.add_node(dep_node, stage=dep_stage)\n G.add_edge(node, dep_node)\n\n return G\n\n def stages(self):\n stages = []\n for root, dirs, files in os.walk(self.root_dir):\n for fname in files:\n path = os.path.join(root, fname)\n if not Stage.is_stage_file(path):\n continue\n stages.append(Stage.load(self, path))\n return stages\n\n def outs(self):\n outs = []\n for stage in self.stages():\n outs += stage.outs\n return outs\n", "path": "dvc/project.py" } ]
diff --git a/.appveyor.yml b/.appveyor.yml index ec4f0c6e3e..c00ffffaed 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -41,14 +41,11 @@ environment: # PYTHON_ARCH: "64" install: - - cinst graphviz - cinst wget - cinst awscli - cinst gsutil - cinst openssl.light - - wget --no-check-certificate https://github.com/dpinney/omf/raw/master/omf/static/pygraphviz-1.3.1-cp27-none-win32.whl - - pip install --upgrade pip setuptools - - pip install pygraphviz-1.3.1-cp27-none-win32.whl + - pip install --user --upgrade pip - pip install -r requirements.txt - python setup.py install diff --git a/dvc/project.py b/dvc/project.py index 7e48eda13b..0a9e3e1a4c 100644 --- a/dvc/project.py +++ b/dvc/project.py @@ -125,7 +125,7 @@ def run(self, return stage def _reproduce_stage(self, stages, node, force): - if not stages[node].changed(): + if not stages[node].changed() and not force: return [] stages[node].reproduce(force=force) diff --git a/tests/test_repro.py b/tests/test_repro.py index fc995a67d4..afd58ddd58 100644 --- a/tests/test_repro.py +++ b/tests/test_repro.py @@ -25,6 +25,12 @@ def setUp(self): cmd='python {} {} {}'.format(self.CODE, self.FOO, self.file1)) +class TestReproForce(TestRepro): + def test(self): + stages = self.dvc.reproduce(self.file1_stage, force=True) + self.assertEqual(len(stages), 2) + + class TestReproChangedCode(TestRepro): def test(self): self.swap_code()
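For context, here is a minimal standalone sketch (with a hypothetical `Stage` stand-in, not DVC's real class) of the control-flow change in the diff above: without the added `and not force`, an unchanged stage returns early and `force=True` passed to `reproduce()` is silently ignored.

```python
# Minimal sketch of the guard fixed in the diff above; Stage is a hypothetical stand-in.
class Stage:
    def changed(self) -> bool:
        return False  # pretend nothing changed since the last run


def reproduce_stage(stage: Stage, force: bool) -> list:
    # Old guard: `if not stage.changed(): return []` -- force never reached this point.
    # New guard: skip only when the stage is unchanged AND the user did not force it.
    if not stage.changed() and not force:
        return []
    return [stage]  # re-run (reproduce) the stage


print(len(reproduce_stage(Stage(), force=True)))   # 1 -- forced re-run happens
print(len(reproduce_stage(Stage(), force=False)))  # 0 -- unchanged stage is skipped
```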
pypa__pip-10583
`vendoring` is broken, due to a cyclic dependency during license fetching Well, the lack of maintenance of the license fetching logic in `vendoring` has come to bite us. :) `flit` recently established a cyclic dependency, by depending on `tomli`: see https://github.com/takluyver/flit/issues/451 and https://flit.readthedocs.io/en/latest/bootstrap.html. We get licenses from sdists in `vendoring` (which means building metadata / wheels -- leading to https://github.com/pradyunsg/vendoring/issues/1). Since flit is no longer bootstrappable through regular mechanisms, it'd be best to switch to using wheels for the license fetch phase. This is the tracking issue for actually fixing this, and adopting the fix in our workflow.
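A hedged, stdlib-only illustration of why fetching licenses from wheels sidesteps the bootstrap problem described above: a wheel is a plain zip archive, so a license file can be read straight out of it without building anything, whereas an sdist must be built first. This is not how `vendoring` itself is implemented, and the wheel filename in the comment is hypothetical.

```python
# Read license-like files straight out of a wheel without any build step.
import zipfile


def licenses_from_wheel(wheel_path: str) -> dict:
    """Return {archive member name: text} for license-ish files in a wheel."""
    found = {}
    with zipfile.ZipFile(wheel_path) as whl:
        for name in whl.namelist():
            base = name.rsplit("/", 1)[-1].lower()
            if base.startswith(("license", "copying")):
                found[name] = whl.read(name).decode("utf-8", errors="replace")
    return found


# e.g. licenses_from_wheel("tomli-1.2.2-py3-none-any.whl")  # hypothetical filename
```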
[ { "content": "\"\"\"Automation using nox.\n\"\"\"\n\nimport glob\nimport os\nimport shutil\nimport sys\nfrom pathlib import Path\nfrom typing import Iterator, List, Tuple\n\nimport nox\n\n# fmt: off\nsys.path.append(\".\")\nfrom tools import release # isort:skip # noqa\nsys.path.pop()\n# fmt: on\n\nnox.options.reuse_existing_virtualenvs = True\nnox.options.sessions = [\"lint\"]\n\nLOCATIONS = {\n \"common-wheels\": \"tests/data/common_wheels\",\n \"protected-pip\": \"tools/tox_pip.py\",\n}\nREQUIREMENTS = {\n \"docs\": \"docs/requirements.txt\",\n \"tests\": \"tests/requirements.txt\",\n \"common-wheels\": \"tests/requirements-common_wheels.txt\",\n}\n\nAUTHORS_FILE = \"AUTHORS.txt\"\nVERSION_FILE = \"src/pip/__init__.py\"\n\n\ndef run_with_protected_pip(session: nox.Session, *arguments: str) -> None:\n \"\"\"Do a session.run(\"pip\", *arguments), using a \"protected\" pip.\n\n This invokes a wrapper script, that forwards calls to original virtualenv\n (stable) version, and not the code being tested. This ensures pip being\n used is not the code being tested.\n \"\"\"\n env = {\"VIRTUAL_ENV\": session.virtualenv.location}\n\n command = (\"python\", LOCATIONS[\"protected-pip\"]) + arguments\n session.run(*command, env=env, silent=True)\n\n\ndef should_update_common_wheels() -> bool:\n # If the cache hasn't been created, create it.\n if not os.path.exists(LOCATIONS[\"common-wheels\"]):\n return True\n\n # If the requirements was updated after cache, we'll repopulate it.\n cache_last_populated_at = os.path.getmtime(LOCATIONS[\"common-wheels\"])\n requirements_updated_at = os.path.getmtime(REQUIREMENTS[\"common-wheels\"])\n need_to_repopulate = requirements_updated_at > cache_last_populated_at\n\n # Clear the stale cache.\n if need_to_repopulate:\n shutil.rmtree(LOCATIONS[\"common-wheels\"], ignore_errors=True)\n\n return need_to_repopulate\n\n\n# -----------------------------------------------------------------------------\n# Development Commands\n# These are currently prototypes to evaluate whether we want to switch over\n# completely to nox for all our automation. 
Contributors should prefer using\n# `tox -e ...` until this note is removed.\n# -----------------------------------------------------------------------------\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\", \"3.10\", \"pypy3\"])\ndef test(session: nox.Session) -> None:\n # Get the common wheels.\n if should_update_common_wheels():\n # fmt: off\n run_with_protected_pip(\n session,\n \"wheel\",\n \"-w\", LOCATIONS[\"common-wheels\"],\n \"-r\", REQUIREMENTS[\"common-wheels\"],\n )\n # fmt: on\n else:\n msg = f\"Re-using existing common-wheels at {LOCATIONS['common-wheels']}.\"\n session.log(msg)\n\n # Build source distribution\n sdist_dir = os.path.join(session.virtualenv.location, \"sdist\")\n if os.path.exists(sdist_dir):\n shutil.rmtree(sdist_dir, ignore_errors=True)\n\n # fmt: off\n session.run(\n \"python\", \"setup.py\", \"sdist\", \"--formats=zip\", \"--dist-dir\", sdist_dir,\n silent=True,\n )\n # fmt: on\n\n generated_files = os.listdir(sdist_dir)\n assert len(generated_files) == 1\n generated_sdist = os.path.join(sdist_dir, generated_files[0])\n\n # Install source distribution\n run_with_protected_pip(session, \"install\", generated_sdist)\n\n # Install test dependencies\n run_with_protected_pip(session, \"install\", \"-r\", REQUIREMENTS[\"tests\"])\n\n # Parallelize tests as much as possible, by default.\n arguments = session.posargs or [\"-n\", \"auto\"]\n\n # Run the tests\n # LC_CTYPE is set to get UTF-8 output inside of the subprocesses that our\n # tests use.\n session.run(\"pytest\", *arguments, env={\"LC_CTYPE\": \"en_US.UTF-8\"})\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-e\", \".\")\n session.install(\"-r\", REQUIREMENTS[\"docs\"])\n\n def get_sphinx_build_command(kind: str) -> List[str]:\n # Having the conf.py in the docs/html is weird but needed because we\n # can not use a different configuration directory vs source directory\n # on RTD currently. 
So, we'll pass \"-c docs/html\" here.\n # See https://github.com/rtfd/readthedocs.org/issues/1543.\n # fmt: off\n return [\n \"sphinx-build\",\n \"-W\",\n \"-c\", \"docs/html\", # see note above\n \"-d\", \"docs/build/doctrees/\" + kind,\n \"-b\", kind,\n \"docs/\" + kind,\n \"docs/build/\" + kind,\n ]\n # fmt: on\n\n session.run(*get_sphinx_build_command(\"html\"))\n session.run(*get_sphinx_build_command(\"man\"))\n\n\[email protected](name=\"docs-live\")\ndef docs_live(session: nox.Session) -> None:\n session.install(\"-e\", \".\")\n session.install(\"-r\", REQUIREMENTS[\"docs\"], \"sphinx-autobuild\")\n\n session.run(\n \"sphinx-autobuild\",\n \"-d=docs/build/doctrees/livehtml\",\n \"-b=dirhtml\",\n \"docs/html\",\n \"docs/build/livehtml\",\n *session.posargs,\n )\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n\n if session.posargs:\n args = session.posargs + [\"--all-files\"]\n else:\n args = [\"--all-files\", \"--show-diff-on-failure\"]\n\n session.run(\"pre-commit\", \"run\", *args)\n\n\[email protected]\ndef vendoring(session: nox.Session) -> None:\n session.install(\"vendoring~=1.0.0\")\n\n if \"--upgrade\" not in session.posargs:\n session.run(\"vendoring\", \"sync\", \"-v\")\n return\n\n def pinned_requirements(path: Path) -> Iterator[Tuple[str, str]]:\n for line in path.read_text().splitlines(keepends=False):\n one, sep, two = line.partition(\"==\")\n if not sep:\n continue\n name = one.strip()\n version = two.split(\"#\", 1)[0].strip()\n if name and version:\n yield name, version\n\n vendor_txt = Path(\"src/pip/_vendor/vendor.txt\")\n for name, old_version in pinned_requirements(vendor_txt):\n if name == \"setuptools\":\n continue\n\n # update requirements.txt\n session.run(\"vendoring\", \"update\", \".\", name)\n\n # get the updated version\n new_version = old_version\n for inner_name, inner_version in pinned_requirements(vendor_txt):\n if inner_name == name:\n # this is a dedicated assignment, to make flake8 happy\n new_version = inner_version\n break\n else:\n session.error(f\"Could not find {name} in {vendor_txt}\")\n\n # check if the version changed.\n if new_version == old_version:\n continue # no change, nothing more to do here.\n\n # synchronize the contents\n session.run(\"vendoring\", \"sync\", \".\")\n\n # Determine the correct message\n message = f\"Upgrade {name} to {new_version}\"\n\n # Write our news fragment\n news_file = Path(\"news\") / (name + \".vendor.rst\")\n news_file.write_text(message + \"\\n\") # \"\\n\" appeases end-of-line-fixer\n\n # Commit the changes\n release.commit_file(session, \".\", message=message)\n\n\n# -----------------------------------------------------------------------------\n# Release Commands\n# -----------------------------------------------------------------------------\[email protected](name=\"prepare-release\")\ndef prepare_release(session: nox.Session) -> None:\n version = release.get_version_from_arguments(session)\n if not version:\n session.error(\"Usage: nox -s prepare-release -- <version>\")\n\n session.log(\"# Ensure nothing is staged\")\n if release.modified_files_in_git(\"--staged\"):\n session.error(\"There are files staged in git\")\n\n session.log(f\"# Updating {AUTHORS_FILE}\")\n release.generate_authors(AUTHORS_FILE)\n if release.modified_files_in_git():\n release.commit_file(session, AUTHORS_FILE, message=f\"Update {AUTHORS_FILE}\")\n else:\n session.log(f\"# No changes to {AUTHORS_FILE}\")\n\n session.log(\"# Generating NEWS\")\n release.generate_news(session, 
version)\n\n session.log(f\"# Bumping for release {version}\")\n release.update_version_file(version, VERSION_FILE)\n release.commit_file(session, VERSION_FILE, message=\"Bump for release\")\n\n session.log(\"# Tagging release\")\n release.create_git_tag(session, version, message=f\"Release {version}\")\n\n session.log(\"# Bumping for development\")\n next_dev_version = release.get_next_development_version(version)\n release.update_version_file(next_dev_version, VERSION_FILE)\n release.commit_file(session, VERSION_FILE, message=\"Bump for development\")\n\n\[email protected](name=\"build-release\")\ndef build_release(session: nox.Session) -> None:\n version = release.get_version_from_arguments(session)\n if not version:\n session.error(\"Usage: nox -s build-release -- YY.N[.P]\")\n\n session.log(\"# Ensure no files in dist/\")\n if release.have_files_in_folder(\"dist\"):\n session.error(\n \"There are files in dist/. Remove them and try again. \"\n \"You can use `git clean -fxdi -- dist` command to do this\"\n )\n\n session.log(\"# Install dependencies\")\n session.install(\"setuptools\", \"wheel\", \"twine\")\n\n with release.isolated_temporary_checkout(session, version) as build_dir:\n session.log(\n \"# Start the build in an isolated, \"\n f\"temporary Git checkout at {build_dir!s}\",\n )\n with release.workdir(session, build_dir):\n tmp_dists = build_dists(session)\n\n tmp_dist_paths = (build_dir / p for p in tmp_dists)\n session.log(f\"# Copying dists from {build_dir}\")\n os.makedirs(\"dist\", exist_ok=True)\n for dist, final in zip(tmp_dist_paths, tmp_dists):\n session.log(f\"# Copying {dist} to {final}\")\n shutil.copy(dist, final)\n\n\ndef build_dists(session: nox.Session) -> List[str]:\n \"\"\"Return dists with valid metadata.\"\"\"\n session.log(\n \"# Check if there's any Git-untracked files before building the wheel\",\n )\n\n has_forbidden_git_untracked_files = any(\n # Don't report the environment this session is running in\n not untracked_file.startswith(\".nox/build-release/\")\n for untracked_file in release.get_git_untracked_files()\n )\n if has_forbidden_git_untracked_files:\n session.error(\n \"There are untracked files in the working directory. \"\n \"Remove them and try again\",\n )\n\n session.log(\"# Build distributions\")\n session.run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", silent=True)\n produced_dists = glob.glob(\"dist/*\")\n\n session.log(f\"# Verify distributions: {', '.join(produced_dists)}\")\n session.run(\"twine\", \"check\", *produced_dists, silent=True)\n\n return produced_dists\n\n\[email protected](name=\"upload-release\")\ndef upload_release(session: nox.Session) -> None:\n version = release.get_version_from_arguments(session)\n if not version:\n session.error(\"Usage: nox -s upload-release -- YY.N[.P]\")\n\n session.log(\"# Install dependencies\")\n session.install(\"twine\")\n\n distribution_files = glob.glob(\"dist/*\")\n session.log(f\"# Distribution files: {distribution_files}\")\n\n # Sanity check: Make sure there's 2 distribution files.\n count = len(distribution_files)\n if count != 2:\n session.error(\n f\"Expected 2 distribution files for upload, got {count}. 
\"\n f\"Remove dist/ and run 'nox -s build-release -- {version}'\"\n )\n # Sanity check: Make sure the files are correctly named.\n distfile_names = (os.path.basename(fn) for fn in distribution_files)\n expected_distribution_files = [\n f\"pip-{version}-py3-none-any.whl\",\n f\"pip-{version}.tar.gz\",\n ]\n if sorted(distfile_names) != sorted(expected_distribution_files):\n session.error(f\"Distribution files do not seem to be for {version} release.\")\n\n session.log(\"# Upload distributions\")\n session.run(\"twine\", \"upload\", *distribution_files)\n", "path": "noxfile.py" } ]
[ { "content": "\"\"\"Automation using nox.\n\"\"\"\n\nimport glob\nimport os\nimport shutil\nimport sys\nfrom pathlib import Path\nfrom typing import Iterator, List, Tuple\n\nimport nox\n\n# fmt: off\nsys.path.append(\".\")\nfrom tools import release # isort:skip # noqa\nsys.path.pop()\n# fmt: on\n\nnox.options.reuse_existing_virtualenvs = True\nnox.options.sessions = [\"lint\"]\n\nLOCATIONS = {\n \"common-wheels\": \"tests/data/common_wheels\",\n \"protected-pip\": \"tools/tox_pip.py\",\n}\nREQUIREMENTS = {\n \"docs\": \"docs/requirements.txt\",\n \"tests\": \"tests/requirements.txt\",\n \"common-wheels\": \"tests/requirements-common_wheels.txt\",\n}\n\nAUTHORS_FILE = \"AUTHORS.txt\"\nVERSION_FILE = \"src/pip/__init__.py\"\n\n\ndef run_with_protected_pip(session: nox.Session, *arguments: str) -> None:\n \"\"\"Do a session.run(\"pip\", *arguments), using a \"protected\" pip.\n\n This invokes a wrapper script, that forwards calls to original virtualenv\n (stable) version, and not the code being tested. This ensures pip being\n used is not the code being tested.\n \"\"\"\n env = {\"VIRTUAL_ENV\": session.virtualenv.location}\n\n command = (\"python\", LOCATIONS[\"protected-pip\"]) + arguments\n session.run(*command, env=env, silent=True)\n\n\ndef should_update_common_wheels() -> bool:\n # If the cache hasn't been created, create it.\n if not os.path.exists(LOCATIONS[\"common-wheels\"]):\n return True\n\n # If the requirements was updated after cache, we'll repopulate it.\n cache_last_populated_at = os.path.getmtime(LOCATIONS[\"common-wheels\"])\n requirements_updated_at = os.path.getmtime(REQUIREMENTS[\"common-wheels\"])\n need_to_repopulate = requirements_updated_at > cache_last_populated_at\n\n # Clear the stale cache.\n if need_to_repopulate:\n shutil.rmtree(LOCATIONS[\"common-wheels\"], ignore_errors=True)\n\n return need_to_repopulate\n\n\n# -----------------------------------------------------------------------------\n# Development Commands\n# These are currently prototypes to evaluate whether we want to switch over\n# completely to nox for all our automation. 
Contributors should prefer using\n# `tox -e ...` until this note is removed.\n# -----------------------------------------------------------------------------\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\", \"3.10\", \"pypy3\"])\ndef test(session: nox.Session) -> None:\n # Get the common wheels.\n if should_update_common_wheels():\n # fmt: off\n run_with_protected_pip(\n session,\n \"wheel\",\n \"-w\", LOCATIONS[\"common-wheels\"],\n \"-r\", REQUIREMENTS[\"common-wheels\"],\n )\n # fmt: on\n else:\n msg = f\"Re-using existing common-wheels at {LOCATIONS['common-wheels']}.\"\n session.log(msg)\n\n # Build source distribution\n sdist_dir = os.path.join(session.virtualenv.location, \"sdist\")\n if os.path.exists(sdist_dir):\n shutil.rmtree(sdist_dir, ignore_errors=True)\n\n # fmt: off\n session.run(\n \"python\", \"setup.py\", \"sdist\", \"--formats=zip\", \"--dist-dir\", sdist_dir,\n silent=True,\n )\n # fmt: on\n\n generated_files = os.listdir(sdist_dir)\n assert len(generated_files) == 1\n generated_sdist = os.path.join(sdist_dir, generated_files[0])\n\n # Install source distribution\n run_with_protected_pip(session, \"install\", generated_sdist)\n\n # Install test dependencies\n run_with_protected_pip(session, \"install\", \"-r\", REQUIREMENTS[\"tests\"])\n\n # Parallelize tests as much as possible, by default.\n arguments = session.posargs or [\"-n\", \"auto\"]\n\n # Run the tests\n # LC_CTYPE is set to get UTF-8 output inside of the subprocesses that our\n # tests use.\n session.run(\"pytest\", *arguments, env={\"LC_CTYPE\": \"en_US.UTF-8\"})\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-e\", \".\")\n session.install(\"-r\", REQUIREMENTS[\"docs\"])\n\n def get_sphinx_build_command(kind: str) -> List[str]:\n # Having the conf.py in the docs/html is weird but needed because we\n # can not use a different configuration directory vs source directory\n # on RTD currently. 
So, we'll pass \"-c docs/html\" here.\n # See https://github.com/rtfd/readthedocs.org/issues/1543.\n # fmt: off\n return [\n \"sphinx-build\",\n \"-W\",\n \"-c\", \"docs/html\", # see note above\n \"-d\", \"docs/build/doctrees/\" + kind,\n \"-b\", kind,\n \"docs/\" + kind,\n \"docs/build/\" + kind,\n ]\n # fmt: on\n\n session.run(*get_sphinx_build_command(\"html\"))\n session.run(*get_sphinx_build_command(\"man\"))\n\n\[email protected](name=\"docs-live\")\ndef docs_live(session: nox.Session) -> None:\n session.install(\"-e\", \".\")\n session.install(\"-r\", REQUIREMENTS[\"docs\"], \"sphinx-autobuild\")\n\n session.run(\n \"sphinx-autobuild\",\n \"-d=docs/build/doctrees/livehtml\",\n \"-b=dirhtml\",\n \"docs/html\",\n \"docs/build/livehtml\",\n *session.posargs,\n )\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n\n if session.posargs:\n args = session.posargs + [\"--all-files\"]\n else:\n args = [\"--all-files\", \"--show-diff-on-failure\"]\n\n session.run(\"pre-commit\", \"run\", *args)\n\n\[email protected]\ndef vendoring(session: nox.Session) -> None:\n session.install(\"vendoring~=1.2.0\")\n\n if \"--upgrade\" not in session.posargs:\n session.run(\"vendoring\", \"sync\", \"-v\")\n return\n\n def pinned_requirements(path: Path) -> Iterator[Tuple[str, str]]:\n for line in path.read_text().splitlines(keepends=False):\n one, sep, two = line.partition(\"==\")\n if not sep:\n continue\n name = one.strip()\n version = two.split(\"#\", 1)[0].strip()\n if name and version:\n yield name, version\n\n vendor_txt = Path(\"src/pip/_vendor/vendor.txt\")\n for name, old_version in pinned_requirements(vendor_txt):\n if name == \"setuptools\":\n continue\n\n # update requirements.txt\n session.run(\"vendoring\", \"update\", \".\", name)\n\n # get the updated version\n new_version = old_version\n for inner_name, inner_version in pinned_requirements(vendor_txt):\n if inner_name == name:\n # this is a dedicated assignment, to make flake8 happy\n new_version = inner_version\n break\n else:\n session.error(f\"Could not find {name} in {vendor_txt}\")\n\n # check if the version changed.\n if new_version == old_version:\n continue # no change, nothing more to do here.\n\n # synchronize the contents\n session.run(\"vendoring\", \"sync\", \".\")\n\n # Determine the correct message\n message = f\"Upgrade {name} to {new_version}\"\n\n # Write our news fragment\n news_file = Path(\"news\") / (name + \".vendor.rst\")\n news_file.write_text(message + \"\\n\") # \"\\n\" appeases end-of-line-fixer\n\n # Commit the changes\n release.commit_file(session, \".\", message=message)\n\n\n# -----------------------------------------------------------------------------\n# Release Commands\n# -----------------------------------------------------------------------------\[email protected](name=\"prepare-release\")\ndef prepare_release(session: nox.Session) -> None:\n version = release.get_version_from_arguments(session)\n if not version:\n session.error(\"Usage: nox -s prepare-release -- <version>\")\n\n session.log(\"# Ensure nothing is staged\")\n if release.modified_files_in_git(\"--staged\"):\n session.error(\"There are files staged in git\")\n\n session.log(f\"# Updating {AUTHORS_FILE}\")\n release.generate_authors(AUTHORS_FILE)\n if release.modified_files_in_git():\n release.commit_file(session, AUTHORS_FILE, message=f\"Update {AUTHORS_FILE}\")\n else:\n session.log(f\"# No changes to {AUTHORS_FILE}\")\n\n session.log(\"# Generating NEWS\")\n release.generate_news(session, 
version)\n\n session.log(f\"# Bumping for release {version}\")\n release.update_version_file(version, VERSION_FILE)\n release.commit_file(session, VERSION_FILE, message=\"Bump for release\")\n\n session.log(\"# Tagging release\")\n release.create_git_tag(session, version, message=f\"Release {version}\")\n\n session.log(\"# Bumping for development\")\n next_dev_version = release.get_next_development_version(version)\n release.update_version_file(next_dev_version, VERSION_FILE)\n release.commit_file(session, VERSION_FILE, message=\"Bump for development\")\n\n\[email protected](name=\"build-release\")\ndef build_release(session: nox.Session) -> None:\n version = release.get_version_from_arguments(session)\n if not version:\n session.error(\"Usage: nox -s build-release -- YY.N[.P]\")\n\n session.log(\"# Ensure no files in dist/\")\n if release.have_files_in_folder(\"dist\"):\n session.error(\n \"There are files in dist/. Remove them and try again. \"\n \"You can use `git clean -fxdi -- dist` command to do this\"\n )\n\n session.log(\"# Install dependencies\")\n session.install(\"setuptools\", \"wheel\", \"twine\")\n\n with release.isolated_temporary_checkout(session, version) as build_dir:\n session.log(\n \"# Start the build in an isolated, \"\n f\"temporary Git checkout at {build_dir!s}\",\n )\n with release.workdir(session, build_dir):\n tmp_dists = build_dists(session)\n\n tmp_dist_paths = (build_dir / p for p in tmp_dists)\n session.log(f\"# Copying dists from {build_dir}\")\n os.makedirs(\"dist\", exist_ok=True)\n for dist, final in zip(tmp_dist_paths, tmp_dists):\n session.log(f\"# Copying {dist} to {final}\")\n shutil.copy(dist, final)\n\n\ndef build_dists(session: nox.Session) -> List[str]:\n \"\"\"Return dists with valid metadata.\"\"\"\n session.log(\n \"# Check if there's any Git-untracked files before building the wheel\",\n )\n\n has_forbidden_git_untracked_files = any(\n # Don't report the environment this session is running in\n not untracked_file.startswith(\".nox/build-release/\")\n for untracked_file in release.get_git_untracked_files()\n )\n if has_forbidden_git_untracked_files:\n session.error(\n \"There are untracked files in the working directory. \"\n \"Remove them and try again\",\n )\n\n session.log(\"# Build distributions\")\n session.run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", silent=True)\n produced_dists = glob.glob(\"dist/*\")\n\n session.log(f\"# Verify distributions: {', '.join(produced_dists)}\")\n session.run(\"twine\", \"check\", *produced_dists, silent=True)\n\n return produced_dists\n\n\[email protected](name=\"upload-release\")\ndef upload_release(session: nox.Session) -> None:\n version = release.get_version_from_arguments(session)\n if not version:\n session.error(\"Usage: nox -s upload-release -- YY.N[.P]\")\n\n session.log(\"# Install dependencies\")\n session.install(\"twine\")\n\n distribution_files = glob.glob(\"dist/*\")\n session.log(f\"# Distribution files: {distribution_files}\")\n\n # Sanity check: Make sure there's 2 distribution files.\n count = len(distribution_files)\n if count != 2:\n session.error(\n f\"Expected 2 distribution files for upload, got {count}. 
\"\n f\"Remove dist/ and run 'nox -s build-release -- {version}'\"\n )\n # Sanity check: Make sure the files are correctly named.\n distfile_names = (os.path.basename(fn) for fn in distribution_files)\n expected_distribution_files = [\n f\"pip-{version}-py3-none-any.whl\",\n f\"pip-{version}.tar.gz\",\n ]\n if sorted(distfile_names) != sorted(expected_distribution_files):\n session.error(f\"Distribution files do not seem to be for {version} release.\")\n\n session.log(\"# Upload distributions\")\n session.run(\"twine\", \"upload\", *distribution_files)\n", "path": "noxfile.py" } ]
diff --git a/noxfile.py b/noxfile.py index df42af8b8f5..5b5a66d5307 100644 --- a/noxfile.py +++ b/noxfile.py @@ -171,7 +171,7 @@ def lint(session: nox.Session) -> None: @nox.session def vendoring(session: nox.Session) -> None: - session.install("vendoring~=1.0.0") + session.install("vendoring~=1.2.0") if "--upgrade" not in session.posargs: session.run("vendoring", "sync", "-v") diff --git a/pyproject.toml b/pyproject.toml index fac27944798..9bb5900d0e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,4 +54,6 @@ distro = [] setuptools = "pkg_resources" [tool.vendoring.license.fallback-urls] +CacheControl = "https://raw.githubusercontent.com/ionrock/cachecontrol/v0.12.6/LICENSE.txt" +distlib = "https://bitbucket.org/pypa/distlib/raw/master/LICENSE.txt" webencodings = "https://github.com/SimonSapin/python-webencodings/raw/master/LICENSE" diff --git a/src/pip/_vendor/vendor.txt b/src/pip/_vendor/vendor.txt index 0b74c2bacc2..1b5829a038a 100644 --- a/src/pip/_vendor/vendor.txt +++ b/src/pip/_vendor/vendor.txt @@ -1,4 +1,4 @@ -CacheControl==0.12.6 +CacheControl==0.12.6 # Make sure to update the license in pyproject.toml for this. colorama==0.4.4 distlib==0.3.3 distro==1.6.0 diff --git a/tox.ini b/tox.ini index 23738ad1ae5..9063c3ac340 100644 --- a/tox.ini +++ b/tox.ini @@ -70,10 +70,7 @@ basepython = python3 skip_install = True commands_pre = deps = - vendoring~=1.0.0 - # Required, otherwise we interpret --no-binary :all: as - # "do not build wheels", which fails for PEP 517 requirements - pip>=19.3.1 + vendoring~=1.2.0 whitelist_externals = git commands = # Check that the vendoring is up-to-date
iterative__dvc-4826
Unexpected error on `dvc diff` ## Bug Report When running `dvc diff staging`, I got a KeyError, here is the traceback: ``` Traceback (most recent call last): File "/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/main.py", line 76, in main ret = cmd.run() File "/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/command/diff.py", line 130, in run diff = self.repo.diff(self.args.a_rev, self.args.b_rev) File "/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/repo/__init__.py", line 54, in wrapper return f(repo, *args, **kwargs) File "/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/repo/diff.py", line 43, in diff missing = sorted(_filter_missing(self, deleted_or_missing)) File "/home/ubuntu/.local/share/virtualenvs/speech-api-EI_ft4iY/lib/python3.7/site-packages/dvc/repo/diff.py", line 125, in _filter_missing if out.status()[str(out)] == "not in cache": KeyError: 'data/KPI/KPI_from_dvc/en/post_probs' ``` It only happens when I give a specific revision. Any ideas? Could it be my data? ### Please provide information about your setup **Output of `dvc version`:** ```console $ dvc version DVC version: 1.9.1 (pip) --------------------------------- Platform: Python 3.7.3 on Linux-5.4.0-1029-aws-x86_64-with-debian-buster-sid Supports: http, https, s3 Cache types: hardlink, symlink Cache directory: ext4 on /dev/nvme0n1p1 Workspace directory: ext4 on /dev/nvme0n1p1 Repo: dvc, git ```
[ { "content": "import logging\nimport os\n\nfrom dvc.repo import locked\nfrom dvc.tree.local import LocalTree\nfrom dvc.tree.repo import RepoTree\n\nlogger = logging.getLogger(__name__)\n\n\n@locked\ndef diff(self, a_rev=\"HEAD\", b_rev=None):\n \"\"\"\n By default, it compares the workspace with the last commit's tree.\n\n This implementation differs from `git diff` since DVC doesn't have\n the concept of `index`, but it keeps the same interface, thus,\n `dvc diff` would be the same as `dvc diff HEAD`.\n \"\"\"\n\n if self.scm.no_commits:\n return {}\n\n b_rev = b_rev if b_rev else \"workspace\"\n results = {}\n for rev in self.brancher(revs=[a_rev, b_rev]):\n if rev == \"workspace\" and rev != b_rev:\n # brancher always returns workspace, but we only need to compute\n # workspace paths/checksums if b_rev was None\n continue\n results[rev] = _paths_checksums(self)\n\n old = results[a_rev]\n new = results[b_rev]\n\n # Compare paths between the old and new tree.\n # set() efficiently converts dict keys to a set\n added = sorted(set(new) - set(old))\n deleted_or_missing = set(old) - set(new)\n if b_rev == \"workspace\":\n # missing status is only applicable when diffing local workspace\n # against a commit\n missing = sorted(_filter_missing(self, deleted_or_missing))\n else:\n missing = []\n deleted = sorted(deleted_or_missing - set(missing))\n modified = sorted(set(old) & set(new))\n\n ret = {\n \"added\": [{\"path\": path, \"hash\": new[path]} for path in added],\n \"deleted\": [{\"path\": path, \"hash\": old[path]} for path in deleted],\n \"modified\": [\n {\"path\": path, \"hash\": {\"old\": old[path], \"new\": new[path]}}\n for path in modified\n if old[path] != new[path]\n ],\n \"not in cache\": [\n {\"path\": path, \"hash\": old[path]} for path in missing\n ],\n }\n\n return ret if any(ret.values()) else {}\n\n\ndef _paths_checksums(repo):\n \"\"\"\n A dictionary of checksums addressed by relpaths collected from\n the current tree outputs.\n\n To help distinguish between a directory and a file output,\n the former one will come with a trailing slash in the path:\n\n directory: \"data/\"\n file: \"data\"\n \"\"\"\n\n return dict(_output_paths(repo))\n\n\ndef _output_paths(repo):\n repo_tree = RepoTree(repo, stream=True)\n on_working_tree = isinstance(repo.tree, LocalTree)\n\n def _exists(output):\n if on_working_tree:\n return output.exists\n return True\n\n def _to_path(output):\n return (\n str(output)\n if not output.is_dir_checksum\n else os.path.join(str(output), \"\")\n )\n\n def _to_checksum(output):\n if on_working_tree:\n return repo.cache.local.tree.get_hash(output.path_info).value\n return output.hash_info.value\n\n for stage in repo.stages:\n for output in stage.outs:\n if _exists(output):\n yield _to_path(output), _to_checksum(output)\n if output.is_dir_checksum:\n yield from _dir_output_paths(repo_tree, output)\n\n\ndef _dir_output_paths(repo_tree, output):\n from dvc.config import NoRemoteError\n\n try:\n for fname in repo_tree.walk_files(output.path_info):\n yield str(fname), repo_tree.get_file_hash(fname).value\n except NoRemoteError:\n logger.warning(\"dir cache entry for '%s' is missing\", output)\n\n\ndef _filter_missing(repo, paths):\n repo_tree = RepoTree(repo, stream=True)\n for path in paths:\n metadata = repo_tree.metadata(path)\n if metadata.is_dvc:\n out = metadata.outs[0]\n if out.status()[str(out)] == \"not in cache\":\n yield path\n", "path": "dvc/repo/diff.py" } ]
[ { "content": "import logging\nimport os\n\nfrom dvc.repo import locked\nfrom dvc.tree.local import LocalTree\nfrom dvc.tree.repo import RepoTree\n\nlogger = logging.getLogger(__name__)\n\n\n@locked\ndef diff(self, a_rev=\"HEAD\", b_rev=None):\n \"\"\"\n By default, it compares the workspace with the last commit's tree.\n\n This implementation differs from `git diff` since DVC doesn't have\n the concept of `index`, but it keeps the same interface, thus,\n `dvc diff` would be the same as `dvc diff HEAD`.\n \"\"\"\n\n if self.scm.no_commits:\n return {}\n\n b_rev = b_rev if b_rev else \"workspace\"\n results = {}\n for rev in self.brancher(revs=[a_rev, b_rev]):\n if rev == \"workspace\" and rev != b_rev:\n # brancher always returns workspace, but we only need to compute\n # workspace paths/checksums if b_rev was None\n continue\n results[rev] = _paths_checksums(self)\n\n old = results[a_rev]\n new = results[b_rev]\n\n # Compare paths between the old and new tree.\n # set() efficiently converts dict keys to a set\n added = sorted(set(new) - set(old))\n deleted_or_missing = set(old) - set(new)\n if b_rev == \"workspace\":\n # missing status is only applicable when diffing local workspace\n # against a commit\n missing = sorted(_filter_missing(self, deleted_or_missing))\n else:\n missing = []\n deleted = sorted(deleted_or_missing - set(missing))\n modified = sorted(set(old) & set(new))\n\n ret = {\n \"added\": [{\"path\": path, \"hash\": new[path]} for path in added],\n \"deleted\": [{\"path\": path, \"hash\": old[path]} for path in deleted],\n \"modified\": [\n {\"path\": path, \"hash\": {\"old\": old[path], \"new\": new[path]}}\n for path in modified\n if old[path] != new[path]\n ],\n \"not in cache\": [\n {\"path\": path, \"hash\": old[path]} for path in missing\n ],\n }\n\n return ret if any(ret.values()) else {}\n\n\ndef _paths_checksums(repo):\n \"\"\"\n A dictionary of checksums addressed by relpaths collected from\n the current tree outputs.\n\n To help distinguish between a directory and a file output,\n the former one will come with a trailing slash in the path:\n\n directory: \"data/\"\n file: \"data\"\n \"\"\"\n\n return dict(_output_paths(repo))\n\n\ndef _output_paths(repo):\n repo_tree = RepoTree(repo, stream=True)\n on_working_tree = isinstance(repo.tree, LocalTree)\n\n def _exists(output):\n if on_working_tree:\n return output.exists\n return True\n\n def _to_path(output):\n return (\n str(output)\n if not output.is_dir_checksum\n else os.path.join(str(output), \"\")\n )\n\n def _to_checksum(output):\n if on_working_tree:\n return repo.cache.local.tree.get_hash(output.path_info).value\n return output.hash_info.value\n\n for stage in repo.stages:\n for output in stage.outs:\n if _exists(output):\n yield _to_path(output), _to_checksum(output)\n if output.is_dir_checksum:\n yield from _dir_output_paths(repo_tree, output)\n\n\ndef _dir_output_paths(repo_tree, output):\n from dvc.config import NoRemoteError\n\n try:\n for fname in repo_tree.walk_files(output.path_info):\n yield str(fname), repo_tree.get_file_hash(fname).value\n except NoRemoteError:\n logger.warning(\"dir cache entry for '%s' is missing\", output)\n\n\ndef _filter_missing(repo, paths):\n repo_tree = RepoTree(repo, stream=True)\n for path in paths:\n metadata = repo_tree.metadata(path)\n if metadata.is_dvc:\n out = metadata.outs[0]\n if out.status().get(str(out)) == \"not in cache\":\n yield path\n", "path": "dvc/repo/diff.py" } ]
diff --git a/dvc/repo/diff.py b/dvc/repo/diff.py index cb5857a60b..ba7a4c44ca 100644 --- a/dvc/repo/diff.py +++ b/dvc/repo/diff.py @@ -122,5 +122,5 @@ def _filter_missing(repo, paths): metadata = repo_tree.metadata(path) if metadata.is_dvc: out = metadata.outs[0] - if out.status()[str(out)] == "not in cache": + if out.status().get(str(out)) == "not in cache": yield path
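A small standalone illustration of why the one-line change above fixes the traceback from the issue: `out.status()` can come back without an entry for the output (simulated here with a plain dict), so direct indexing raises `KeyError`, while `.get()` yields `None` and the comparison simply evaluates to `False`.

```python
# Simulated return value of out.status() for an output with no reportable status.
status = {}
path = "data/KPI/KPI_from_dvc/en/post_probs"

# Before the fix: direct indexing blows up exactly as in the reported traceback.
try:
    missing = status[path] == "not in cache"
except KeyError as exc:
    print("KeyError:", exc)

# After the fix: .get() returns None for an absent key, so the check is just False.
missing = status.get(path) == "not in cache"
print(missing)  # False -- the path is not treated as "not in cache"
```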
python-poetry__poetry-3743
Poetry install -q (and update -q) produce messages <!-- Checked checkbox should look like this: [x] --> - [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version. - [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate. - [ ] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option). - **OS version and name**: Xubuntu 20.04 - **Poetry version**: 1.1.2 - **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/berislavlopac/949972163f24f734ea84c27fbb27b2f4 ## Issue Running `poetry update -q` and `poetry install -q` produces output, although somewhat more limited than normally. ``` ~/D/test ❯❯❯ (test) poetry install Updating dependencies Resolving dependencies... (0.3s) Writing lock file Package operations: 6 installs, 0 updates, 0 removals • Installing certifi (2020.6.20) • Installing chardet (3.0.4) • Installing idna (2.10) • Installing urllib3 (1.25.10) • Installing requests (2.24.0) • Installing starlette (0.13.8) ``` ``` ~/D/test ❯❯❯ (test) poetry install -q • Installing certifi (2020.6.20) • Installing chardet (3.0.4) • Installing idna (2.10) • Installing urllib3 (1.25.10) • Installing requests (2.24.0) • Installing starlette (0.13.8) ``` Using multiple `q`s (`poetry install -qq`) has the same result as none at all. I was expecting no messages at all, as was the case in earlier versions.
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nimport itertools\nimport os\nimport threading\n\nfrom concurrent.futures import ThreadPoolExecutor\nfrom concurrent.futures import wait\nfrom pathlib import Path\nfrom subprocess import CalledProcessError\nfrom typing import TYPE_CHECKING\nfrom typing import Any\nfrom typing import List\nfrom typing import Union\n\nfrom cleo.io.null_io import NullIO\n\nfrom poetry.core.packages.file_dependency import FileDependency\nfrom poetry.core.packages.utils.link import Link\nfrom poetry.core.pyproject.toml import PyProjectTOML\nfrom poetry.utils._compat import decode\nfrom poetry.utils.env import EnvCommandError\nfrom poetry.utils.helpers import safe_rmtree\n\nfrom .authenticator import Authenticator\nfrom .chef import Chef\nfrom .chooser import Chooser\nfrom .operations.install import Install\nfrom .operations.operation import Operation\nfrom .operations.uninstall import Uninstall\nfrom .operations.update import Update\n\n\nif TYPE_CHECKING:\n from cleo.io.io import IO # noqa\n\n from poetry.config.config import Config\n from poetry.repositories import Pool\n from poetry.utils.env import Env\n\n from .operations import OperationTypes\n\n\nclass Executor(object):\n def __init__(\n self,\n env: \"Env\",\n pool: \"Pool\",\n config: \"Config\",\n io: \"IO\",\n parallel: bool = None,\n ) -> None:\n self._env = env\n self._io = io\n self._dry_run = False\n self._enabled = True\n self._verbose = False\n self._authenticator = Authenticator(config, self._io)\n self._chef = Chef(config, self._env)\n self._chooser = Chooser(pool, self._env)\n\n if parallel is None:\n parallel = config.get(\"installer.parallel\", True)\n\n if parallel:\n # This should be directly handled by ThreadPoolExecutor\n # however, on some systems the number of CPUs cannot be determined\n # (it raises a NotImplementedError), so, in this case, we assume\n # that the system only has one CPU.\n try:\n self._max_workers = os.cpu_count() + 4\n except NotImplementedError:\n self._max_workers = 5\n else:\n self._max_workers = 1\n\n self._executor = ThreadPoolExecutor(max_workers=self._max_workers)\n self._total_operations = 0\n self._executed_operations = 0\n self._executed = {\"install\": 0, \"update\": 0, \"uninstall\": 0}\n self._skipped = {\"install\": 0, \"update\": 0, \"uninstall\": 0}\n self._sections = dict()\n self._lock = threading.Lock()\n self._shutdown = False\n\n @property\n def installations_count(self) -> int:\n return self._executed[\"install\"]\n\n @property\n def updates_count(self) -> int:\n return self._executed[\"update\"]\n\n @property\n def removals_count(self) -> int:\n return self._executed[\"uninstall\"]\n\n def supports_fancy_output(self) -> bool:\n return self._io.output.is_decorated() and not self._dry_run\n\n def disable(self) -> \"Executor\":\n self._enabled = False\n\n return self\n\n def dry_run(self, dry_run: bool = True) -> \"Executor\":\n self._dry_run = dry_run\n\n return self\n\n def verbose(self, verbose: bool = True) -> \"Executor\":\n self._verbose = verbose\n\n return self\n\n def execute(self, operations: List[\"OperationTypes\"]) -> int:\n self._total_operations = len(operations)\n for job_type in self._executed:\n self._executed[job_type] = 0\n self._skipped[job_type] = 0\n\n if operations and (self._enabled or self._dry_run):\n self._display_summary(operations)\n\n # We group operations by priority\n groups = itertools.groupby(operations, key=lambda o: -o.priority)\n self._sections = dict()\n for _, group in groups:\n 
tasks = []\n serial_operations = []\n for operation in group:\n if self._shutdown:\n break\n\n # Some operations are unsafe, we must execute them serially in a group\n # https://github.com/python-poetry/poetry/issues/3086\n # https://github.com/python-poetry/poetry/issues/2658\n #\n # We need to explicitly check source type here, see:\n # https://github.com/python-poetry/poetry-core/pull/98\n is_parallel_unsafe = operation.job_type == \"uninstall\" or (\n operation.package.develop\n and operation.package.source_type in {\"directory\", \"git\"}\n )\n if not operation.skipped and is_parallel_unsafe:\n serial_operations.append(operation)\n continue\n\n tasks.append(self._executor.submit(self._execute_operation, operation))\n\n try:\n wait(tasks)\n\n for operation in serial_operations:\n wait([self._executor.submit(self._execute_operation, operation)])\n\n except KeyboardInterrupt:\n self._shutdown = True\n\n if self._shutdown:\n # Cancelling further tasks from being executed\n [task.cancel() for task in tasks]\n self._executor.shutdown(wait=True)\n\n break\n\n return 1 if self._shutdown else 0\n\n def _write(self, operation: \"OperationTypes\", line: str) -> None:\n if not self.supports_fancy_output() or not self._should_write_operation(\n operation\n ):\n return\n\n if self._io.is_debug():\n with self._lock:\n section = self._sections[id(operation)]\n section.write_line(line)\n\n return\n\n with self._lock:\n section = self._sections[id(operation)]\n section.clear()\n section.write(line)\n\n def _execute_operation(self, operation: \"OperationTypes\") -> None:\n try:\n if self.supports_fancy_output():\n if id(operation) not in self._sections:\n if self._should_write_operation(operation):\n with self._lock:\n self._sections[id(operation)] = self._io.section()\n self._sections[id(operation)].write_line(\n \" <fg=blue;options=bold>•</> {message}: <fg=blue>Pending...</>\".format(\n message=self.get_operation_message(operation),\n ),\n )\n else:\n if self._should_write_operation(operation):\n if not operation.skipped:\n self._io.write_line(\n \" <fg=blue;options=bold>•</> {message}\".format(\n message=self.get_operation_message(operation),\n ),\n )\n else:\n self._io.write_line(\n \" <fg=default;options=bold,dark>•</> {message}: \"\n \"<fg=default;options=bold,dark>Skipped</> \"\n \"<fg=default;options=dark>for the following reason:</> \"\n \"<fg=default;options=bold,dark>{reason}</>\".format(\n message=self.get_operation_message(operation),\n reason=operation.skip_reason,\n )\n )\n\n try:\n result = self._do_execute_operation(operation)\n except EnvCommandError as e:\n if e.e.returncode == -2:\n result = -2\n else:\n raise\n\n # If we have a result of -2 it means a KeyboardInterrupt\n # in the any python subprocess, so we raise a KeyboardInterrupt\n # error to be picked up by the error handler.\n if result == -2:\n raise KeyboardInterrupt\n except Exception as e:\n try:\n from cleo.ui.exception_trace import ExceptionTrace\n\n if not self.supports_fancy_output():\n io = self._io\n else:\n message = (\n \" <error>•</error> {message}: <error>Failed</error>\".format(\n message=self.get_operation_message(operation, error=True),\n )\n )\n self._write(operation, message)\n io = self._sections.get(id(operation), self._io)\n\n with self._lock:\n trace = ExceptionTrace(e)\n trace.render(io)\n io.write_line(\"\")\n finally:\n with self._lock:\n self._shutdown = True\n except KeyboardInterrupt:\n try:\n message = \" <warning>•</warning> {message}: <warning>Cancelled</warning>\".format(\n 
message=self.get_operation_message(operation, warning=True),\n )\n if not self.supports_fancy_output():\n self._io.write_line(message)\n else:\n self._write(operation, message)\n finally:\n with self._lock:\n self._shutdown = True\n\n def _do_execute_operation(self, operation: \"OperationTypes\") -> int:\n method = operation.job_type\n\n operation_message = self.get_operation_message(operation)\n if operation.skipped:\n if self.supports_fancy_output():\n self._write(\n operation,\n \" <fg=default;options=bold,dark>•</> {message}: \"\n \"<fg=default;options=bold,dark>Skipped</> \"\n \"<fg=default;options=dark>for the following reason:</> \"\n \"<fg=default;options=bold,dark>{reason}</>\".format(\n message=operation_message,\n reason=operation.skip_reason,\n ),\n )\n\n self._skipped[operation.job_type] += 1\n\n return 0\n\n if not self._enabled or self._dry_run:\n self._io.write_line(\n \" <fg=blue;options=bold>•</> {message}\".format(\n message=operation_message,\n )\n )\n\n return 0\n\n result = getattr(self, \"_execute_{}\".format(method))(operation)\n\n if result != 0:\n return result\n\n message = \" <fg=green;options=bold>•</> {message}\".format(\n message=self.get_operation_message(operation, done=True),\n )\n self._write(operation, message)\n\n self._increment_operations_count(operation, True)\n\n return result\n\n def _increment_operations_count(\n self, operation: \"OperationTypes\", executed: bool\n ) -> None:\n with self._lock:\n if executed:\n self._executed_operations += 1\n self._executed[operation.job_type] += 1\n else:\n self._skipped[operation.job_type] += 1\n\n def run_pip(self, *args: Any, **kwargs: Any) -> int:\n try:\n self._env.run_pip(*args, **kwargs)\n except EnvCommandError as e:\n output = decode(e.e.output)\n if (\n \"KeyboardInterrupt\" in output\n or \"ERROR: Operation cancelled by user\" in output\n ):\n return -2\n\n raise\n\n return 0\n\n def get_operation_message(\n self,\n operation: \"OperationTypes\",\n done: bool = False,\n error: bool = False,\n warning: bool = False,\n ) -> str:\n base_tag = \"fg=default\"\n operation_color = \"c2\"\n source_operation_color = \"c2\"\n package_color = \"c1\"\n\n if error:\n operation_color = \"error\"\n elif warning:\n operation_color = \"warning\"\n elif done:\n operation_color = \"success\"\n\n if operation.skipped:\n base_tag = \"fg=default;options=dark\"\n operation_color += \"_dark\"\n source_operation_color += \"_dark\"\n package_color += \"_dark\"\n\n if operation.job_type == \"install\":\n return \"<{}>Installing <{}>{}</{}> (<{}>{}</>)</>\".format(\n base_tag,\n package_color,\n operation.package.name,\n package_color,\n operation_color,\n operation.package.full_pretty_version,\n )\n\n if operation.job_type == \"uninstall\":\n return \"<{}>Removing <{}>{}</{}> (<{}>{}</>)</>\".format(\n base_tag,\n package_color,\n operation.package.name,\n package_color,\n operation_color,\n operation.package.full_pretty_version,\n )\n\n if operation.job_type == \"update\":\n return \"<{}>Updating <{}>{}</{}> (<{}>{}</{}> -> <{}>{}</>)</>\".format(\n base_tag,\n package_color,\n operation.initial_package.name,\n package_color,\n source_operation_color,\n operation.initial_package.full_pretty_version,\n source_operation_color,\n operation_color,\n operation.target_package.full_pretty_version,\n )\n\n return \"\"\n\n def _display_summary(self, operations: List[\"OperationTypes\"]) -> None:\n installs = 0\n updates = 0\n uninstalls = 0\n skipped = 0\n for op in operations:\n if op.skipped:\n skipped += 1\n continue\n\n if 
op.job_type == \"install\":\n installs += 1\n elif op.job_type == \"update\":\n updates += 1\n elif op.job_type == \"uninstall\":\n uninstalls += 1\n\n if not installs and not updates and not uninstalls and not self._verbose:\n self._io.write_line(\"\")\n self._io.write_line(\"No dependencies to install or update\")\n\n return\n\n self._io.write_line(\"\")\n self._io.write_line(\n \"<b>Package operations</b>: \"\n \"<info>{}</> install{}, \"\n \"<info>{}</> update{}, \"\n \"<info>{}</> removal{}\"\n \"{}\".format(\n installs,\n \"\" if installs == 1 else \"s\",\n updates,\n \"\" if updates == 1 else \"s\",\n uninstalls,\n \"\" if uninstalls == 1 else \"s\",\n \", <info>{}</> skipped\".format(skipped)\n if skipped and self._verbose\n else \"\",\n )\n )\n self._io.write_line(\"\")\n\n def _execute_install(self, operation: Union[Install, Update]) -> int:\n return self._install(operation)\n\n def _execute_update(self, operation: Union[Install, Update]) -> int:\n return self._update(operation)\n\n def _execute_uninstall(self, operation: Uninstall) -> int:\n message = (\n \" <fg=blue;options=bold>•</> {message}: <info>Removing...</info>\".format(\n message=self.get_operation_message(operation),\n )\n )\n self._write(operation, message)\n\n return self._remove(operation)\n\n def _install(self, operation: Union[Install, Update]) -> int:\n package = operation.package\n if package.source_type == \"directory\":\n return self._install_directory(operation)\n\n if package.source_type == \"git\":\n return self._install_git(operation)\n\n if package.source_type == \"file\":\n archive = self._prepare_file(operation)\n elif package.source_type == \"url\":\n archive = self._download_link(operation, Link(package.source_url))\n else:\n archive = self._download(operation)\n\n operation_message = self.get_operation_message(operation)\n message = (\n \" <fg=blue;options=bold>•</> {message}: <info>Installing...</info>\".format(\n message=operation_message,\n )\n )\n self._write(operation, message)\n\n args = [\"install\", \"--no-deps\", str(archive)]\n if operation.job_type == \"update\":\n args.insert(2, \"-U\")\n\n return self.run_pip(*args)\n\n def _update(self, operation: Union[Install, Update]) -> int:\n return self._install(operation)\n\n def _remove(self, operation: Uninstall) -> int:\n package = operation.package\n\n # If we have a VCS package, remove its source directory\n if package.source_type == \"git\":\n src_dir = self._env.path / \"src\" / package.name\n if src_dir.exists():\n safe_rmtree(str(src_dir))\n\n try:\n return self.run_pip(\"uninstall\", package.name, \"-y\")\n except CalledProcessError as e:\n if \"not installed\" in str(e):\n return 0\n\n raise\n\n def _prepare_file(self, operation: Union[Install, Update]) -> Path:\n package = operation.package\n\n message = (\n \" <fg=blue;options=bold>•</> {message}: <info>Preparing...</info>\".format(\n message=self.get_operation_message(operation),\n )\n )\n self._write(operation, message)\n\n archive = Path(package.source_url)\n if not Path(package.source_url).is_absolute() and package.root_dir:\n archive = package.root_dir / archive\n\n archive = self._chef.prepare(archive)\n\n return archive\n\n def _install_directory(self, operation: Union[Install, Update]) -> int:\n from poetry.factory import Factory\n\n package = operation.package\n operation_message = self.get_operation_message(operation)\n\n message = (\n \" <fg=blue;options=bold>•</> {message}: <info>Building...</info>\".format(\n message=operation_message,\n )\n )\n self._write(operation, 
message)\n\n if package.root_dir:\n req = os.path.join(str(package.root_dir), package.source_url)\n else:\n req = os.path.realpath(package.source_url)\n\n args = [\"install\", \"--no-deps\", \"-U\"]\n\n pyproject = PyProjectTOML(os.path.join(req, \"pyproject.toml\"))\n\n if pyproject.is_poetry_project():\n # Even if there is a build system specified\n # some versions of pip (< 19.0.0) don't understand it\n # so we need to check the version of pip to know\n # if we can rely on the build system\n legacy_pip = self._env.pip_version < self._env.pip_version.__class__(\n 19, 0, 0\n )\n package_poetry = Factory().create_poetry(pyproject.file.path.parent)\n\n if package.develop and not package_poetry.package.build_script:\n from poetry.masonry.builders.editable import EditableBuilder\n\n # This is a Poetry package in editable mode\n # we can use the EditableBuilder without going through pip\n # to install it, unless it has a build script.\n builder = EditableBuilder(package_poetry, self._env, NullIO())\n builder.build()\n\n return 0\n elif legacy_pip or package_poetry.package.build_script:\n from poetry.core.masonry.builders.sdist import SdistBuilder\n\n # We need to rely on creating a temporary setup.py\n # file since the version of pip does not support\n # build-systems\n # We also need it for non-PEP-517 packages\n builder = SdistBuilder(package_poetry)\n\n with builder.setup_py():\n if package.develop:\n args.append(\"-e\")\n\n args.append(req)\n\n return self.run_pip(*args)\n\n if package.develop:\n args.append(\"-e\")\n\n args.append(req)\n\n return self.run_pip(*args)\n\n def _install_git(self, operation: Union[Install, Update]) -> int:\n from poetry.core.vcs import Git\n\n package = operation.package\n operation_message = self.get_operation_message(operation)\n\n message = (\n \" <fg=blue;options=bold>•</> {message}: <info>Cloning...</info>\".format(\n message=operation_message,\n )\n )\n self._write(operation, message)\n\n src_dir = self._env.path / \"src\" / package.name\n if src_dir.exists():\n safe_rmtree(str(src_dir))\n\n src_dir.parent.mkdir(exist_ok=True)\n\n git = Git()\n git.clone(package.source_url, src_dir)\n git.checkout(package.source_reference, src_dir)\n\n # Now we just need to install from the source directory\n package._source_url = str(src_dir)\n\n return self._install_directory(operation)\n\n def _download(self, operation: Union[Install, Update]) -> Link:\n link = self._chooser.choose_for(operation.package)\n\n return self._download_link(operation, link)\n\n def _download_link(self, operation: Union[Install, Update], link: Link) -> Link:\n package = operation.package\n\n archive = self._chef.get_cached_archive_for_link(link)\n if archive is link:\n # No cached distributions was found, so we download and prepare it\n try:\n archive = self._download_archive(operation, link)\n except BaseException:\n cache_directory = self._chef.get_cache_directory_for_link(link)\n cached_file = cache_directory.joinpath(link.filename)\n # We can't use unlink(missing_ok=True) because it's not available\n # in pathlib2 for Python 2.7\n if cached_file.exists():\n cached_file.unlink()\n\n raise\n\n # TODO: Check readability of the created archive\n\n if not link.is_wheel:\n archive = self._chef.prepare(archive)\n\n if package.files:\n archive_hash = \"sha256:\" + FileDependency(package.name, archive).hash()\n if archive_hash not in {f[\"hash\"] for f in package.files}:\n raise RuntimeError(\n \"Invalid hash for {} using archive {}\".format(package, archive.name)\n )\n\n return archive\n\n def 
_download_archive(self, operation: Union[Install, Update], link: Link) -> Path:\n response = self._authenticator.request(\n \"get\", link.url, stream=True, io=self._sections.get(id(operation), self._io)\n )\n wheel_size = response.headers.get(\"content-length\")\n operation_message = self.get_operation_message(operation)\n message = (\n \" <fg=blue;options=bold>•</> {message}: <info>Downloading...</>\".format(\n message=operation_message,\n )\n )\n progress = None\n if self.supports_fancy_output():\n if wheel_size is None:\n self._write(operation, message)\n else:\n from cleo.ui.progress_bar import ProgressBar\n\n progress = ProgressBar(\n self._sections[id(operation)], max=int(wheel_size)\n )\n progress.set_format(message + \" <b>%percent%%</b>\")\n\n if progress:\n with self._lock:\n progress.start()\n\n done = 0\n archive = self._chef.get_cache_directory_for_link(link) / link.filename\n archive.parent.mkdir(parents=True, exist_ok=True)\n with archive.open(\"wb\") as f:\n for chunk in response.iter_content(chunk_size=4096):\n if not chunk:\n break\n\n done += len(chunk)\n\n if progress:\n with self._lock:\n progress.set_progress(done)\n\n f.write(chunk)\n\n if progress:\n with self._lock:\n progress.finish()\n\n return archive\n\n def _should_write_operation(self, operation: Operation) -> bool:\n if not operation.skipped:\n return True\n\n return self._dry_run or self._verbose\n", "path": "poetry/installation/executor.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nimport itertools\nimport os\nimport threading\n\nfrom concurrent.futures import ThreadPoolExecutor\nfrom concurrent.futures import wait\nfrom pathlib import Path\nfrom subprocess import CalledProcessError\nfrom typing import TYPE_CHECKING\nfrom typing import Any\nfrom typing import List\nfrom typing import Union\n\nfrom cleo.io.null_io import NullIO\n\nfrom poetry.core.packages.file_dependency import FileDependency\nfrom poetry.core.packages.utils.link import Link\nfrom poetry.core.pyproject.toml import PyProjectTOML\nfrom poetry.utils._compat import decode\nfrom poetry.utils.env import EnvCommandError\nfrom poetry.utils.helpers import safe_rmtree\n\nfrom .authenticator import Authenticator\nfrom .chef import Chef\nfrom .chooser import Chooser\nfrom .operations.install import Install\nfrom .operations.operation import Operation\nfrom .operations.uninstall import Uninstall\nfrom .operations.update import Update\n\n\nif TYPE_CHECKING:\n from cleo.io.io import IO # noqa\n\n from poetry.config.config import Config\n from poetry.repositories import Pool\n from poetry.utils.env import Env\n\n from .operations import OperationTypes\n\n\nclass Executor(object):\n def __init__(\n self,\n env: \"Env\",\n pool: \"Pool\",\n config: \"Config\",\n io: \"IO\",\n parallel: bool = None,\n ) -> None:\n self._env = env\n self._io = io\n self._dry_run = False\n self._enabled = True\n self._verbose = False\n self._authenticator = Authenticator(config, self._io)\n self._chef = Chef(config, self._env)\n self._chooser = Chooser(pool, self._env)\n\n if parallel is None:\n parallel = config.get(\"installer.parallel\", True)\n\n if parallel:\n # This should be directly handled by ThreadPoolExecutor\n # however, on some systems the number of CPUs cannot be determined\n # (it raises a NotImplementedError), so, in this case, we assume\n # that the system only has one CPU.\n try:\n self._max_workers = os.cpu_count() + 4\n except NotImplementedError:\n self._max_workers = 5\n else:\n self._max_workers = 1\n\n self._executor = ThreadPoolExecutor(max_workers=self._max_workers)\n self._total_operations = 0\n self._executed_operations = 0\n self._executed = {\"install\": 0, \"update\": 0, \"uninstall\": 0}\n self._skipped = {\"install\": 0, \"update\": 0, \"uninstall\": 0}\n self._sections = dict()\n self._lock = threading.Lock()\n self._shutdown = False\n\n @property\n def installations_count(self) -> int:\n return self._executed[\"install\"]\n\n @property\n def updates_count(self) -> int:\n return self._executed[\"update\"]\n\n @property\n def removals_count(self) -> int:\n return self._executed[\"uninstall\"]\n\n def supports_fancy_output(self) -> bool:\n return self._io.output.is_decorated() and not self._dry_run\n\n def disable(self) -> \"Executor\":\n self._enabled = False\n\n return self\n\n def dry_run(self, dry_run: bool = True) -> \"Executor\":\n self._dry_run = dry_run\n\n return self\n\n def verbose(self, verbose: bool = True) -> \"Executor\":\n self._verbose = verbose\n\n return self\n\n def execute(self, operations: List[\"OperationTypes\"]) -> int:\n self._total_operations = len(operations)\n for job_type in self._executed:\n self._executed[job_type] = 0\n self._skipped[job_type] = 0\n\n if operations and (self._enabled or self._dry_run):\n self._display_summary(operations)\n\n # We group operations by priority\n groups = itertools.groupby(operations, key=lambda o: -o.priority)\n self._sections = dict()\n for _, group in groups:\n 
tasks = []\n serial_operations = []\n for operation in group:\n if self._shutdown:\n break\n\n # Some operations are unsafe, we must execute them serially in a group\n # https://github.com/python-poetry/poetry/issues/3086\n # https://github.com/python-poetry/poetry/issues/2658\n #\n # We need to explicitly check source type here, see:\n # https://github.com/python-poetry/poetry-core/pull/98\n is_parallel_unsafe = operation.job_type == \"uninstall\" or (\n operation.package.develop\n and operation.package.source_type in {\"directory\", \"git\"}\n )\n if not operation.skipped and is_parallel_unsafe:\n serial_operations.append(operation)\n continue\n\n tasks.append(self._executor.submit(self._execute_operation, operation))\n\n try:\n wait(tasks)\n\n for operation in serial_operations:\n wait([self._executor.submit(self._execute_operation, operation)])\n\n except KeyboardInterrupt:\n self._shutdown = True\n\n if self._shutdown:\n # Cancelling further tasks from being executed\n [task.cancel() for task in tasks]\n self._executor.shutdown(wait=True)\n\n break\n\n return 1 if self._shutdown else 0\n\n def _write(self, operation: \"OperationTypes\", line: str) -> None:\n if not self.supports_fancy_output() or not self._should_write_operation(\n operation\n ):\n return\n\n if self._io.is_debug():\n with self._lock:\n section = self._sections[id(operation)]\n section.write_line(line)\n\n return\n\n with self._lock:\n section = self._sections[id(operation)]\n section.clear()\n section.write(line)\n\n def _execute_operation(self, operation: \"OperationTypes\") -> None:\n try:\n if self.supports_fancy_output():\n if id(operation) not in self._sections:\n if self._should_write_operation(operation):\n with self._lock:\n self._sections[id(operation)] = self._io.section()\n self._sections[id(operation)].write_line(\n \" <fg=blue;options=bold>•</> {message}: <fg=blue>Pending...</>\".format(\n message=self.get_operation_message(operation),\n ),\n )\n else:\n if self._should_write_operation(operation):\n if not operation.skipped:\n self._io.write_line(\n \" <fg=blue;options=bold>•</> {message}\".format(\n message=self.get_operation_message(operation),\n ),\n )\n else:\n self._io.write_line(\n \" <fg=default;options=bold,dark>•</> {message}: \"\n \"<fg=default;options=bold,dark>Skipped</> \"\n \"<fg=default;options=dark>for the following reason:</> \"\n \"<fg=default;options=bold,dark>{reason}</>\".format(\n message=self.get_operation_message(operation),\n reason=operation.skip_reason,\n )\n )\n\n try:\n result = self._do_execute_operation(operation)\n except EnvCommandError as e:\n if e.e.returncode == -2:\n result = -2\n else:\n raise\n\n # If we have a result of -2 it means a KeyboardInterrupt\n # in the any python subprocess, so we raise a KeyboardInterrupt\n # error to be picked up by the error handler.\n if result == -2:\n raise KeyboardInterrupt\n except Exception as e:\n try:\n from cleo.ui.exception_trace import ExceptionTrace\n\n if not self.supports_fancy_output():\n io = self._io\n else:\n message = (\n \" <error>•</error> {message}: <error>Failed</error>\".format(\n message=self.get_operation_message(operation, error=True),\n )\n )\n self._write(operation, message)\n io = self._sections.get(id(operation), self._io)\n\n with self._lock:\n trace = ExceptionTrace(e)\n trace.render(io)\n io.write_line(\"\")\n finally:\n with self._lock:\n self._shutdown = True\n except KeyboardInterrupt:\n try:\n message = \" <warning>•</warning> {message}: <warning>Cancelled</warning>\".format(\n 
message=self.get_operation_message(operation, warning=True),\n )\n if not self.supports_fancy_output():\n self._io.write_line(message)\n else:\n self._write(operation, message)\n finally:\n with self._lock:\n self._shutdown = True\n\n def _do_execute_operation(self, operation: \"OperationTypes\") -> int:\n method = operation.job_type\n\n operation_message = self.get_operation_message(operation)\n if operation.skipped:\n if self.supports_fancy_output():\n self._write(\n operation,\n \" <fg=default;options=bold,dark>•</> {message}: \"\n \"<fg=default;options=bold,dark>Skipped</> \"\n \"<fg=default;options=dark>for the following reason:</> \"\n \"<fg=default;options=bold,dark>{reason}</>\".format(\n message=operation_message,\n reason=operation.skip_reason,\n ),\n )\n\n self._skipped[operation.job_type] += 1\n\n return 0\n\n if not self._enabled or self._dry_run:\n self._io.write_line(\n \" <fg=blue;options=bold>•</> {message}\".format(\n message=operation_message,\n )\n )\n\n return 0\n\n result = getattr(self, \"_execute_{}\".format(method))(operation)\n\n if result != 0:\n return result\n\n message = \" <fg=green;options=bold>•</> {message}\".format(\n message=self.get_operation_message(operation, done=True),\n )\n self._write(operation, message)\n\n self._increment_operations_count(operation, True)\n\n return result\n\n def _increment_operations_count(\n self, operation: \"OperationTypes\", executed: bool\n ) -> None:\n with self._lock:\n if executed:\n self._executed_operations += 1\n self._executed[operation.job_type] += 1\n else:\n self._skipped[operation.job_type] += 1\n\n def run_pip(self, *args: Any, **kwargs: Any) -> int:\n try:\n self._env.run_pip(*args, **kwargs)\n except EnvCommandError as e:\n output = decode(e.e.output)\n if (\n \"KeyboardInterrupt\" in output\n or \"ERROR: Operation cancelled by user\" in output\n ):\n return -2\n\n raise\n\n return 0\n\n def get_operation_message(\n self,\n operation: \"OperationTypes\",\n done: bool = False,\n error: bool = False,\n warning: bool = False,\n ) -> str:\n base_tag = \"fg=default\"\n operation_color = \"c2\"\n source_operation_color = \"c2\"\n package_color = \"c1\"\n\n if error:\n operation_color = \"error\"\n elif warning:\n operation_color = \"warning\"\n elif done:\n operation_color = \"success\"\n\n if operation.skipped:\n base_tag = \"fg=default;options=dark\"\n operation_color += \"_dark\"\n source_operation_color += \"_dark\"\n package_color += \"_dark\"\n\n if operation.job_type == \"install\":\n return \"<{}>Installing <{}>{}</{}> (<{}>{}</>)</>\".format(\n base_tag,\n package_color,\n operation.package.name,\n package_color,\n operation_color,\n operation.package.full_pretty_version,\n )\n\n if operation.job_type == \"uninstall\":\n return \"<{}>Removing <{}>{}</{}> (<{}>{}</>)</>\".format(\n base_tag,\n package_color,\n operation.package.name,\n package_color,\n operation_color,\n operation.package.full_pretty_version,\n )\n\n if operation.job_type == \"update\":\n return \"<{}>Updating <{}>{}</{}> (<{}>{}</{}> -> <{}>{}</>)</>\".format(\n base_tag,\n package_color,\n operation.initial_package.name,\n package_color,\n source_operation_color,\n operation.initial_package.full_pretty_version,\n source_operation_color,\n operation_color,\n operation.target_package.full_pretty_version,\n )\n\n return \"\"\n\n def _display_summary(self, operations: List[\"OperationTypes\"]) -> None:\n installs = 0\n updates = 0\n uninstalls = 0\n skipped = 0\n for op in operations:\n if op.skipped:\n skipped += 1\n continue\n\n if 
op.job_type == \"install\":\n installs += 1\n elif op.job_type == \"update\":\n updates += 1\n elif op.job_type == \"uninstall\":\n uninstalls += 1\n\n if not installs and not updates and not uninstalls and not self._verbose:\n self._io.write_line(\"\")\n self._io.write_line(\"No dependencies to install or update\")\n\n return\n\n self._io.write_line(\"\")\n self._io.write_line(\n \"<b>Package operations</b>: \"\n \"<info>{}</> install{}, \"\n \"<info>{}</> update{}, \"\n \"<info>{}</> removal{}\"\n \"{}\".format(\n installs,\n \"\" if installs == 1 else \"s\",\n updates,\n \"\" if updates == 1 else \"s\",\n uninstalls,\n \"\" if uninstalls == 1 else \"s\",\n \", <info>{}</> skipped\".format(skipped)\n if skipped and self._verbose\n else \"\",\n )\n )\n self._io.write_line(\"\")\n\n def _execute_install(self, operation: Union[Install, Update]) -> int:\n return self._install(operation)\n\n def _execute_update(self, operation: Union[Install, Update]) -> int:\n return self._update(operation)\n\n def _execute_uninstall(self, operation: Uninstall) -> int:\n message = (\n \" <fg=blue;options=bold>•</> {message}: <info>Removing...</info>\".format(\n message=self.get_operation_message(operation),\n )\n )\n self._write(operation, message)\n\n return self._remove(operation)\n\n def _install(self, operation: Union[Install, Update]) -> int:\n package = operation.package\n if package.source_type == \"directory\":\n return self._install_directory(operation)\n\n if package.source_type == \"git\":\n return self._install_git(operation)\n\n if package.source_type == \"file\":\n archive = self._prepare_file(operation)\n elif package.source_type == \"url\":\n archive = self._download_link(operation, Link(package.source_url))\n else:\n archive = self._download(operation)\n\n operation_message = self.get_operation_message(operation)\n message = (\n \" <fg=blue;options=bold>•</> {message}: <info>Installing...</info>\".format(\n message=operation_message,\n )\n )\n self._write(operation, message)\n\n args = [\"install\", \"--no-deps\", str(archive)]\n if operation.job_type == \"update\":\n args.insert(2, \"-U\")\n\n return self.run_pip(*args)\n\n def _update(self, operation: Union[Install, Update]) -> int:\n return self._install(operation)\n\n def _remove(self, operation: Uninstall) -> int:\n package = operation.package\n\n # If we have a VCS package, remove its source directory\n if package.source_type == \"git\":\n src_dir = self._env.path / \"src\" / package.name\n if src_dir.exists():\n safe_rmtree(str(src_dir))\n\n try:\n return self.run_pip(\"uninstall\", package.name, \"-y\")\n except CalledProcessError as e:\n if \"not installed\" in str(e):\n return 0\n\n raise\n\n def _prepare_file(self, operation: Union[Install, Update]) -> Path:\n package = operation.package\n\n message = (\n \" <fg=blue;options=bold>•</> {message}: <info>Preparing...</info>\".format(\n message=self.get_operation_message(operation),\n )\n )\n self._write(operation, message)\n\n archive = Path(package.source_url)\n if not Path(package.source_url).is_absolute() and package.root_dir:\n archive = package.root_dir / archive\n\n archive = self._chef.prepare(archive)\n\n return archive\n\n def _install_directory(self, operation: Union[Install, Update]) -> int:\n from poetry.factory import Factory\n\n package = operation.package\n operation_message = self.get_operation_message(operation)\n\n message = (\n \" <fg=blue;options=bold>•</> {message}: <info>Building...</info>\".format(\n message=operation_message,\n )\n )\n self._write(operation, 
message)\n\n if package.root_dir:\n req = os.path.join(str(package.root_dir), package.source_url)\n else:\n req = os.path.realpath(package.source_url)\n\n args = [\"install\", \"--no-deps\", \"-U\"]\n\n pyproject = PyProjectTOML(os.path.join(req, \"pyproject.toml\"))\n\n if pyproject.is_poetry_project():\n # Even if there is a build system specified\n # some versions of pip (< 19.0.0) don't understand it\n # so we need to check the version of pip to know\n # if we can rely on the build system\n legacy_pip = self._env.pip_version < self._env.pip_version.__class__(\n 19, 0, 0\n )\n package_poetry = Factory().create_poetry(pyproject.file.path.parent)\n\n if package.develop and not package_poetry.package.build_script:\n from poetry.masonry.builders.editable import EditableBuilder\n\n # This is a Poetry package in editable mode\n # we can use the EditableBuilder without going through pip\n # to install it, unless it has a build script.\n builder = EditableBuilder(package_poetry, self._env, NullIO())\n builder.build()\n\n return 0\n elif legacy_pip or package_poetry.package.build_script:\n from poetry.core.masonry.builders.sdist import SdistBuilder\n\n # We need to rely on creating a temporary setup.py\n # file since the version of pip does not support\n # build-systems\n # We also need it for non-PEP-517 packages\n builder = SdistBuilder(package_poetry)\n\n with builder.setup_py():\n if package.develop:\n args.append(\"-e\")\n\n args.append(req)\n\n return self.run_pip(*args)\n\n if package.develop:\n args.append(\"-e\")\n\n args.append(req)\n\n return self.run_pip(*args)\n\n def _install_git(self, operation: Union[Install, Update]) -> int:\n from poetry.core.vcs import Git\n\n package = operation.package\n operation_message = self.get_operation_message(operation)\n\n message = (\n \" <fg=blue;options=bold>•</> {message}: <info>Cloning...</info>\".format(\n message=operation_message,\n )\n )\n self._write(operation, message)\n\n src_dir = self._env.path / \"src\" / package.name\n if src_dir.exists():\n safe_rmtree(str(src_dir))\n\n src_dir.parent.mkdir(exist_ok=True)\n\n git = Git()\n git.clone(package.source_url, src_dir)\n git.checkout(package.source_reference, src_dir)\n\n # Now we just need to install from the source directory\n package._source_url = str(src_dir)\n\n return self._install_directory(operation)\n\n def _download(self, operation: Union[Install, Update]) -> Link:\n link = self._chooser.choose_for(operation.package)\n\n return self._download_link(operation, link)\n\n def _download_link(self, operation: Union[Install, Update], link: Link) -> Link:\n package = operation.package\n\n archive = self._chef.get_cached_archive_for_link(link)\n if archive is link:\n # No cached distributions was found, so we download and prepare it\n try:\n archive = self._download_archive(operation, link)\n except BaseException:\n cache_directory = self._chef.get_cache_directory_for_link(link)\n cached_file = cache_directory.joinpath(link.filename)\n # We can't use unlink(missing_ok=True) because it's not available\n # in pathlib2 for Python 2.7\n if cached_file.exists():\n cached_file.unlink()\n\n raise\n\n # TODO: Check readability of the created archive\n\n if not link.is_wheel:\n archive = self._chef.prepare(archive)\n\n if package.files:\n archive_hash = \"sha256:\" + FileDependency(package.name, archive).hash()\n if archive_hash not in {f[\"hash\"] for f in package.files}:\n raise RuntimeError(\n \"Invalid hash for {} using archive {}\".format(package, archive.name)\n )\n\n return archive\n\n def 
_download_archive(self, operation: Union[Install, Update], link: Link) -> Path:\n response = self._authenticator.request(\n \"get\", link.url, stream=True, io=self._sections.get(id(operation), self._io)\n )\n wheel_size = response.headers.get(\"content-length\")\n operation_message = self.get_operation_message(operation)\n message = (\n \" <fg=blue;options=bold>•</> {message}: <info>Downloading...</>\".format(\n message=operation_message,\n )\n )\n progress = None\n if self.supports_fancy_output():\n if wheel_size is None:\n self._write(operation, message)\n else:\n from cleo.ui.progress_bar import ProgressBar\n\n progress = ProgressBar(\n self._sections[id(operation)], max=int(wheel_size)\n )\n progress.set_format(message + \" <b>%percent%%</b>\")\n\n if progress:\n with self._lock:\n progress.start()\n\n done = 0\n archive = self._chef.get_cache_directory_for_link(link) / link.filename\n archive.parent.mkdir(parents=True, exist_ok=True)\n with archive.open(\"wb\") as f:\n for chunk in response.iter_content(chunk_size=4096):\n if not chunk:\n break\n\n done += len(chunk)\n\n if progress:\n with self._lock:\n progress.set_progress(done)\n\n f.write(chunk)\n\n if progress:\n with self._lock:\n progress.finish()\n\n return archive\n\n def _should_write_operation(self, operation: Operation) -> bool:\n return not operation.skipped or self._dry_run or self._verbose\n", "path": "poetry/installation/executor.py" } ]
diff --git a/poetry/installation/executor.py b/poetry/installation/executor.py index 520487d2536..ea2940e1fbd 100644 --- a/poetry/installation/executor.py +++ b/poetry/installation/executor.py @@ -700,7 +700,4 @@ def _download_archive(self, operation: Union[Install, Update], link: Link) -> Pa return archive def _should_write_operation(self, operation: Operation) -> bool: - if not operation.skipped: - return True - - return self._dry_run or self._verbose + return not operation.skipped or self._dry_run or self._verbose diff --git a/tests/installation/test_installer.py b/tests/installation/test_installer.py index 32eb63149ea..738787c68d9 100644 --- a/tests/installation/test_installer.py +++ b/tests/installation/test_installer.py @@ -7,7 +7,11 @@ import pytest +from cleo.io.inputs.input import Input +from cleo.io.io import IO from cleo.io.null_io import NullIO +from cleo.io.outputs.buffered_output import BufferedOutput +from cleo.io.outputs.output import Verbosity from poetry.core.packages import ProjectPackage from poetry.core.toml.file import TOMLFile @@ -1889,3 +1893,28 @@ def test_installer_can_handle_old_lock_files( # colorama will be added assert 8 == installer.executor.installations_count + + [email protected]("quiet", [True, False]) +def test_run_with_dependencies_quiet(installer, locker, repo, package, quiet): + package_a = get_package("A", "1.0") + package_b = get_package("B", "1.1") + repo.add_package(package_a) + repo.add_package(package_b) + + installer._io = IO(Input(), BufferedOutput(), BufferedOutput()) + installer._io.set_verbosity(Verbosity.QUIET if quiet else Verbosity.NORMAL) + + package.add_dependency(Factory.create_dependency("A", "~1.0")) + package.add_dependency(Factory.create_dependency("B", "^1.0")) + + installer.run() + expected = fixture("with-dependencies") + + assert locker.written_data == expected + + installer._io.output._buffer.seek(0) + if quiet: + assert installer._io.output._buffer.read() == "" + else: + assert installer._io.output._buffer.read() != ""
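A note on the Poetry diff above: the only change to `executor.py` is collapsing the early return in `_should_write_operation` into a single boolean expression. As a quick illustration (standalone, not part of the Poetry code base; the two helper functions below are made up for the comparison), the two forms agree for every input combination:

```python
from itertools import product


def original(skipped: bool, dry_run: bool, verbose: bool) -> bool:
    # Early-return form removed by the diff.
    if not skipped:
        return True
    return dry_run or verbose


def simplified(skipped: bool, dry_run: bool, verbose: bool) -> bool:
    # Single-expression form introduced by the diff.
    return not skipped or dry_run or verbose


# Exhaustive check over all eight combinations of the three flags.
for combo in product([False, True], repeat=3):
    assert original(*combo) == simplified(*combo)
```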
zigpy__zha-device-handlers-2902
[Device Support Request] TS0601 _TZE204_yjjdcqsq temperature/humidity sensor ### Problem description The TS0601 _TZE204_yjjdcqsq temperature/humidity sensor does not show any entities in current HA. https://www.amazon.de/-/en/dp/B0BWJHHK89 There's an almost same id (_TZE200_yjjdcqsq, note 200 vs 204) in the repo. I've tried adding this one `TuyaTempHumiditySensorVar03` and `TuyaTempHumiditySensorVar04` (one at a time) and verified the quirk gets applied. Doing so has not yielded useful data _except_ once for one sensor I got one temperature + humidity reading where the temperature seemed to be correct, but humidity pretty far off, and battery was "Unknown". I think that was for the Var03. I've tried with two sensors, the other has never shown anything but "Unknown" for temperature, humidity, and battery. And I haven't seen any new readings for the one that sent some values once either. ### Solution description Sensor working out of the box. ### Screenshots/Video <details><summary>Screenshots/Video</summary> [Paste/upload your media here] </details> ### Device signature <details><summary>Device signature</summary> ```json { "node_descriptor": "NodeDescriptor(logical_type=<LogicalType.EndDevice: 2>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress: 128>, manufacturer_code=4417, maximum_buffer_size=66, maximum_incoming_transfer_size=66, server_mask=10752, maximum_outgoing_transfer_size=66, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=True, *is_full_function_device=False, *is_mains_powered=False, *is_receiver_on_when_idle=False, *is_router=False, *is_security_capable=False)", "endpoints": { "1": { "profile_id": "0x0104", "device_type": "0x0051", "input_clusters": [ "0x0000", "0x0004", "0x0005", "0xef00" ], "output_clusters": [ "0x000a", "0x0019" ] } }, "manufacturer": "_TZE204_yjjdcqsq", "model": "TS0601", "class": "zigpy.device.Device" } ``` </details> ### Diagnostic information <details><summary>Diagnostic information</summary> ```json { "home_assistant": { "installation_type": "Home Assistant OS", "version": "2024.1.2", "dev": false, "hassio": true, "virtualenv": false, "python_version": "3.11.6", "docker": true, "arch": "aarch64", "timezone": "Europe/Helsinki", "os_name": "Linux", "os_version": "6.1.71-haos", "supervisor": "2023.12.0", "host_os": "Home Assistant OS 11.4", "docker_version": "24.0.7", "chassis": "embedded", "run_as_root": true }, "custom_components": { "jatekukko": { "version": "0.11.0", "requirements": [ "pytekukko==0.14.0" ] }, "ical": { "version": "1.6.7", "requirements": [ "icalendar==5.0.7" ] }, "hacs": { "version": "1.33.0", "requirements": [ "aiogithubapi>=22.10.1" ] }, "entsoe": { "version": "0.0.1", "requirements": [ "entsoe-py==0.5.8" ] } }, "integration_manifest": { "domain": "zha", "name": "Zigbee Home Automation", "after_dependencies": [ "onboarding", "usb" ], "codeowners": [ "@dmulcahey", "@adminiuga", "@puddly", "@TheJulianJES" ], "config_flow": true, "dependencies": [ "file_upload" ], "documentation": "https://www.home-assistant.io/integrations/zha", "iot_class": "local_polling", "loggers": [ "aiosqlite", "bellows", "crccheck", "pure_pcapy3", "zhaquirks", "zigpy", "zigpy_deconz", "zigpy_xbee", "zigpy_zigate", "zigpy_znp", "universal_silabs_flasher" ], "requirements": [ "bellows==0.37.6", "pyserial==3.5", 
"pyserial-asyncio==0.6", "zha-quirks==0.0.109", "zigpy-deconz==0.22.4", "zigpy==0.60.4", "zigpy-xbee==0.20.1", "zigpy-zigate==0.12.0", "zigpy-znp==0.12.1", "universal-silabs-flasher==0.0.15", "pyserial-asyncio-fast==0.11" ], "usb": [ { "vid": "10C4", "pid": "EA60", "description": "*2652*", "known_devices": [ "slae.sh cc2652rb stick" ] }, { "vid": "1A86", "pid": "55D4", "description": "*sonoff*plus*", "known_devices": [ "sonoff zigbee dongle plus v2" ] }, { "vid": "10C4", "pid": "EA60", "description": "*sonoff*plus*", "known_devices": [ "sonoff zigbee dongle plus" ] }, { "vid": "10C4", "pid": "EA60", "description": "*tubeszb*", "known_devices": [ "TubesZB Coordinator" ] }, { "vid": "1A86", "pid": "7523", "description": "*tubeszb*", "known_devices": [ "TubesZB Coordinator" ] }, { "vid": "1A86", "pid": "7523", "description": "*zigstar*", "known_devices": [ "ZigStar Coordinators" ] }, { "vid": "1CF1", "pid": "0030", "description": "*conbee*", "known_devices": [ "Conbee II" ] }, { "vid": "0403", "pid": "6015", "description": "*conbee*", "known_devices": [ "Conbee III" ] }, { "vid": "10C4", "pid": "8A2A", "description": "*zigbee*", "known_devices": [ "Nortek HUSBZB-1" ] }, { "vid": "0403", "pid": "6015", "description": "*zigate*", "known_devices": [ "ZiGate+" ] }, { "vid": "10C4", "pid": "EA60", "description": "*zigate*", "known_devices": [ "ZiGate" ] }, { "vid": "10C4", "pid": "8B34", "description": "*bv 2010/10*", "known_devices": [ "Bitron Video AV2010/10" ] } ], "zeroconf": [ { "type": "_esphomelib._tcp.local.", "name": "tube*" }, { "type": "_zigate-zigbee-gateway._tcp.local.", "name": "*zigate*" }, { "type": "_zigstar_gw._tcp.local.", "name": "*zigstar*" }, { "type": "_uzg-01._tcp.local.", "name": "uzg-01*" }, { "type": "_slzb-06._tcp.local.", "name": "slzb-06*" } ], "is_built_in": true }, "data": { "ieee": "**REDACTED**", "nwk": 6268, "manufacturer": "_TZE204_yjjdcqsq", "model": "TS0601", "name": "_TZE204_yjjdcqsq TS0601", "quirk_applied": false, "quirk_class": "zigpy.device.Device", "quirk_id": null, "manufacturer_code": 4417, "power_source": "Battery or Unknown", "lqi": 255, "rssi": -72, "last_seen": "2024-01-10T17:28:12", "available": true, "device_type": "EndDevice", "signature": { "node_descriptor": "NodeDescriptor(logical_type=<LogicalType.EndDevice: 2>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress: 128>, manufacturer_code=4417, maximum_buffer_size=66, maximum_incoming_transfer_size=66, server_mask=10752, maximum_outgoing_transfer_size=66, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=True, *is_full_function_device=False, *is_mains_powered=False, *is_receiver_on_when_idle=False, *is_router=False, *is_security_capable=False)", "endpoints": { "1": { "profile_id": "0x0104", "device_type": "0x0051", "input_clusters": [ "0x0000", "0x0004", "0x0005", "0xef00" ], "output_clusters": [ "0x000a", "0x0019" ] } }, "manufacturer": "_TZE204_yjjdcqsq", "model": "TS0601" }, "active_coordinator": false, "entities": [], "neighbors": [], "routes": [], "endpoint_names": [ { "name": "SMART_PLUG" } ], "user_given_name": null, "device_reg_id": "51b57764ccfc6310f784ac141ab39578", "area_id": "a2e1df9ac6fb4acc817dd068c772d150", "cluster_details": { "1": { "device_type": { "name": "SMART_PLUG", "id": 81 }, "profile_id": 260, "in_clusters": { "0x0004": { 
"endpoint_attribute": "groups", "attributes": {}, "unsupported_attributes": {} }, "0x0005": { "endpoint_attribute": "scenes", "attributes": {}, "unsupported_attributes": {} }, "0xef00": { "endpoint_attribute": null, "attributes": {}, "unsupported_attributes": {} }, "0x0000": { "endpoint_attribute": "basic", "attributes": { "0x0001": { "attribute_name": "app_version", "value": 73 }, "0x0004": { "attribute_name": "manufacturer", "value": "_TZE204_yjjdcqsq" }, "0x0005": { "attribute_name": "model", "value": "TS0601" } }, "unsupported_attributes": {} } }, "out_clusters": { "0x0019": { "endpoint_attribute": "ota", "attributes": {}, "unsupported_attributes": {} }, "0x000a": { "endpoint_attribute": "time", "attributes": {}, "unsupported_attributes": {} } } } } } } ``` </details> ### Logs <details><summary>Logs</summary> ```python [Paste the logs here] ``` </details> ### Custom quirk <details><summary>Custom quirk</summary> ```python [Paste your custom quirk here] ``` </details> ### Additional information zigbee-herdsman-converters adds it as an alias to the TZE200 one, https://github.com/Koenkk/zigbee-herdsman-converters/commit/95398b53a6af0526906c5f4d9ee50bbc9056d688 But as said I haven't got too promising results doing the equivalent in my tests.
[ { "content": "\"\"\"Tuya temp and humidity sensors.\"\"\"\n\nfrom typing import Any, Dict\n\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.general import Basic, Groups, Ota, Scenes, Time\nfrom zigpy.zcl.clusters.measurement import (\n RelativeHumidity,\n SoilMoisture,\n TemperatureMeasurement,\n)\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n SKIP_CONFIGURATION,\n)\nfrom zhaquirks.tuya import TuyaLocalCluster, TuyaPowerConfigurationCluster2AAA\nfrom zhaquirks.tuya.mcu import DPToAttributeMapping, TuyaMCUCluster\n\n\nclass TuyaTemperatureMeasurement(TemperatureMeasurement, TuyaLocalCluster):\n \"\"\"Tuya local TemperatureMeasurement cluster.\"\"\"\n\n\nclass TuyaSoilMoisture(SoilMoisture, TuyaLocalCluster):\n \"\"\"Tuya local SoilMoisture cluster with a device RH_MULTIPLIER factor if required.\"\"\"\n\n\nclass TuyaRelativeHumidity(RelativeHumidity, TuyaLocalCluster):\n \"\"\"Tuya local RelativeHumidity cluster with a device RH_MULTIPLIER factor.\"\"\"\n\n def update_attribute(self, attr_name: str, value: Any) -> None:\n \"\"\"Apply a correction factor to value.\"\"\"\n\n if attr_name == \"measured_value\":\n value = value * (\n self.endpoint.device.RH_MULTIPLIER\n if hasattr(self.endpoint.device, \"RH_MULTIPLIER\")\n else 100\n )\n return super().update_attribute(attr_name, value)\n\n\nclass TemperatureHumidityManufCluster(TuyaMCUCluster):\n \"\"\"Tuya Manufacturer Cluster with Temperature and Humidity data points.\"\"\"\n\n dp_to_attribute: Dict[int, DPToAttributeMapping] = {\n 1: DPToAttributeMapping(\n TuyaTemperatureMeasurement.ep_attribute,\n \"measured_value\",\n converter=lambda x: x * 10, # decidegree to centidegree\n ),\n 2: DPToAttributeMapping(\n TuyaRelativeHumidity.ep_attribute,\n \"measured_value\",\n # converter=lambda x: x * 10, --> move conversion to TuyaRelativeHumidity cluster\n ),\n 4: DPToAttributeMapping(\n TuyaPowerConfigurationCluster2AAA.ep_attribute,\n \"battery_percentage_remaining\",\n converter=lambda x: x * 2, # double reported percentage\n ),\n }\n\n data_point_handlers = {\n 1: \"_dp_2_attr_update\",\n 2: \"_dp_2_attr_update\",\n 4: \"_dp_2_attr_update\",\n }\n\n\nclass TemperatureHumidityBatteryStatesManufCluster(TuyaMCUCluster):\n \"\"\"Tuya Manufacturer Cluster with Temperature and Humidity data points. 
Battery states 25, 50 and 100%.\"\"\"\n\n dp_to_attribute: Dict[int, DPToAttributeMapping] = {\n 1: TemperatureHumidityManufCluster.dp_to_attribute[1],\n 2: TemperatureHumidityManufCluster.dp_to_attribute[2],\n 3: DPToAttributeMapping(\n TuyaPowerConfigurationCluster2AAA.ep_attribute,\n \"battery_percentage_remaining\",\n converter=lambda x: {0: 25, 1: 50, 2: 100}[x], # double reported percentage\n ),\n }\n\n data_point_handlers = {\n 1: \"_dp_2_attr_update\",\n 2: \"_dp_2_attr_update\",\n 3: \"_dp_2_attr_update\",\n }\n\n\nclass TuyaTempHumiditySensor(CustomDevice):\n \"\"\"Custom device representing tuya temp and humidity sensor with e-ink screen.\"\"\"\n\n # RelativeHumidity multiplier\n RH_MULTIPLIER = 10\n\n signature = {\n # <SimpleDescriptor endpoint=1, profile=260, device_type=81\n # device_version=1\n # input_clusters=[4, 5, 61184, 0]\n # output_clusters=[25, 10]>\n MODELS_INFO: [\n (\"_TZE200_bjawzodf\", \"TS0601\"),\n (\"_TZE200_zl1kmjqx\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TemperatureHumidityManufCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n replacement = {\n SKIP_CONFIGURATION: True,\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.TEMPERATURE_SENSOR,\n INPUT_CLUSTERS: [\n TemperatureHumidityManufCluster, # Single bus for temp, humidity, and battery\n TuyaTemperatureMeasurement,\n TuyaRelativeHumidity,\n TuyaPowerConfigurationCluster2AAA,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n\nclass TuyaTempHumiditySensor_Square(CustomDevice):\n \"\"\"Custom device representing tuya temp and humidity sensor with e-ink screen.\"\"\"\n\n # RelativeHumidity multiplier\n # RH_MULTIPLIER = 100\n\n signature = {\n MODELS_INFO: [\n (\"_TZE200_a8sdabtg\", \"TS0601\"), # Variant without screen, round\n (\"_TZE200_qoy0ekbd\", \"TS0601\"),\n (\"_TZE200_znbl8dj5\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n # \"profile_id\": 260, \"device_type\": \"0x0302\",\n # \"in_clusters\": [\"0x0000\",\"0x0001\",\"0x0402\",\"0x0405\"],\n # \"out_clusters\": [\"0x000a\",\"0x0019\"]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.TEMPERATURE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n TuyaPowerConfigurationCluster2AAA.cluster_id,\n TemperatureMeasurement.cluster_id,\n RelativeHumidity.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n replacement = {\n SKIP_CONFIGURATION: True,\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.TEMPERATURE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n TuyaPowerConfigurationCluster2AAA,\n TemperatureHumidityManufCluster,\n TuyaTemperatureMeasurement,\n TuyaRelativeHumidity,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n\nclass TuyaTempHumiditySensorVar03(CustomDevice):\n \"\"\"Tuya temp and humidity sensor (variation 03).\"\"\"\n\n signature = {\n # \"profile_id\": 260,\n # \"device_type\": \"0x0051\",\n # \"in_clusters\": [\"0x0000\",\"0x0004\",\"0x0005\",\"0xef00\"],\n # \"out_clusters\": [\"0x000a\",\"0x0019\"]\n MODELS_INFO: [\n (\"_TZE200_qyflbnbj\", \"TS0601\"),\n (\"_TZE200_utkemkbs\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n 
TemperatureHumidityManufCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n replacement = {\n SKIP_CONFIGURATION: True,\n ENDPOINTS: {\n 1: {\n DEVICE_TYPE: zha.DeviceType.TEMPERATURE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TemperatureHumidityManufCluster,\n TuyaTemperatureMeasurement,\n TuyaRelativeHumidity,\n TuyaPowerConfigurationCluster2AAA,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n\nclass TuyaTempHumiditySensorVar04(CustomDevice):\n \"\"\"Tuya temp and humidity sensor (variation 04).\"\"\"\n\n signature = {\n # \"profile_id\": 260,\n # \"device_type\": \"0x0051\",\n # \"in_clusters\": [\"0x0000\",\"0x0004\",\"0x0005\",\"0xef00\"],\n # \"out_clusters\": [\"0x000a\",\"0x0019\"]\n MODELS_INFO: [\n (\"_TZE200_yjjdcqsq\", \"TS0601\"),\n (\"_TZE200_9yapgbuv\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TemperatureHumidityManufCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n replacement = {\n SKIP_CONFIGURATION: True,\n ENDPOINTS: {\n 1: {\n DEVICE_TYPE: zha.DeviceType.TEMPERATURE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TemperatureHumidityBatteryStatesManufCluster,\n TuyaTemperatureMeasurement,\n TuyaRelativeHumidity,\n TuyaPowerConfigurationCluster2AAA,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n\nclass SoilManufCluster(TuyaMCUCluster):\n \"\"\"Tuya Manufacturer Cluster with Temperature and Humidity data points.\"\"\"\n\n dp_to_attribute: Dict[int, DPToAttributeMapping] = {\n 5: DPToAttributeMapping(\n TuyaTemperatureMeasurement.ep_attribute,\n \"measured_value\",\n converter=lambda x: x * 100,\n ),\n 3: DPToAttributeMapping(\n TuyaSoilMoisture.ep_attribute,\n \"measured_value\",\n converter=lambda x: x * 100,\n ),\n 15: DPToAttributeMapping(\n TuyaPowerConfigurationCluster2AAA.ep_attribute,\n \"battery_percentage_remaining\",\n converter=lambda x: x * 2, # double reported percentage\n ),\n }\n\n data_point_handlers = {\n 3: \"_dp_2_attr_update\",\n 5: \"_dp_2_attr_update\",\n 15: \"_dp_2_attr_update\",\n }\n\n\nclass TuyaSoilSensor(CustomDevice):\n \"\"\"Tuya temp and humidity sensor (variation 03).\"\"\"\n\n signature = {\n # \"profile_id\": 260,\n # \"device_type\": \"0x0051\",\n # \"in_clusters\": [\"0x0000\",\"0x0004\",\"0x0005\",\"0xef00\"],\n # \"out_clusters\": [\"0x000a\",\"0x0019\"]\n MODELS_INFO: [\n (\"_TZE200_myd45weu\", \"TS0601\"),\n (\"_TZE200_ga1maeof\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TemperatureHumidityManufCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n replacement = {\n SKIP_CONFIGURATION: True,\n ENDPOINTS: {\n 1: {\n DEVICE_TYPE: zha.DeviceType.TEMPERATURE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n SoilManufCluster,\n TuyaTemperatureMeasurement,\n TuyaSoilMoisture,\n TuyaPowerConfigurationCluster2AAA,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n", "path": "zhaquirks/tuya/ts0601_sensor.py" } ]
[ { "content": "\"\"\"Tuya temp and humidity sensors.\"\"\"\n\nfrom typing import Any, Dict\n\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.general import Basic, Groups, Ota, Scenes, Time\nfrom zigpy.zcl.clusters.measurement import (\n RelativeHumidity,\n SoilMoisture,\n TemperatureMeasurement,\n)\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n SKIP_CONFIGURATION,\n)\nfrom zhaquirks.tuya import TuyaLocalCluster, TuyaPowerConfigurationCluster2AAA\nfrom zhaquirks.tuya.mcu import DPToAttributeMapping, TuyaMCUCluster\n\n\nclass TuyaTemperatureMeasurement(TemperatureMeasurement, TuyaLocalCluster):\n \"\"\"Tuya local TemperatureMeasurement cluster.\"\"\"\n\n\nclass TuyaSoilMoisture(SoilMoisture, TuyaLocalCluster):\n \"\"\"Tuya local SoilMoisture cluster with a device RH_MULTIPLIER factor if required.\"\"\"\n\n\nclass TuyaRelativeHumidity(RelativeHumidity, TuyaLocalCluster):\n \"\"\"Tuya local RelativeHumidity cluster with a device RH_MULTIPLIER factor.\"\"\"\n\n def update_attribute(self, attr_name: str, value: Any) -> None:\n \"\"\"Apply a correction factor to value.\"\"\"\n\n if attr_name == \"measured_value\":\n value = value * (\n self.endpoint.device.RH_MULTIPLIER\n if hasattr(self.endpoint.device, \"RH_MULTIPLIER\")\n else 100\n )\n return super().update_attribute(attr_name, value)\n\n\nclass TemperatureHumidityManufCluster(TuyaMCUCluster):\n \"\"\"Tuya Manufacturer Cluster with Temperature and Humidity data points.\"\"\"\n\n dp_to_attribute: Dict[int, DPToAttributeMapping] = {\n 1: DPToAttributeMapping(\n TuyaTemperatureMeasurement.ep_attribute,\n \"measured_value\",\n converter=lambda x: x * 10, # decidegree to centidegree\n ),\n 2: DPToAttributeMapping(\n TuyaRelativeHumidity.ep_attribute,\n \"measured_value\",\n # converter=lambda x: x * 10, --> move conversion to TuyaRelativeHumidity cluster\n ),\n 4: DPToAttributeMapping(\n TuyaPowerConfigurationCluster2AAA.ep_attribute,\n \"battery_percentage_remaining\",\n converter=lambda x: x * 2, # double reported percentage\n ),\n }\n\n data_point_handlers = {\n 1: \"_dp_2_attr_update\",\n 2: \"_dp_2_attr_update\",\n 4: \"_dp_2_attr_update\",\n }\n\n\nclass TemperatureHumidityBatteryStatesManufCluster(TuyaMCUCluster):\n \"\"\"Tuya Manufacturer Cluster with Temperature and Humidity data points. 
Battery states 25, 50 and 100%.\"\"\"\n\n dp_to_attribute: Dict[int, DPToAttributeMapping] = {\n 1: TemperatureHumidityManufCluster.dp_to_attribute[1],\n 2: TemperatureHumidityManufCluster.dp_to_attribute[2],\n 3: DPToAttributeMapping(\n TuyaPowerConfigurationCluster2AAA.ep_attribute,\n \"battery_percentage_remaining\",\n converter=lambda x: {0: 25, 1: 50, 2: 100}[x], # double reported percentage\n ),\n }\n\n data_point_handlers = {\n 1: \"_dp_2_attr_update\",\n 2: \"_dp_2_attr_update\",\n 3: \"_dp_2_attr_update\",\n }\n\n\nclass TuyaTempHumiditySensor(CustomDevice):\n \"\"\"Custom device representing tuya temp and humidity sensor with e-ink screen.\"\"\"\n\n # RelativeHumidity multiplier\n RH_MULTIPLIER = 10\n\n signature = {\n # <SimpleDescriptor endpoint=1, profile=260, device_type=81\n # device_version=1\n # input_clusters=[4, 5, 61184, 0]\n # output_clusters=[25, 10]>\n MODELS_INFO: [\n (\"_TZE200_bjawzodf\", \"TS0601\"),\n (\"_TZE200_zl1kmjqx\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TemperatureHumidityManufCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n replacement = {\n SKIP_CONFIGURATION: True,\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.TEMPERATURE_SENSOR,\n INPUT_CLUSTERS: [\n TemperatureHumidityManufCluster, # Single bus for temp, humidity, and battery\n TuyaTemperatureMeasurement,\n TuyaRelativeHumidity,\n TuyaPowerConfigurationCluster2AAA,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n\nclass TuyaTempHumiditySensor_Square(CustomDevice):\n \"\"\"Custom device representing tuya temp and humidity sensor with e-ink screen.\"\"\"\n\n # RelativeHumidity multiplier\n # RH_MULTIPLIER = 100\n\n signature = {\n MODELS_INFO: [\n (\"_TZE200_a8sdabtg\", \"TS0601\"), # Variant without screen, round\n (\"_TZE200_qoy0ekbd\", \"TS0601\"),\n (\"_TZE200_znbl8dj5\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n # \"profile_id\": 260, \"device_type\": \"0x0302\",\n # \"in_clusters\": [\"0x0000\",\"0x0001\",\"0x0402\",\"0x0405\"],\n # \"out_clusters\": [\"0x000a\",\"0x0019\"]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.TEMPERATURE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n TuyaPowerConfigurationCluster2AAA.cluster_id,\n TemperatureMeasurement.cluster_id,\n RelativeHumidity.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n replacement = {\n SKIP_CONFIGURATION: True,\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.TEMPERATURE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n TuyaPowerConfigurationCluster2AAA,\n TemperatureHumidityManufCluster,\n TuyaTemperatureMeasurement,\n TuyaRelativeHumidity,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n\nclass TuyaTempHumiditySensorVar03(CustomDevice):\n \"\"\"Tuya temp and humidity sensor (variation 03).\"\"\"\n\n signature = {\n # \"profile_id\": 260,\n # \"device_type\": \"0x0051\",\n # \"in_clusters\": [\"0x0000\",\"0x0004\",\"0x0005\",\"0xef00\"],\n # \"out_clusters\": [\"0x000a\",\"0x0019\"]\n MODELS_INFO: [\n (\"_TZE200_qyflbnbj\", \"TS0601\"),\n (\"_TZE200_utkemkbs\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n 
TemperatureHumidityManufCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n replacement = {\n SKIP_CONFIGURATION: True,\n ENDPOINTS: {\n 1: {\n DEVICE_TYPE: zha.DeviceType.TEMPERATURE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TemperatureHumidityManufCluster,\n TuyaTemperatureMeasurement,\n TuyaRelativeHumidity,\n TuyaPowerConfigurationCluster2AAA,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n\nclass TuyaTempHumiditySensorVar04(CustomDevice):\n \"\"\"Tuya temp and humidity sensor (variation 04).\"\"\"\n\n signature = {\n # \"profile_id\": 260,\n # \"device_type\": \"0x0051\",\n # \"in_clusters\": [\"0x0000\",\"0x0004\",\"0x0005\",\"0xef00\"],\n # \"out_clusters\": [\"0x000a\",\"0x0019\"]\n MODELS_INFO: [\n (\"_TZE200_yjjdcqsq\", \"TS0601\"),\n (\"_TZE200_9yapgbuv\", \"TS0601\"),\n (\"_TZE204_yjjdcqsq\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TemperatureHumidityManufCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n replacement = {\n SKIP_CONFIGURATION: True,\n ENDPOINTS: {\n 1: {\n DEVICE_TYPE: zha.DeviceType.TEMPERATURE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TemperatureHumidityBatteryStatesManufCluster,\n TuyaTemperatureMeasurement,\n TuyaRelativeHumidity,\n TuyaPowerConfigurationCluster2AAA,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n\nclass SoilManufCluster(TuyaMCUCluster):\n \"\"\"Tuya Manufacturer Cluster with Temperature and Humidity data points.\"\"\"\n\n dp_to_attribute: Dict[int, DPToAttributeMapping] = {\n 5: DPToAttributeMapping(\n TuyaTemperatureMeasurement.ep_attribute,\n \"measured_value\",\n converter=lambda x: x * 100,\n ),\n 3: DPToAttributeMapping(\n TuyaSoilMoisture.ep_attribute,\n \"measured_value\",\n converter=lambda x: x * 100,\n ),\n 15: DPToAttributeMapping(\n TuyaPowerConfigurationCluster2AAA.ep_attribute,\n \"battery_percentage_remaining\",\n converter=lambda x: x * 2, # double reported percentage\n ),\n }\n\n data_point_handlers = {\n 3: \"_dp_2_attr_update\",\n 5: \"_dp_2_attr_update\",\n 15: \"_dp_2_attr_update\",\n }\n\n\nclass TuyaSoilSensor(CustomDevice):\n \"\"\"Tuya temp and humidity sensor (variation 03).\"\"\"\n\n signature = {\n # \"profile_id\": 260,\n # \"device_type\": \"0x0051\",\n # \"in_clusters\": [\"0x0000\",\"0x0004\",\"0x0005\",\"0xef00\"],\n # \"out_clusters\": [\"0x000a\",\"0x0019\"]\n MODELS_INFO: [\n (\"_TZE200_myd45weu\", \"TS0601\"),\n (\"_TZE200_ga1maeof\", \"TS0601\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TemperatureHumidityManufCluster.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n\n replacement = {\n SKIP_CONFIGURATION: True,\n ENDPOINTS: {\n 1: {\n DEVICE_TYPE: zha.DeviceType.TEMPERATURE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n SoilManufCluster,\n TuyaTemperatureMeasurement,\n TuyaSoilMoisture,\n TuyaPowerConfigurationCluster2AAA,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, Time.cluster_id],\n }\n },\n }\n", "path": "zhaquirks/tuya/ts0601_sensor.py" } ]
diff --git a/zhaquirks/tuya/ts0601_sensor.py b/zhaquirks/tuya/ts0601_sensor.py index aaed2ba083..54bd978535 100644 --- a/zhaquirks/tuya/ts0601_sensor.py +++ b/zhaquirks/tuya/ts0601_sensor.py @@ -250,6 +250,7 @@ class TuyaTempHumiditySensorVar04(CustomDevice): MODELS_INFO: [ ("_TZE200_yjjdcqsq", "TS0601"), ("_TZE200_9yapgbuv", "TS0601"), + ("_TZE204_yjjdcqsq", "TS0601"), ], ENDPOINTS: { 1: {
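The fix in the diff above is a one-line addition of the `_TZE204_yjjdcqsq` manufacturer id to `MODELS_INFO` of the existing `TuyaTempHumiditySensorVar04` quirk. For reference, before that change ships, the same matching could likely be achieved with a local custom quirk along these lines; this is an untested sketch (the class name is made up) and it assumes ZHA is configured with a custom quirks directory that it loads on start:

```python
"""Hypothetical local quirk: match _TZE204_yjjdcqsq with the variation-04 handling."""
from zhaquirks.const import MODELS_INFO
from zhaquirks.tuya.ts0601_sensor import TuyaTempHumiditySensorVar04


class TuyaTempHumiditySensorVar04TZE204(TuyaTempHumiditySensorVar04):
    """Variation 04 temp/humidity sensor sold under the _TZE204_ prefix."""

    # Reuse the parent signature but match only the _TZE204_ id;
    # the `replacement` dict is inherited unchanged from the parent class.
    signature = {
        **TuyaTempHumiditySensorVar04.signature,
        MODELS_INFO: [("_TZE204_yjjdcqsq", "TS0601")],
    }
```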
ckan__ckan-6125
Remove remaining Python 2 code in core This should be done in separate pull requests to make reviews easier - [x] Remove `requirements.py2.*` files and update documentation to remove py2 mentions - [x] Remove py2 specific code. Look for `if six.PY2:` and remove what's inside! - [x] Remove py3 checks. Look for `six.PY3` and make that the standard code run (remove the check if any) - [x] Remove usage of six. We should not need the compatibility layer any more. Make all code standard Python 3 This should definitely be done separately - [ ] Remove unicode literals (eg `my_conf[u"test_key_1"] = u"Test value 1"` -> `my_conf["test_key_1"] = "Test value 1"`
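For reference, a generic sketch of the mechanical changes the checklist above asks for; none of this is taken from the CKAN code base and the variable names are illustrative:

```python
import six

value = 42

# "Remove py2 specific code": on Python 3 the six.PY2 branch is dead code,
# so the whole conditional can be dropped.
if six.PY2:
    text = unicode(value)  # noqa: F821 - only reachable on Python 2
else:
    text = six.text_type(value)

# "Remove usage of six": the Python 3-only replacement needs no compatibility layer.
text = str(value)
assert text == "42"

# "Remove unicode literals": u"..." and "..." are identical on Python 3,
# so the prefix can simply be deleted.
my_conf = {}
my_conf[u"test_key_1"] = u"Test value 1"   # before
my_conf["test_key_1"] = "Test value 1"     # after
assert my_conf == {"test_key_1": "Test value 1"}
```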
[ { "content": "# encoding: utf-8\n\nimport os\nimport os.path\n\nfrom pkg_resources import parse_version\n\n# Avoid problem releasing to pypi from vagrant\nif os.environ.get('USER', '') == 'vagrant':\n del os.link\n\ntry:\n from setuptools import (setup, find_packages,\n __version__ as setuptools_version)\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import (setup, find_packages,\n __version__ as setuptools_version)\n\nfrom ckan import (__version__, __description__, __long_description__,\n __license__)\n\n\n#\n# Check setuptools version\n#\n\nHERE = os.path.dirname(__file__)\nwith open(os.path.join(HERE, 'requirement-setuptools.txt')) as f:\n setuptools_requirement = f.read().strip()\nmin_setuptools_version = parse_version(setuptools_requirement.split('==')[1])\nif parse_version(setuptools_version) < min_setuptools_version:\n raise AssertionError(\n 'setuptools version error\\n'\n 'You need a newer version of setuptools.\\n'\n 'Install the recommended version:\\n'\n ' pip install -r requirement-setuptools.txt\\n'\n 'and then try again to install ckan into your python environment.'\n )\n\n\nentry_points = {\n 'paste.app_factory': [\n 'main = ckan.config.middleware:make_app',\n ],\n 'paste.app_install': [\n 'main = ckan.config.install:CKANInstaller',\n ],\n 'console_scripts': [\n 'ckan = ckan.cli.cli:ckan',\n ],\n 'ckan.click_command': [\n 'datastore = ckanext.datastore.cli:datastore',\n 'datapusher = ckanext.datapusher.cli:datapusher',\n ],\n 'ckan.forms': [\n 'standard = ckan.forms.package:get_standard_fieldset',\n 'package = ckan.forms.package:get_standard_fieldset',\n 'group = ckan.forms.group:get_group_fieldset',\n 'package_group = ckan.forms.group:get_package_group_fieldset',\n ],\n 'ckan.search': [\n 'sql = ckan.lib.search.sql:SqlSearchBackend',\n 'solr = ckan.lib.search.solr_backend:SolrSearchBackend',\n ],\n 'ckan.plugins': [\n 'stats = ckanext.stats.plugin:StatsPlugin',\n 'multilingual_dataset = ckanext.multilingual.plugin:MultilingualDataset',\n 'multilingual_group = ckanext.multilingual.plugin:MultilingualGroup',\n 'multilingual_tag = ckanext.multilingual.plugin:MultilingualTag',\n 'multilingual_resource = ckanext.multilingual.plugin:MultilingualResource',\n 'expire_api_token = ckanext.expire_api_token.plugin:ExpireApiTokenPlugin',\n 'chained_functions = ckanext.chained_functions.plugin:ChainedFunctionsPlugin',\n 'datastore = ckanext.datastore.plugin:DatastorePlugin',\n 'datapusher=ckanext.datapusher.plugin:DatapusherPlugin',\n 'test_tag_vocab_plugin = ckanext.test_tag_vocab_plugin:MockVocabTagsPlugin',\n 'resource_proxy = ckanext.resourceproxy.plugin:ResourceProxy',\n 'text_view = ckanext.textview.plugin:TextView',\n 'recline_view = ckanext.reclineview.plugin:ReclineView',\n 'recline_grid_view = ckanext.reclineview.plugin:ReclineGridView',\n 'recline_graph_view = ckanext.reclineview.plugin:ReclineGraphView',\n 'recline_map_view = ckanext.reclineview.plugin:ReclineMapView',\n 'datatables_view = ckanext.datatablesview.plugin:DataTablesView',\n 'image_view = ckanext.imageview.plugin:ImageView',\n 'audio_view = ckanext.audioview.plugin:AudioView',\n 'video_view = ckanext.videoview.plugin:VideoView',\n 'webpage_view = ckanext.webpageview.plugin:WebPageView',\n # FIXME: Remove deprecated resource previews below. 
You should use the\n # versions as *_view instead.\n 'text_preview = ckanext.textview.plugin:TextView',\n 'recline_preview = ckanext.reclineview.plugin:ReclineView',\n 'recline_grid = ckanext.reclineview.plugin:ReclineGridView',\n 'recline_graph = ckanext.reclineview.plugin:ReclineGraphView',\n 'recline_map = ckanext.reclineview.plugin:ReclineMapView',\n # End of deprecated previews\n 'example_itemplatehelpers = ckanext.example_itemplatehelpers.plugin:ExampleITemplateHelpersPlugin',\n 'example_idatasetform = ckanext.example_idatasetform.plugin:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v1 = ckanext.example_idatasetform.plugin_v1:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v2 = ckanext.example_idatasetform.plugin_v2:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v3 = ckanext.example_idatasetform.plugin_v3:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v4 = ckanext.example_idatasetform.plugin_v4:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v5 = ckanext.example_idatasetform.plugin_v5:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v6 = ckanext.example_idatasetform.plugin_v6:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v7 = ckanext.example_idatasetform.plugin_v7:ExampleIDatasetFormPlugin',\n 'example_igroupform = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin',\n 'example_igroupform_v2 = ckanext.example_igroupform.plugin_v2:ExampleIGroupFormPlugin',\n 'example_igroupform_default_group_type = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin_DefaultGroupType',\n 'example_igroupform_organization = ckanext.example_igroupform.plugin:ExampleIGroupFormOrganizationPlugin',\n 'example_iauthfunctions_v1 = ckanext.example_iauthfunctions.plugin_v1:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v2 = ckanext.example_iauthfunctions.plugin_v2:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v3 = ckanext.example_iauthfunctions.plugin_v3:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v4 = ckanext.example_iauthfunctions.plugin_v4:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v5_custom_config_setting = ckanext.example_iauthfunctions.plugin_v5_custom_config_setting:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v6_parent_auth_functions = ckanext.example_iauthfunctions.plugin_v6_parent_auth_functions:ExampleIAuthFunctionsPlugin',\n 'example_theme_v01_empty_extension = ckanext.example_theme_docs.v01_empty_extension.plugin:ExampleThemePlugin',\n 'example_theme_v02_empty_template = ckanext.example_theme_docs.v02_empty_template.plugin:ExampleThemePlugin',\n 'example_theme_v03_jinja = ckanext.example_theme_docs.v03_jinja.plugin:ExampleThemePlugin',\n 'example_theme_v04_ckan_extends = ckanext.example_theme_docs.v04_ckan_extends.plugin:ExampleThemePlugin',\n 'example_theme_v05_block = ckanext.example_theme_docs.v05_block.plugin:ExampleThemePlugin',\n 'example_theme_v06_super = ckanext.example_theme_docs.v06_super.plugin:ExampleThemePlugin',\n 'example_theme_v07_helper_function = ckanext.example_theme_docs.v07_helper_function.plugin:ExampleThemePlugin',\n 'example_theme_v08_custom_helper_function = ckanext.example_theme_docs.v08_custom_helper_function.plugin:ExampleThemePlugin',\n 'example_theme_v09_snippet = ckanext.example_theme_docs.v09_snippet.plugin:ExampleThemePlugin',\n 'example_theme_v10_custom_snippet = ckanext.example_theme_docs.v10_custom_snippet.plugin:ExampleThemePlugin',\n 'example_theme_v11_HTML_and_CSS = ckanext.example_theme_docs.v11_HTML_and_CSS.plugin:ExampleThemePlugin',\n 
'example_theme_v12_extra_public_dir = ckanext.example_theme_docs.v12_extra_public_dir.plugin:ExampleThemePlugin',\n 'example_theme_v13_custom_css = ckanext.example_theme_docs.v13_custom_css.plugin:ExampleThemePlugin',\n 'example_theme_v14_more_custom_css = ckanext.example_theme_docs.v14_more_custom_css.plugin:ExampleThemePlugin',\n 'example_theme_v15_webassets = ckanext.example_theme_docs.v15_webassets.plugin:ExampleThemePlugin',\n 'example_theme_v16_initialize_a_javascript_module = ckanext.example_theme_docs.v16_initialize_a_javascript_module.plugin:ExampleThemePlugin',\n 'example_theme_v17_popover = ckanext.example_theme_docs.v17_popover.plugin:ExampleThemePlugin',\n 'example_theme_v18_snippet_api = ckanext.example_theme_docs.v18_snippet_api.plugin:ExampleThemePlugin',\n 'example_theme_v19_01_error = ckanext.example_theme_docs.v19_01_error.plugin:ExampleThemePlugin',\n 'example_theme_v19_02_error_handling = ckanext.example_theme_docs.v19_02_error_handling.plugin:ExampleThemePlugin',\n 'example_theme_v20_pubsub = ckanext.example_theme_docs.v20_pubsub.plugin:ExampleThemePlugin',\n 'example_theme_v21_custom_jquery_plugin = ckanext.example_theme_docs.v21_custom_jquery_plugin.plugin:ExampleThemePlugin',\n 'example_theme_v22_webassets = ckanext.example_theme_docs.v22_webassets.plugin:ExampleThemePlugin',\n 'example_theme_custom_config_setting = ckanext.example_theme_docs.custom_config_setting.plugin:ExampleThemePlugin',\n 'example_theme_custom_emails = ckanext.example_theme_docs.custom_emails.plugin:ExampleCustomEmailsPlugin',\n 'example_iresourcecontroller = ckanext.example_iresourcecontroller.plugin:ExampleIResourceControllerPlugin',\n 'example_ivalidators = ckanext.example_ivalidators.plugin:ExampleIValidatorsPlugin',\n 'example_iconfigurer = ckanext.example_iconfigurer.plugin:ExampleIConfigurerPlugin',\n 'example_itranslation = ckanext.example_itranslation.plugin:ExampleITranslationPlugin',\n 'example_iconfigurer_v1 = ckanext.example_iconfigurer.plugin_v1:ExampleIConfigurerPlugin',\n 'example_iconfigurer_v2 = ckanext.example_iconfigurer.plugin_v2:ExampleIConfigurerPlugin',\n 'example_flask_iblueprint = ckanext.example_flask_iblueprint.plugin:ExampleFlaskIBlueprintPlugin',\n 'example_flask_streaming = ckanext.example_flask_streaming.plugin:ExampleFlaskStreamingPlugin',\n 'example_iuploader = ckanext.example_iuploader.plugin:ExampleIUploader',\n 'example_idatastorebackend = ckanext.example_idatastorebackend.plugin:ExampleIDatastoreBackendPlugin',\n 'example_ipermissionlabels = ckanext.example_ipermissionlabels.plugin:ExampleIPermissionLabelsPlugin',\n 'example_iapitoken = ckanext.example_iapitoken.plugin:ExampleIApiTokenPlugin',\n 'example_iclick = ckanext.example_iclick.plugin:ExampleIClickPlugin',\n 'example_iauthenticator = ckanext.example_iauthenticator.plugin:ExampleIAuthenticatorPlugin',\n 'example_humanizer = ckanext.example_humanizer.plugin:ExampleHumanizerPlugin',\n ],\n 'ckan.system_plugins': [\n 'synchronous_search = ckan.lib.search:SynchronousSearchPlugin',\n 'domain_object_mods = ckan.model.modification:DomainObjectModificationExtension',\n ],\n 'ckan.test_plugins': [\n 'routes_plugin = tests.legacy.ckantestplugins:RoutesPlugin',\n 'mapper_plugin = tests.legacy.ckantestplugins:MapperPlugin',\n 'session_plugin = tests.legacy.ckantestplugins:SessionPlugin',\n 'mapper_plugin2 = tests.legacy.ckantestplugins:MapperPlugin2',\n 'authorizer_plugin = tests.legacy.ckantestplugins:AuthorizerPlugin',\n 'test_observer_plugin = tests.legacy.ckantestplugins:PluginObserverPlugin',\n 
'action_plugin = tests.legacy.ckantestplugins:ActionPlugin',\n 'auth_plugin = tests.legacy.ckantestplugins:AuthPlugin',\n 'test_group_plugin = tests.legacy.ckantestplugins:MockGroupControllerPlugin',\n 'test_package_controller_plugin = tests.legacy.ckantestplugins:MockPackageControllerPlugin',\n 'test_resource_preview = tests.legacy.ckantestplugins:MockResourcePreviewExtension',\n 'test_json_resource_preview = tests.legacy.ckantestplugins:JsonMockResourcePreviewExtension',\n 'sample_datastore_plugin = ckanext.datastore.tests.sample_datastore_plugin:SampleDataStorePlugin',\n 'example_datastore_deleted_with_count_plugin = ckanext.datastore.tests.test_chained_action:ExampleDataStoreDeletedWithCountPlugin',\n 'example_data_store_search_sql_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleDataStoreSearchSQLPlugin',\n 'example_external_provider_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleExternalProviderPlugin',\n 'test_datastore_view = ckan.tests.lib.test_datapreview:MockDatastoreBasedResourceView',\n 'test_datapusher_plugin = ckanext.datapusher.tests.test_interfaces:FakeDataPusherPlugin',\n 'test_routing_plugin = ckan.tests.config.test_middleware:MockRoutingPlugin',\n 'test_flash_plugin = ckan.tests.config.test_sessions:FlashMessagePlugin',\n 'test_helpers_plugin = ckan.tests.lib.test_helpers:TestHelpersPlugin',\n 'test_feed_plugin = ckan.tests.controllers.test_feed:MockFeedPlugin',\n 'test_js_translations_plugin = ckan.tests.lib.test_i18n:TestJSTranslationsPlugin',\n 'legacy_mock_search_plugin = ckan.tests.legacy.logic.test_action:MockPackageSearchPlugin',\n ],\n 'babel.extractors': [\n 'ckan = ckan.lib.extract:extract_ckan',\n ],\n}\n\nextras_require = {}\n_extras_groups = [\n ('requirements', 'requirements.txt'), ('requirements-py2', 'requirements-py2.txt'),\n ('setuptools', 'requirement-setuptools.txt'), ('dev', 'dev-requirements.txt'),\n]\n\nfor group, filepath in _extras_groups:\n with open(os.path.join(HERE, filepath), 'r') as f:\n extras_require[group] = f.readlines()\n\nsetup(\n name='ckan',\n version=__version__,\n author='https://github.com/ckan/ckan/graphs/contributors',\n author_email='[email protected]',\n license=__license__,\n url='http://ckan.org/',\n description=__description__,\n keywords='data packaging component tool server',\n long_description=__long_description__,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['ckanext', 'ckanext.stats'],\n message_extractors={\n 'ckan': [\n ('**.py', 'python', None),\n ('**.js', 'javascript', None),\n ('templates/importer/**', 'ignore', None),\n ('templates/**.html', 'ckan', None),\n ('templates/**.txt', 'ckan', None),\n ('templates_legacy/**.html', 'ckan', None),\n ('public/**', 'ignore', None),\n ],\n 'ckanext': [\n ('**.py', 'python', None),\n ('**.js', 'javascript', None),\n ('**.html', 'ckan', None),\n ('multilingual/solr/*.txt', 'ignore', None),\n ]\n },\n entry_points=entry_points,\n # setup.py test command needs a TestSuite so does not work with py.test\n # tests_require=[ 'py >= 0.8.0-alpha2' ]\n python_requires=\">=3.6\",\n extras_require=extras_require,\n classifiers=[\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python 
:: 3.8',\n ],\n)\n", "path": "setup.py" } ]
[ { "content": "# encoding: utf-8\n\nimport os\nimport os.path\n\nfrom pkg_resources import parse_version\n\n# Avoid problem releasing to pypi from vagrant\nif os.environ.get('USER', '') == 'vagrant':\n del os.link\n\ntry:\n from setuptools import (setup, find_packages,\n __version__ as setuptools_version)\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import (setup, find_packages,\n __version__ as setuptools_version)\n\nfrom ckan import (__version__, __description__, __long_description__,\n __license__)\n\n\n#\n# Check setuptools version\n#\n\nHERE = os.path.dirname(__file__)\nwith open(os.path.join(HERE, 'requirement-setuptools.txt')) as f:\n setuptools_requirement = f.read().strip()\nmin_setuptools_version = parse_version(setuptools_requirement.split('==')[1])\nif parse_version(setuptools_version) < min_setuptools_version:\n raise AssertionError(\n 'setuptools version error\\n'\n 'You need a newer version of setuptools.\\n'\n 'Install the recommended version:\\n'\n ' pip install -r requirement-setuptools.txt\\n'\n 'and then try again to install ckan into your python environment.'\n )\n\n\nentry_points = {\n 'paste.app_factory': [\n 'main = ckan.config.middleware:make_app',\n ],\n 'paste.app_install': [\n 'main = ckan.config.install:CKANInstaller',\n ],\n 'console_scripts': [\n 'ckan = ckan.cli.cli:ckan',\n ],\n 'ckan.click_command': [\n 'datastore = ckanext.datastore.cli:datastore',\n 'datapusher = ckanext.datapusher.cli:datapusher',\n ],\n 'ckan.forms': [\n 'standard = ckan.forms.package:get_standard_fieldset',\n 'package = ckan.forms.package:get_standard_fieldset',\n 'group = ckan.forms.group:get_group_fieldset',\n 'package_group = ckan.forms.group:get_package_group_fieldset',\n ],\n 'ckan.search': [\n 'sql = ckan.lib.search.sql:SqlSearchBackend',\n 'solr = ckan.lib.search.solr_backend:SolrSearchBackend',\n ],\n 'ckan.plugins': [\n 'stats = ckanext.stats.plugin:StatsPlugin',\n 'multilingual_dataset = ckanext.multilingual.plugin:MultilingualDataset',\n 'multilingual_group = ckanext.multilingual.plugin:MultilingualGroup',\n 'multilingual_tag = ckanext.multilingual.plugin:MultilingualTag',\n 'multilingual_resource = ckanext.multilingual.plugin:MultilingualResource',\n 'expire_api_token = ckanext.expire_api_token.plugin:ExpireApiTokenPlugin',\n 'chained_functions = ckanext.chained_functions.plugin:ChainedFunctionsPlugin',\n 'datastore = ckanext.datastore.plugin:DatastorePlugin',\n 'datapusher=ckanext.datapusher.plugin:DatapusherPlugin',\n 'test_tag_vocab_plugin = ckanext.test_tag_vocab_plugin:MockVocabTagsPlugin',\n 'resource_proxy = ckanext.resourceproxy.plugin:ResourceProxy',\n 'text_view = ckanext.textview.plugin:TextView',\n 'recline_view = ckanext.reclineview.plugin:ReclineView',\n 'recline_grid_view = ckanext.reclineview.plugin:ReclineGridView',\n 'recline_graph_view = ckanext.reclineview.plugin:ReclineGraphView',\n 'recline_map_view = ckanext.reclineview.plugin:ReclineMapView',\n 'datatables_view = ckanext.datatablesview.plugin:DataTablesView',\n 'image_view = ckanext.imageview.plugin:ImageView',\n 'audio_view = ckanext.audioview.plugin:AudioView',\n 'video_view = ckanext.videoview.plugin:VideoView',\n 'webpage_view = ckanext.webpageview.plugin:WebPageView',\n # FIXME: Remove deprecated resource previews below. 
You should use the\n # versions as *_view instead.\n 'text_preview = ckanext.textview.plugin:TextView',\n 'recline_preview = ckanext.reclineview.plugin:ReclineView',\n 'recline_grid = ckanext.reclineview.plugin:ReclineGridView',\n 'recline_graph = ckanext.reclineview.plugin:ReclineGraphView',\n 'recline_map = ckanext.reclineview.plugin:ReclineMapView',\n # End of deprecated previews\n 'example_itemplatehelpers = ckanext.example_itemplatehelpers.plugin:ExampleITemplateHelpersPlugin',\n 'example_idatasetform = ckanext.example_idatasetform.plugin:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v1 = ckanext.example_idatasetform.plugin_v1:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v2 = ckanext.example_idatasetform.plugin_v2:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v3 = ckanext.example_idatasetform.plugin_v3:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v4 = ckanext.example_idatasetform.plugin_v4:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v5 = ckanext.example_idatasetform.plugin_v5:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v6 = ckanext.example_idatasetform.plugin_v6:ExampleIDatasetFormPlugin',\n 'example_idatasetform_v7 = ckanext.example_idatasetform.plugin_v7:ExampleIDatasetFormPlugin',\n 'example_igroupform = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin',\n 'example_igroupform_v2 = ckanext.example_igroupform.plugin_v2:ExampleIGroupFormPlugin',\n 'example_igroupform_default_group_type = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin_DefaultGroupType',\n 'example_igroupform_organization = ckanext.example_igroupform.plugin:ExampleIGroupFormOrganizationPlugin',\n 'example_iauthfunctions_v1 = ckanext.example_iauthfunctions.plugin_v1:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v2 = ckanext.example_iauthfunctions.plugin_v2:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v3 = ckanext.example_iauthfunctions.plugin_v3:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v4 = ckanext.example_iauthfunctions.plugin_v4:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v5_custom_config_setting = ckanext.example_iauthfunctions.plugin_v5_custom_config_setting:ExampleIAuthFunctionsPlugin',\n 'example_iauthfunctions_v6_parent_auth_functions = ckanext.example_iauthfunctions.plugin_v6_parent_auth_functions:ExampleIAuthFunctionsPlugin',\n 'example_theme_v01_empty_extension = ckanext.example_theme_docs.v01_empty_extension.plugin:ExampleThemePlugin',\n 'example_theme_v02_empty_template = ckanext.example_theme_docs.v02_empty_template.plugin:ExampleThemePlugin',\n 'example_theme_v03_jinja = ckanext.example_theme_docs.v03_jinja.plugin:ExampleThemePlugin',\n 'example_theme_v04_ckan_extends = ckanext.example_theme_docs.v04_ckan_extends.plugin:ExampleThemePlugin',\n 'example_theme_v05_block = ckanext.example_theme_docs.v05_block.plugin:ExampleThemePlugin',\n 'example_theme_v06_super = ckanext.example_theme_docs.v06_super.plugin:ExampleThemePlugin',\n 'example_theme_v07_helper_function = ckanext.example_theme_docs.v07_helper_function.plugin:ExampleThemePlugin',\n 'example_theme_v08_custom_helper_function = ckanext.example_theme_docs.v08_custom_helper_function.plugin:ExampleThemePlugin',\n 'example_theme_v09_snippet = ckanext.example_theme_docs.v09_snippet.plugin:ExampleThemePlugin',\n 'example_theme_v10_custom_snippet = ckanext.example_theme_docs.v10_custom_snippet.plugin:ExampleThemePlugin',\n 'example_theme_v11_HTML_and_CSS = ckanext.example_theme_docs.v11_HTML_and_CSS.plugin:ExampleThemePlugin',\n 
'example_theme_v12_extra_public_dir = ckanext.example_theme_docs.v12_extra_public_dir.plugin:ExampleThemePlugin',\n 'example_theme_v13_custom_css = ckanext.example_theme_docs.v13_custom_css.plugin:ExampleThemePlugin',\n 'example_theme_v14_more_custom_css = ckanext.example_theme_docs.v14_more_custom_css.plugin:ExampleThemePlugin',\n 'example_theme_v15_webassets = ckanext.example_theme_docs.v15_webassets.plugin:ExampleThemePlugin',\n 'example_theme_v16_initialize_a_javascript_module = ckanext.example_theme_docs.v16_initialize_a_javascript_module.plugin:ExampleThemePlugin',\n 'example_theme_v17_popover = ckanext.example_theme_docs.v17_popover.plugin:ExampleThemePlugin',\n 'example_theme_v18_snippet_api = ckanext.example_theme_docs.v18_snippet_api.plugin:ExampleThemePlugin',\n 'example_theme_v19_01_error = ckanext.example_theme_docs.v19_01_error.plugin:ExampleThemePlugin',\n 'example_theme_v19_02_error_handling = ckanext.example_theme_docs.v19_02_error_handling.plugin:ExampleThemePlugin',\n 'example_theme_v20_pubsub = ckanext.example_theme_docs.v20_pubsub.plugin:ExampleThemePlugin',\n 'example_theme_v21_custom_jquery_plugin = ckanext.example_theme_docs.v21_custom_jquery_plugin.plugin:ExampleThemePlugin',\n 'example_theme_v22_webassets = ckanext.example_theme_docs.v22_webassets.plugin:ExampleThemePlugin',\n 'example_theme_custom_config_setting = ckanext.example_theme_docs.custom_config_setting.plugin:ExampleThemePlugin',\n 'example_theme_custom_emails = ckanext.example_theme_docs.custom_emails.plugin:ExampleCustomEmailsPlugin',\n 'example_iresourcecontroller = ckanext.example_iresourcecontroller.plugin:ExampleIResourceControllerPlugin',\n 'example_ivalidators = ckanext.example_ivalidators.plugin:ExampleIValidatorsPlugin',\n 'example_iconfigurer = ckanext.example_iconfigurer.plugin:ExampleIConfigurerPlugin',\n 'example_itranslation = ckanext.example_itranslation.plugin:ExampleITranslationPlugin',\n 'example_iconfigurer_v1 = ckanext.example_iconfigurer.plugin_v1:ExampleIConfigurerPlugin',\n 'example_iconfigurer_v2 = ckanext.example_iconfigurer.plugin_v2:ExampleIConfigurerPlugin',\n 'example_flask_iblueprint = ckanext.example_flask_iblueprint.plugin:ExampleFlaskIBlueprintPlugin',\n 'example_flask_streaming = ckanext.example_flask_streaming.plugin:ExampleFlaskStreamingPlugin',\n 'example_iuploader = ckanext.example_iuploader.plugin:ExampleIUploader',\n 'example_idatastorebackend = ckanext.example_idatastorebackend.plugin:ExampleIDatastoreBackendPlugin',\n 'example_ipermissionlabels = ckanext.example_ipermissionlabels.plugin:ExampleIPermissionLabelsPlugin',\n 'example_iapitoken = ckanext.example_iapitoken.plugin:ExampleIApiTokenPlugin',\n 'example_iclick = ckanext.example_iclick.plugin:ExampleIClickPlugin',\n 'example_iauthenticator = ckanext.example_iauthenticator.plugin:ExampleIAuthenticatorPlugin',\n 'example_humanizer = ckanext.example_humanizer.plugin:ExampleHumanizerPlugin',\n ],\n 'ckan.system_plugins': [\n 'synchronous_search = ckan.lib.search:SynchronousSearchPlugin',\n 'domain_object_mods = ckan.model.modification:DomainObjectModificationExtension',\n ],\n 'ckan.test_plugins': [\n 'routes_plugin = tests.legacy.ckantestplugins:RoutesPlugin',\n 'mapper_plugin = tests.legacy.ckantestplugins:MapperPlugin',\n 'session_plugin = tests.legacy.ckantestplugins:SessionPlugin',\n 'mapper_plugin2 = tests.legacy.ckantestplugins:MapperPlugin2',\n 'authorizer_plugin = tests.legacy.ckantestplugins:AuthorizerPlugin',\n 'test_observer_plugin = tests.legacy.ckantestplugins:PluginObserverPlugin',\n 
'action_plugin = tests.legacy.ckantestplugins:ActionPlugin',\n 'auth_plugin = tests.legacy.ckantestplugins:AuthPlugin',\n 'test_group_plugin = tests.legacy.ckantestplugins:MockGroupControllerPlugin',\n 'test_package_controller_plugin = tests.legacy.ckantestplugins:MockPackageControllerPlugin',\n 'test_resource_preview = tests.legacy.ckantestplugins:MockResourcePreviewExtension',\n 'test_json_resource_preview = tests.legacy.ckantestplugins:JsonMockResourcePreviewExtension',\n 'sample_datastore_plugin = ckanext.datastore.tests.sample_datastore_plugin:SampleDataStorePlugin',\n 'example_datastore_deleted_with_count_plugin = ckanext.datastore.tests.test_chained_action:ExampleDataStoreDeletedWithCountPlugin',\n 'example_data_store_search_sql_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleDataStoreSearchSQLPlugin',\n 'example_external_provider_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleExternalProviderPlugin',\n 'test_datastore_view = ckan.tests.lib.test_datapreview:MockDatastoreBasedResourceView',\n 'test_datapusher_plugin = ckanext.datapusher.tests.test_interfaces:FakeDataPusherPlugin',\n 'test_routing_plugin = ckan.tests.config.test_middleware:MockRoutingPlugin',\n 'test_flash_plugin = ckan.tests.config.test_sessions:FlashMessagePlugin',\n 'test_helpers_plugin = ckan.tests.lib.test_helpers:TestHelpersPlugin',\n 'test_feed_plugin = ckan.tests.controllers.test_feed:MockFeedPlugin',\n 'test_js_translations_plugin = ckan.tests.lib.test_i18n:TestJSTranslationsPlugin',\n 'legacy_mock_search_plugin = ckan.tests.legacy.logic.test_action:MockPackageSearchPlugin',\n ],\n 'babel.extractors': [\n 'ckan = ckan.lib.extract:extract_ckan',\n ],\n}\n\nextras_require = {}\n_extras_groups = [\n ('requirements', 'requirements.txt'),\n ('setuptools', 'requirement-setuptools.txt'), ('dev', 'dev-requirements.txt'),\n]\n\nfor group, filepath in _extras_groups:\n with open(os.path.join(HERE, filepath), 'r') as f:\n extras_require[group] = f.readlines()\n\nsetup(\n name='ckan',\n version=__version__,\n author='https://github.com/ckan/ckan/graphs/contributors',\n author_email='[email protected]',\n license=__license__,\n url='http://ckan.org/',\n description=__description__,\n keywords='data packaging component tool server',\n long_description=__long_description__,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['ckanext', 'ckanext.stats'],\n message_extractors={\n 'ckan': [\n ('**.py', 'python', None),\n ('**.js', 'javascript', None),\n ('templates/importer/**', 'ignore', None),\n ('templates/**.html', 'ckan', None),\n ('templates/**.txt', 'ckan', None),\n ('templates_legacy/**.html', 'ckan', None),\n ('public/**', 'ignore', None),\n ],\n 'ckanext': [\n ('**.py', 'python', None),\n ('**.js', 'javascript', None),\n ('**.html', 'ckan', None),\n ('multilingual/solr/*.txt', 'ignore', None),\n ]\n },\n entry_points=entry_points,\n # setup.py test command needs a TestSuite so does not work with py.test\n # tests_require=[ 'py >= 0.8.0-alpha2' ]\n python_requires=\">=3.6\",\n extras_require=extras_require,\n classifiers=[\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n)\n", "path": "setup.py" } ]
diff --git a/doc/maintaining/installing/install-from-package.rst b/doc/maintaining/installing/install-from-package.rst index a7a66272646..0976f4f673c 100644 --- a/doc/maintaining/installing/install-from-package.rst +++ b/doc/maintaining/installing/install-from-package.rst @@ -56,16 +56,6 @@ CKAN: sudo apt install -y libpq5 redis-server nginx supervisor - .. note:: If you want to install CKAN 2.9 running on Python 2 for backwards compatibility, you need to also install the Python 2 libraries: - - .. parsed-literal:: - - # On Ubuntu 18.04 - sudo apt install python2 libpython2.7 - - # On Ubuntu 20.04 - sudo apt install libpython2.7 - #. Download the CKAN package: - On Ubuntu 18.04: @@ -80,11 +70,6 @@ CKAN: wget \https://packaging.ckan.org/|latest_package_name_focal_py3| - - On Ubuntu 20.04, for Python 2: - - .. parsed-literal:: - - wget \https://packaging.ckan.org/|latest_package_name_focal_py2| #. Install the CKAN package: @@ -100,12 +85,6 @@ CKAN: sudo dpkg -i |latest_package_name_focal_py3| - - On Ubuntu 20.04, for Python 2: - - .. parsed-literal:: - - sudo dpkg -i |latest_package_name_focal_py2| - ----------------------------------- 2. Install and configure PostgreSQL diff --git a/doc/maintaining/installing/install-from-source.rst b/doc/maintaining/installing/install-from-source.rst index 0c32e9f21d1..22ec3a21440 100644 --- a/doc/maintaining/installing/install-from-source.rst +++ b/doc/maintaining/installing/install-from-source.rst @@ -29,13 +29,6 @@ required packages with this command:: sudo apt-get install python3-dev postgresql libpq-dev python3-pip python3-venv git-core solr-tomcat openjdk-8-jdk redis-server -.. note:: - - For Python 2 (deprecated, but compatible with CKAN 2.9 and earlier), do - this instead:: - - sudo apt-get install python-dev postgresql libpq-dev python-pip python-virtualenv git-core solr-tomcat openjdk-8-jdk redis-server - If you're not using a Debian-based operating system, find the best way to install the following packages on your operating system (see our `How to Install CKAN <https://github.com/ckan/ckan/wiki/How-to-Install-CKAN>`_ @@ -44,7 +37,7 @@ wiki page for help): ===================== =============================================== Package Description ===================== =============================================== -Python `The Python programming language, v3.6 or newer (or v2.7) <https://www.python.org/getit/>`_ +Python `The Python programming language, v3.6 or newer <https://www.python.org/getit/>`_ |postgres| `The PostgreSQL database system, v9.5 or newer <https://www.postgresql.org/docs/9.5/libpq.html>`_ libpq `The C programmer's interface to PostgreSQL <http://www.postgresql.org/docs/8.1/static/libpq.html>`_ pip `A tool for installing and managing Python packages <https://pip.pypa.io/en/stable/>`_ @@ -105,14 +98,6 @@ a. Create a Python `virtual environment <https://virtualenv.pypa.io/en/latest/>` |activate| -.. note:: - - For Python 2 then replace the `python3 -m venv` command with: - - .. parsed-literal:: - - virtualenv --python=/usr/bin/python2.7 --no-site-packages |virtualenv| - |activate| b. Install the recommended ``setuptools`` version and up-to-date pip: @@ -130,13 +115,6 @@ c. Install the CKAN source code into your virtualenv. pip install -e 'git+\ |git_url|\@\ |current_release_tag|\#egg=ckan[requirements]' - .. note:: - - For Python 2 replace the last fragment with `requirements-py2` - - .. 
parsed-literal:: - - pip install -e 'git+\ |git_url|\@\ |current_release_tag|\#egg=ckan[requirements-py2]' If you're installing CKAN for development, you may want to install the latest development version (the most recent commit on the master branch of diff --git a/doc/maintaining/upgrading/upgrade-source.rst b/doc/maintaining/upgrading/upgrade-source.rst index 502c0ad4372..5956ea892b3 100644 --- a/doc/maintaining/upgrading/upgrade-source.rst +++ b/doc/maintaining/upgrading/upgrade-source.rst @@ -45,9 +45,6 @@ CKAN release you're upgrading to: pip install --upgrade -r requirements.txt - .. note:: - - For Python 2 replace `requirements.txt` with `requirements-py2.txt` #. Register any new or updated plugins: diff --git a/requirements-py2.in b/requirements-py2.in deleted file mode 100644 index 0cfa898b29f..00000000000 --- a/requirements-py2.in +++ /dev/null @@ -1,45 +0,0 @@ -# The file contains the direct ckan requirements (python2). -# Use pip-compile to create a requirements.txt file from this -alembic==1.0.0 -Babel==2.7.0 -bleach==3.3.0 -click==7.1.2 -dominate==2.4.0 -feedgen==0.9.0 -Flask==1.1.1 -Flask-Babel==0.11.2 -flask-multistatic==1.0 -future==0.18.2 -Jinja2==2.11.3 -PyJWT==1.7.1 -Markdown==2.6.7 -passlib==1.7.3 -paste==1.7.5.1 -PasteScript==2.0.2 -polib==1.0.7 -psycopg2==2.8.2 -python-magic==0.4.15 -pysolr==3.6.0 -Pylons==0.9.7 -python-dateutil>=1.5.0 -python2-secrets==1.0.5 -pytz==2016.7 -PyUtilib==5.7.1 -pyyaml==5.3.1 -repoze.who-friendlyform==1.0.8 -repoze.who==2.3 -requests==2.24.0 -Routes==1.13 -rq==1.0 -simplejson==3.10.0 -sqlalchemy-migrate==0.12.0 -SQLAlchemy==1.3.5 -sqlparse==0.3.0 -tzlocal==1.3 -unicodecsv>=0.9 -webassets==0.12.1 -WebHelpers==1.3 -WebOb==1.0.8 -WebTest==1.4.3 # need to pin this so that Pylons does not install a newer version that conflicts with WebOb==1.0.8 -Werkzeug[watchdog]==0.16.1 -zope.interface==4.7.2 diff --git a/requirements-py2.txt b/requirements-py2.txt deleted file mode 100644 index 9678b66b593..00000000000 --- a/requirements-py2.txt +++ /dev/null @@ -1,79 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# pip-compile --output-file=requirements-py2.txt requirements-py2.in -# -alembic==1.0.0 # via -r requirements-py2.in -babel==2.7.0 # via -r requirements-py2.in, flask-babel -beaker==1.11.0 # via pylons -bleach==3.3.0 # via -r requirements-py2.in -certifi==2020.6.20 # via requests -chardet==3.0.4 # via requests -click==7.1.2 # via -r requirements-py2.in, flask, rq -decorator==4.4.2 # via pylons, sqlalchemy-migrate -dominate==2.4.0 # via -r requirements-py2.in -feedgen==0.9.0 # via -r requirements-py2.in -flask-babel==0.11.2 # via -r requirements-py2.in -flask-multistatic==1.0 # via -r requirements-py2.in -flask==1.1.1 # via -r requirements-py2.in, flask-babel, flask-multistatic -formencode==2.0.0 # via pylons -funcsigs==1.0.2 # via beaker -future==0.18.2 # via -r requirements-py2.in -idna==2.10 # via requests -itsdangerous==1.1.0 # via flask -jinja2==2.11.3 # via -r requirements-py2.in, flask, flask-babel -lxml==4.6.2 # via feedgen -mako==1.1.3 # via alembic, pylons -markdown==2.6.7 # via -r requirements-py2.in -markupsafe==1.1.1 # via jinja2, mako, webhelpers -nose==1.3.7 # via pylons, pyutilib -packaging==20.9 # via bleach -passlib==1.7.3 # via -r requirements-py2.in -paste==1.7.5.1 # via -r requirements-py2.in, pastescript, pylons, weberror -pastedeploy==2.1.1 # via pastescript, pylons -pastescript==2.0.2 # via -r requirements-py2.in, pylons -pathtools==0.1.2 # via watchdog -pbr==5.5.1 # via sqlalchemy-migrate 
-polib==1.0.7 # via -r requirements-py2.in -psycopg2==2.8.2 # via -r requirements-py2.in -pygments==2.5.2 # via weberror -pyjwt==1.7.1 # via -r requirements-py2.in -pylons==0.9.7 # via -r requirements-py2.in -pyparsing==2.4.7 # via packaging -pysolr==3.6.0 # via -r requirements-py2.in -python-dateutil==2.8.1 # via -r requirements-py2.in, alembic, feedgen -python-editor==1.0.4 # via alembic -python-magic==0.4.15 # via -r requirements-py2.in -python2-secrets==1.0.5 # via -r requirements-py2.in -pytz==2016.7 # via -r requirements-py2.in, babel, tzlocal -pyutilib==5.7.1 # via -r requirements-py2.in -pyyaml==5.3.1 # via -r requirements-py2.in -redis==3.5.3 # via rq -repoze.lru==0.7 # via routes -repoze.who-friendlyform==1.0.8 # via -r requirements-py2.in -repoze.who==2.3 # via -r requirements-py2.in, repoze.who-friendlyform -requests==2.24.0 # via -r requirements-py2.in, pysolr -routes==1.13 # via -r requirements-py2.in, pylons -rq==1.0 # via -r requirements-py2.in -simplejson==3.10.0 # via -r requirements-py2.in, pylons -six==1.15.0 # via bleach, formencode, pastescript, python-dateutil, pyutilib, sqlalchemy-migrate -sqlalchemy-migrate==0.12.0 # via -r requirements-py2.in -sqlalchemy==1.3.5 # via -r requirements-py2.in, alembic, sqlalchemy-migrate -sqlparse==0.3.0 # via -r requirements-py2.in, sqlalchemy-migrate -tempita==0.5.2 # via pylons, sqlalchemy-migrate, weberror -tzlocal==1.3 # via -r requirements-py2.in -unicodecsv==0.14.1 # via -r requirements-py2.in -urllib3==1.25.11 # via requests -watchdog==0.10.3 # via werkzeug -webassets==0.12.1 # via -r requirements-py2.in -webencodings==0.5.1 # via bleach -weberror==0.13.1 # via pylons -webhelpers==1.3 # via -r requirements-py2.in, pylons -webob==1.0.8 # via -r requirements-py2.in, pylons, repoze.who, repoze.who-friendlyform, weberror, webtest -webtest==1.4.3 # via -r requirements-py2.in, pylons -werkzeug[watchdog]==0.16.1 # via -r requirements-py2.in, flask -zope.interface==4.7.2 # via -r requirements-py2.in, repoze.who, repoze.who-friendlyform - -# The following packages are considered to be unsafe in a requirements file: -# setuptools diff --git a/setup.py b/setup.py index 6844c83727d..4a8be4e9d5c 100644 --- a/setup.py +++ b/setup.py @@ -191,7 +191,7 @@ extras_require = {} _extras_groups = [ - ('requirements', 'requirements.txt'), ('requirements-py2', 'requirements-py2.txt'), + ('requirements', 'requirements.txt'), ('setuptools', 'requirement-setuptools.txt'), ('dev', 'dev-requirements.txt'), ]
cocotb__cocotb-999
Profiling bug

When I enable profiling I get the following traceback:

```
make COCOTB_ENABLE_PROFILING=true
...
Traceback (most recent call last):
  File "/home/ademski/Documents/satellogic/hdlteam/cocotb/cocotb/__init__.py", line 41, in <module>
    from cocotb.scheduler import Scheduler
  File "/home/ademski/Documents/satellogic/hdlteam/cocotb/cocotb/scheduler.py", line 51, in <module>
    import cProfile, StringIO, pstats
ModuleNotFoundError: No module named 'StringIO'
```

`StringIO` is not used in `scheduler.py`, so I solved the problem by removing that import.

Python version: 3.7.3
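For reference, the failing line only needs the Python 2-only `StringIO` module dropped from the guarded import. A minimal standalone sketch of the resulting profiling toggle is below; the `dump_profile` helper and the `__main__` section are illustrative and not part of cocotb's API:

```python
# Standalone sketch of the profiling toggle without the unused StringIO
# import (on Python 3 the equivalent class lives in io.StringIO anyway).
import cProfile
import os
import pstats

if "COCOTB_ENABLE_PROFILING" in os.environ:
    _profile = cProfile.Profile()
    _profiling = True
else:
    _profiling = False


def dump_profile(path="test_profile.pstat"):
    """Write cumulative-time-sorted profiling stats, if profiling is on."""
    if _profiling:
        pstats.Stats(_profile).sort_stats("cumulative").dump_stats(path)


if __name__ == "__main__":
    if _profiling:
        _profile.enable()
    sum(i * i for i in range(100_000))  # stand-in for the profiled work
    if _profiling:
        _profile.disable()
    dump_profile()
```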
[ { "content": "#!/usr/bin/env python\n\n# Copyright (c) 2013, 2018 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Coroutine scheduler.\n\n\nFIXME: We have a problem here. If a coroutine schedules a read-only but we\nalso have pending writes we have to schedule the ReadWrite callback before\nthe ReadOnly (and this is invalid, at least in Modelsim).\n\"\"\"\nimport collections\nimport copy\nimport os\nimport time\nimport logging\nimport threading\n\nif \"COCOTB_SIM\" in os.environ:\n import simulator\nelse:\n simulator = None\n\n# Debug mode controlled by environment variables\nif \"COCOTB_ENABLE_PROFILING\" in os.environ:\n import cProfile, StringIO, pstats\n _profile = cProfile.Profile()\n _profiling = True\nelse:\n _profiling = False\n\n# Sadly the Python standard logging module is very slow so it's better not to\n# make any calls by testing a boolean flag first\nif \"COCOTB_SCHEDULER_DEBUG\" in os.environ:\n _debug = True\nelse:\n _debug = False\n\n\nimport cocotb\nimport cocotb.decorators\nfrom cocotb.triggers import (Trigger, GPITrigger, Timer, ReadOnly, PythonTrigger,\n NextTimeStep, ReadWrite, Event, Join, NullTrigger)\nfrom cocotb.log import SimLog\nfrom cocotb.result import (TestComplete, TestError, ReturnValue, raise_error,\n create_error, ExternalException)\nfrom cocotb.utils import nullcontext\n\n\nclass InternalError(RuntimeError):\n \"\"\" An error internal to scheduler. If you see this, report a bug! 
\"\"\"\n pass\n\n\nclass profiling_context(object):\n \"\"\" Context manager that profiles its contents \"\"\"\n def __enter__(self):\n _profile.enable()\n\n def __exit__(self, *excinfo):\n _profile.disable()\n\n\nfrom cocotb import outcomes\n\nclass external_state(object):\n INIT = 0\n RUNNING = 1\n PAUSED = 2\n EXITED = 3\n\[email protected]\nclass external_waiter(object):\n\n def __init__(self):\n self._outcome = None\n self.thread = None\n self.event = Event()\n self.state = external_state.INIT\n self.cond = threading.Condition()\n self._log = SimLog(\"cocotb.external.thead.%s\" % self.thread, id(self))\n\n @property\n def result(self):\n return self._outcome.get()\n\n def _propogate_state(self, new_state):\n with self.cond:\n if _debug:\n self._log.debug(\"Changing state from %d -> %d from %s\" % (self.state, new_state, threading.current_thread()))\n self.state = new_state\n self.cond.notify()\n\n def thread_done(self):\n if _debug:\n self._log.debug(\"Thread finished from %s\" % (threading.current_thread()))\n self._propogate_state(external_state.EXITED)\n\n def thread_suspend(self):\n self._propogate_state(external_state.PAUSED)\n\n def thread_start(self):\n if self.state > external_state.INIT:\n return\n\n if not self.thread.is_alive():\n self._propogate_state(external_state.RUNNING)\n self.thread.start()\n\n def thread_resume(self):\n self._propogate_state(external_state.RUNNING)\n\n def thread_wait(self):\n if _debug:\n self._log.debug(\"Waiting for the condition lock %s\" % threading.current_thread())\n\n with self.cond:\n while self.state == external_state.RUNNING:\n self.cond.wait()\n\n if _debug:\n if self.state == external_state.EXITED:\n self._log.debug(\"Thread %s has exited from %s\" % (self.thread, threading.current_thread()))\n elif self.state == external_state.PAUSED:\n self._log.debug(\"Thread %s has called yield from %s\" % (self.thread, threading.current_thread()))\n elif self.state == external_state.RUNNING:\n self._log.debug(\"Thread %s is in RUNNING from %d\" % (self.thread, threading.current_thread()))\n\n if self.state == external_state.INIT:\n raise Exception(\"Thread %s state was not allowed from %s\" % (self.thread, threading.current_thread()))\n\n return self.state\n\nclass Scheduler(object):\n \"\"\"The main scheduler.\n\n Here we accept callbacks from the simulator and schedule the appropriate\n coroutines.\n\n A callback fires, causing the :any:`react` method to be called, with the\n trigger that caused the callback as the first argument.\n\n We look up a list of coroutines to schedule (indexed by the trigger) and\n schedule them in turn. NB implementors should not depend on the scheduling\n order!\n\n Some additional management is required since coroutines can return a list\n of triggers, to be scheduled when any one of the triggers fires. To\n ensure we don't receive spurious callbacks, we have to un-prime all the\n other triggers when any one fires.\n\n Due to the simulator nuances and fun with delta delays we have the\n following modes:\n\n Normal mode\n - Callbacks cause coroutines to be scheduled\n - Any pending writes are cached and do not happen immediately\n\n ReadOnly mode\n - Corresponds to cbReadOnlySynch (VPI) or vhpiCbLastKnownDeltaCycle\n (VHPI). 
In this state we are not allowed to perform writes.\n\n Write mode\n - Corresponds to cbReadWriteSynch (VPI) or vhpiCbEndOfProcesses (VHPI)\n In this mode we play back all the cached write updates.\n\n We can legally transition from normal->write by registering a ReadWrite\n callback, however usually once a simulator has entered the ReadOnly phase\n of a given timestep then we must move to a new timestep before performing\n any writes. The mechanism for moving to a new timestep may not be\n consistent across simulators and therefore we provide an abstraction to\n assist with compatibility.\n\n\n Unless a coroutine has explicitly requested to be scheduled in ReadOnly\n mode (for example wanting to sample the finally settled value after all\n delta delays) then it can reasonably be expected to be scheduled during\n \"normal mode\" i.e. where writes are permitted.\n \"\"\"\n\n _MODE_NORMAL = 1 # noqa\n _MODE_READONLY = 2 # noqa\n _MODE_WRITE = 3 # noqa\n _MODE_TERM = 4 # noqa\n\n # Singleton events, recycled to avoid spurious object creation\n _next_time_step = NextTimeStep()\n _read_write = ReadWrite()\n _read_only = ReadOnly()\n _timer1 = Timer(1)\n\n def __init__(self):\n\n self.log = SimLog(\"cocotb.scheduler\")\n if _debug:\n self.log.setLevel(logging.DEBUG)\n\n # Use OrderedDict here for deterministic behavior (gh-934)\n\n # A dictionary of pending coroutines for each trigger,\n # indexed by trigger\n self._trigger2coros = collections.OrderedDict()\n\n # A dictionary mapping coroutines to the trigger they are waiting for\n self._coro2trigger = collections.OrderedDict()\n\n # Our main state\n self._mode = Scheduler._MODE_NORMAL\n\n # A dictionary of pending writes\n self._writes = collections.OrderedDict()\n\n self._pending_coros = []\n self._pending_triggers = []\n self._pending_threads = []\n self._pending_events = [] # Events we need to call set on once we've unwound\n\n self._terminate = False\n self._test_result = None\n self._entrypoint = None\n self._main_thread = threading.current_thread()\n\n self._is_reacting = False\n\n self._write_coro_inst = None\n self._writes_pending = Event()\n\n @cocotb.decorators.coroutine\n def _do_writes(self):\n \"\"\" An internal coroutine that performs pending writes \"\"\"\n while True:\n yield self._writes_pending.wait()\n if self._mode != Scheduler._MODE_NORMAL:\n yield self._next_time_step\n\n yield self._read_write\n\n while self._writes:\n handle, value = self._writes.popitem()\n handle.setimmediatevalue(value)\n self._writes_pending.clear()\n\n def _check_termination(self):\n \"\"\"\n Handle a termination that causes us to move onto the next test.\n \"\"\"\n if self._terminate:\n if _debug:\n self.log.debug(\"Test terminating, scheduling Timer\")\n\n if self._write_coro_inst is not None:\n self._write_coro_inst.kill()\n self._write_coro_inst = None\n\n for t in self._trigger2coros:\n t.unprime()\n\n if self._timer1.primed:\n self._timer1.unprime()\n\n self._timer1.prime(self.begin_test)\n self._trigger2coros = collections.OrderedDict()\n self._coro2trigger = collections.OrderedDict()\n self._terminate = False\n self._writes = collections.OrderedDict()\n self._writes_pending.clear()\n self._mode = Scheduler._MODE_TERM\n\n def begin_test(self, trigger=None):\n \"\"\"Called to initiate a test.\n\n Could be called on start-up or from a callback.\n \"\"\"\n if _debug:\n self.log.debug(\"begin_test called with trigger: %s\" %\n (str(trigger)))\n if _profiling:\n ps = pstats.Stats(_profile).sort_stats('cumulative')\n 
ps.dump_stats(\"test_profile.pstat\")\n ctx = profiling_context()\n else:\n ctx = nullcontext()\n\n with ctx:\n self._mode = Scheduler._MODE_NORMAL\n if trigger is not None:\n trigger.unprime()\n\n # Issue previous test result, if there is one\n if self._test_result is not None:\n if _debug:\n self.log.debug(\"Issue test result to regression object\")\n cocotb.regression_manager.handle_result(self._test_result)\n self._test_result = None\n if self._entrypoint is not None:\n test = self._entrypoint\n self._entrypoint = None\n self.schedule(test)\n self._check_termination()\n\n def react(self, trigger):\n \"\"\"\n Called when a trigger fires.\n\n We ensure that we only start the event loop once, rather than\n letting it recurse.\n \"\"\"\n if self._is_reacting:\n # queue up the trigger, the event loop will get to it\n self._pending_triggers.append(trigger)\n return\n\n if self._pending_triggers:\n raise InternalError(\n \"Expected all triggers to be handled but found {}\"\n .format(self._pending_triggers)\n )\n\n # start the event loop\n self._is_reacting = True\n try:\n self._event_loop(trigger)\n finally:\n self._is_reacting = False\n\n\n def _event_loop(self, trigger):\n \"\"\"\n Run an event loop triggered by the given trigger.\n\n The loop will keep running until no further triggers fire.\n\n This should be triggered by only:\n * The beginning of a test, when there is no trigger to react to\n * A GPI trigger\n \"\"\"\n if _profiling:\n ctx = profiling_context()\n else:\n ctx = nullcontext()\n\n with ctx:\n # When a trigger fires it is unprimed internally\n if _debug:\n self.log.debug(\"Trigger fired: %s\" % str(trigger))\n # trigger.unprime()\n\n if self._mode == Scheduler._MODE_TERM:\n if _debug:\n self.log.debug(\"Ignoring trigger %s since we're terminating\" %\n str(trigger))\n return\n\n if trigger is self._read_only:\n self._mode = Scheduler._MODE_READONLY\n # Only GPI triggers affect the simulator scheduling mode\n elif isinstance(trigger, GPITrigger):\n self._mode = Scheduler._MODE_NORMAL\n\n # work through triggers one by one\n is_first = True\n self._pending_triggers.append(trigger)\n while self._pending_triggers:\n trigger = self._pending_triggers.pop(0)\n\n if not is_first and isinstance(trigger, GPITrigger):\n self.log.warning(\n \"A GPI trigger occurred after entering react - this \"\n \"should not happen.\"\n )\n assert False\n\n # this only exists to enable the warning above\n is_first = False\n\n if trigger not in self._trigger2coros:\n\n # GPI triggers should only be ever pending if there is an\n # associated coroutine waiting on that trigger, otherwise it would\n # have been unprimed already\n if isinstance(trigger, GPITrigger):\n self.log.critical(\n \"No coroutines waiting on trigger that fired: %s\" %\n str(trigger))\n\n trigger.log.info(\"I'm the culprit\")\n # For Python triggers this isn't actually an error - we might do\n # event.set() without knowing whether any coroutines are actually\n # waiting on this event, for example\n elif _debug:\n self.log.debug(\n \"No coroutines waiting on trigger that fired: %s\" %\n str(trigger))\n\n del trigger\n continue\n\n # Scheduled coroutines may append to our waiting list so the first\n # thing to do is pop all entries waiting on this trigger.\n scheduling = self._trigger2coros.pop(trigger)\n\n if _debug:\n debugstr = \"\\n\\t\".join([coro.__name__ for coro in scheduling])\n if len(scheduling):\n debugstr = \"\\n\\t\" + debugstr\n self.log.debug(\"%d pending coroutines for event %s%s\" %\n (len(scheduling), str(trigger), 
debugstr))\n\n # This trigger isn't needed any more\n trigger.unprime()\n\n for coro in scheduling:\n if _debug:\n self.log.debug(\"Scheduling coroutine %s\" % (coro.__name__))\n self.schedule(coro, trigger=trigger)\n if _debug:\n self.log.debug(\"Scheduled coroutine %s\" % (coro.__name__))\n\n # Schedule may have queued up some events so we'll burn through those\n while self._pending_events:\n if _debug:\n self.log.debug(\"Scheduling pending event %s\" %\n (str(self._pending_events[0])))\n self._pending_events.pop(0).set()\n\n # remove our reference to the objects at the end of each loop,\n # to try and avoid them being destroyed at a weird time (as\n # happened in gh-957)\n del trigger\n del coro\n del scheduling\n\n # no more pending triggers\n self._check_termination()\n if _debug:\n self.log.debug(\"All coroutines scheduled, handing control back\"\n \" to simulator\")\n\n\n def unschedule(self, coro):\n \"\"\"Unschedule a coroutine. Unprime any pending triggers\"\"\"\n\n # Unprime the trigger this coroutine is waiting on\n try:\n trigger = self._coro2trigger.pop(coro)\n except KeyError:\n # coroutine probably finished\n pass\n else:\n if coro in self._trigger2coros.setdefault(trigger, []):\n self._trigger2coros[trigger].remove(coro)\n if not self._trigger2coros[trigger]:\n trigger.unprime()\n del self._trigger2coros[trigger]\n\n if Join(coro) in self._trigger2coros:\n self.react(Join(coro))\n else:\n try:\n # throws an error if the background coroutine errored\n # and no one was monitoring it\n coro.retval\n except TestComplete as test_result:\n self.log.debug(\"TestComplete received: {}\".format(test_result.__class__.__name__))\n self.finish_test(test_result)\n except Exception as e:\n self.finish_test(create_error(self, \"Forked coroutine {} raised exception: {}\".format(coro, e)))\n\n def save_write(self, handle, value):\n if self._mode == Scheduler._MODE_READONLY:\n raise Exception(\"Write to object {0} was scheduled during a read-only sync phase.\".format(handle._name))\n\n # TODO: we should be able to better keep track of when this needs to\n # be scheduled\n if self._write_coro_inst is None:\n self._write_coro_inst = self._do_writes()\n self.schedule(self._write_coro_inst)\n\n self._writes[handle] = value\n self._writes_pending.set()\n\n def _coroutine_yielded(self, coro, trigger):\n \"\"\"Prime the trigger and update our internal mappings.\"\"\"\n self._coro2trigger[coro] = trigger\n\n trigger_coros = self._trigger2coros.setdefault(trigger, [])\n if coro is self._write_coro_inst:\n # Our internal write coroutine always runs before any user coroutines.\n # This preserves the behavior prior to the refactoring of writes to\n # this coroutine.\n trigger_coros.insert(0, coro)\n else:\n # Everything else joins the back of the queue\n trigger_coros.append(coro)\n\n if not trigger.primed:\n\n if trigger_coros != [coro]:\n # should never happen\n raise InternalError(\n \"More than one coroutine waiting on an unprimed trigger\")\n\n try:\n trigger.prime(self.react)\n except Exception as e:\n # discard the trigger we associated, it will never fire\n self._trigger2coros.pop(trigger)\n\n # replace it with a new trigger that throws back the exception\n error_trigger = NullTrigger(outcome=outcomes.Error(e))\n self._coro2trigger[coro] = error_trigger\n self._trigger2coros[error_trigger] = [coro]\n\n # wake up the coroutines\n error_trigger.prime(self.react)\n\n def queue(self, coroutine):\n \"\"\"Queue a coroutine for execution\"\"\"\n self._pending_coros.append(coroutine)\n\n def 
queue_function(self, coroutine):\n \"\"\"Queue a coroutine for execution and move the containing thread\n so that it does not block execution of the main thread any longer.\n \"\"\"\n # We should be able to find ourselves inside the _pending_threads list\n matching_threads = [\n t\n for t in self._pending_threads\n if t.thread == threading.current_thread()\n ]\n if len(matching_threads) == 0:\n raise RuntimeError(\"queue_function called from unrecognized thread\")\n\n # Raises if there is more than one match. This can never happen, since\n # each entry always has a unique thread.\n t, = matching_threads\n\n t.thread_suspend()\n self._pending_coros.append(coroutine)\n return t\n\n def run_in_executor(self, func, *args, **kwargs):\n \"\"\"Run the coroutine in a separate execution thread\n and return a yieldable object for the caller.\n \"\"\"\n # Create a thread\n # Create a trigger that is called as a result of the thread finishing\n # Create an Event object that the caller can yield on\n # Event object set when the thread finishes execution, this blocks the\n # calling coroutine (but not the thread) until the external completes\n\n def execute_external(func, _waiter):\n _waiter._outcome = outcomes.capture(func, *args, **kwargs)\n if _debug:\n self.log.debug(\"Execution of external routine done %s\" % threading.current_thread())\n _waiter.thread_done()\n\n waiter = external_waiter()\n thread = threading.Thread(group=None, target=execute_external,\n name=func.__name__ + \"_thread\",\n args=([func, waiter]), kwargs={})\n\n waiter.thread = thread\n self._pending_threads.append(waiter)\n\n return waiter\n\n def add(self, coroutine):\n \"\"\"Add a new coroutine.\n\n Just a wrapper around self.schedule which provides some debug and\n useful error messages in the event of common gotchas.\n \"\"\"\n if isinstance(coroutine, cocotb.decorators.coroutine):\n raise TypeError(\n \"Attempt to schedule a coroutine that hasn't started: {}.\\n\"\n \"Did you forget to add parentheses to the @cocotb.test() \"\n \"decorator?\"\n .format(coroutine)\n )\n\n elif not isinstance(coroutine, cocotb.decorators.RunningCoroutine):\n raise TypeError(\n \"Attempt to add a object of type {} to the scheduler, which \"\n \"isn't a coroutine: {!r}\\n\"\n \"Did you forget to use the @cocotb.coroutine decorator?\"\n .format(type(coroutine), coroutine)\n )\n\n if _debug:\n self.log.debug(\"Adding new coroutine %s\" % coroutine.__name__)\n\n self.schedule(coroutine)\n self._check_termination()\n return coroutine\n\n def new_test(self, coroutine):\n self._entrypoint = coroutine\n\n # This collection of functions parses a trigger out of the object\n # that was yielded by a coroutine, converting `list` -> `Waitable`,\n # `Waitable` -> `RunningCoroutine`, `RunningCoroutine` -> `Trigger`.\n # Doing them as separate functions allows us to avoid repeating unencessary\n # `isinstance` checks.\n\n def _trigger_from_started_coro(self, result):\n # type: (RunningCoroutine) -> Trigger\n if _debug:\n self.log.debug(\"Joining to already running coroutine: %s\" %\n result.__name__)\n return result.join()\n\n def _trigger_from_unstarted_coro(self, result):\n # type: (RunningCoroutine) -> Trigger\n self.queue(result)\n if _debug:\n self.log.debug(\"Scheduling nested coroutine: %s\" %\n result.__name__)\n return result.join()\n\n def _trigger_from_waitable(self, result):\n # type: (Waitable) -> Trigger\n return self._trigger_from_unstarted_coro(result._wait())\n\n def _trigger_from_list(self, result):\n # type: (list) -> Trigger\n return 
self._trigger_from_waitable(cocotb.triggers.First(*result))\n\n def _trigger_from_any(self, result):\n \"\"\"Convert a yielded object into a Trigger instance\"\"\"\n # note: the order of these can significantly impact performance\n\n if isinstance(result, Trigger):\n return result\n\n if isinstance(result, cocotb.decorators.RunningCoroutine):\n if not result.has_started():\n return self._trigger_from_unstarted_coro(result)\n else:\n return self._trigger_from_started_coro(result)\n\n if isinstance(result, list):\n return self._trigger_from_list(result)\n\n if isinstance(result, cocotb.triggers.Waitable):\n return self._trigger_from_waitable(result)\n\n raise TypeError(\n \"Coroutine yielded an object of type {}, which the scheduler can't \"\n \"handle: {!r}\\n\"\n \"Did you forget to decorate with @cocotb.coroutine?\"\n .format(type(result), result)\n )\n\n def schedule(self, coroutine, trigger=None):\n \"\"\"Schedule a coroutine by calling the send method.\n\n Args:\n coroutine (cocotb.decorators.coroutine): The coroutine to schedule.\n trigger (cocotb.triggers.Trigger): The trigger that caused this\n coroutine to be scheduled.\n \"\"\"\n if trigger is None:\n send_outcome = outcomes.Value(None)\n else:\n send_outcome = trigger._outcome\n if _debug:\n self.log.debug(\"Scheduling with {}\".format(send_outcome))\n\n try:\n result = coroutine._advance(send_outcome)\n if _debug:\n self.log.debug(\"Coroutine %s yielded %s (mode %d)\" %\n (coroutine.__name__, str(result), self._mode))\n\n # TestComplete indication is game over, tidy up\n except TestComplete as test_result:\n # Tag that close down is needed, save the test_result\n # for later use in cleanup handler\n self.log.debug(\"TestComplete received: %s\" % test_result.__class__.__name__)\n self.finish_test(test_result)\n return\n\n # Normal coroutine completion\n except cocotb.decorators.CoroutineComplete as exc:\n if _debug:\n self.log.debug(\"Coroutine completed: %s\" % str(coroutine))\n self.unschedule(coroutine)\n return\n\n # Don't handle the result if we're shutting down\n if self._terminate:\n return\n\n try:\n result = self._trigger_from_any(result)\n except TypeError as exc:\n # restart this coroutine with an exception object telling it that\n # it wasn't allowed to yield that\n result = NullTrigger(outcome=outcomes.Error(exc))\n\n self._coroutine_yielded(coroutine, result)\n\n # We do not return from here until pending threads have completed, but only\n # from the main thread, this seems like it could be problematic in cases\n # where a sim might change what this thread is.\n def unblock_event(ext):\n @cocotb.coroutine\n def wrapper():\n ext.event.set()\n yield PythonTrigger()\n\n if self._main_thread is threading.current_thread():\n\n for ext in self._pending_threads:\n ext.thread_start()\n if _debug:\n self.log.debug(\"Blocking from %s on %s\" % (threading.current_thread(), ext.thread))\n state = ext.thread_wait()\n if _debug:\n self.log.debug(\"Back from wait on self %s with newstate %d\" % (threading.current_thread(), state))\n if state == external_state.EXITED:\n self._pending_threads.remove(ext)\n self._pending_events.append(ext.event)\n\n # Handle any newly queued coroutines that need to be scheduled\n while self._pending_coros:\n self.add(self._pending_coros.pop(0))\n\n def finish_test(self, test_result):\n \"\"\"Cache the test result and set the terminate flag.\"\"\"\n self.log.debug(\"finish_test called with %s\" % (repr(test_result)))\n if not self._terminate:\n self._terminate = True\n self._test_result = 
test_result\n self.cleanup()\n\n def finish_scheduler(self, test_result):\n \"\"\"Directly call into the regression manager and end test\n once we return the sim will close us so no cleanup is needed.\n \"\"\"\n self.log.debug(\"Issue sim closedown result to regression object\")\n cocotb.regression_manager.handle_result(test_result)\n\n def cleanup(self):\n \"\"\"Clear up all our state.\n\n Unprime all pending triggers and kill off any coroutines stop all externals.\n \"\"\"\n # copy since we modify this in kill\n items = list(self._trigger2coros.items())\n\n # reversing seems to fix gh-928, although the order is still somewhat\n # arbitrary.\n for trigger, waiting in items[::-1]:\n for coro in waiting:\n if _debug:\n self.log.debug(\"Killing %s\" % str(coro))\n coro.kill()\n\n if self._main_thread is not threading.current_thread():\n raise Exception(\"Cleanup() called outside of the main thread\")\n\n for ext in self._pending_threads:\n self.log.warn(\"Waiting for %s to exit\", ext.thread)\n", "path": "cocotb/scheduler.py" } ]
[ { "content": "#!/usr/bin/env python\n\n# Copyright (c) 2013, 2018 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Coroutine scheduler.\n\n\nFIXME: We have a problem here. If a coroutine schedules a read-only but we\nalso have pending writes we have to schedule the ReadWrite callback before\nthe ReadOnly (and this is invalid, at least in Modelsim).\n\"\"\"\nimport collections\nimport copy\nimport os\nimport time\nimport logging\nimport threading\n\nif \"COCOTB_SIM\" in os.environ:\n import simulator\nelse:\n simulator = None\n\n# Debug mode controlled by environment variables\nif \"COCOTB_ENABLE_PROFILING\" in os.environ:\n import cProfile, pstats\n _profile = cProfile.Profile()\n _profiling = True\nelse:\n _profiling = False\n\n# Sadly the Python standard logging module is very slow so it's better not to\n# make any calls by testing a boolean flag first\nif \"COCOTB_SCHEDULER_DEBUG\" in os.environ:\n _debug = True\nelse:\n _debug = False\n\n\nimport cocotb\nimport cocotb.decorators\nfrom cocotb.triggers import (Trigger, GPITrigger, Timer, ReadOnly, PythonTrigger,\n NextTimeStep, ReadWrite, Event, Join)\nfrom cocotb.log import SimLog\nfrom cocotb.result import (TestComplete, TestError, ReturnValue, raise_error,\n create_error, ExternalException)\nfrom cocotb.utils import nullcontext\n\n\nclass InternalError(RuntimeError):\n \"\"\" An error internal to scheduler. If you see this, report a bug! 
\"\"\"\n pass\n\n\nclass profiling_context(object):\n \"\"\" Context manager that profiles its contents \"\"\"\n def __enter__(self):\n _profile.enable()\n\n def __exit__(self, *excinfo):\n _profile.disable()\n\n\nfrom cocotb import outcomes\n\nclass external_state(object):\n INIT = 0\n RUNNING = 1\n PAUSED = 2\n EXITED = 3\n\[email protected]\nclass external_waiter(object):\n\n def __init__(self):\n self._outcome = None\n self.thread = None\n self.event = Event()\n self.state = external_state.INIT\n self.cond = threading.Condition()\n self._log = SimLog(\"cocotb.external.thead.%s\" % self.thread, id(self))\n\n @property\n def result(self):\n return self._outcome.get()\n\n def _propogate_state(self, new_state):\n with self.cond:\n if _debug:\n self._log.debug(\"Changing state from %d -> %d from %s\" % (self.state, new_state, threading.current_thread()))\n self.state = new_state\n self.cond.notify()\n\n def thread_done(self):\n if _debug:\n self._log.debug(\"Thread finished from %s\" % (threading.current_thread()))\n self._propogate_state(external_state.EXITED)\n\n def thread_suspend(self):\n self._propogate_state(external_state.PAUSED)\n\n def thread_start(self):\n if self.state > external_state.INIT:\n return\n\n if not self.thread.is_alive():\n self._propogate_state(external_state.RUNNING)\n self.thread.start()\n\n def thread_resume(self):\n self._propogate_state(external_state.RUNNING)\n\n def thread_wait(self):\n if _debug:\n self._log.debug(\"Waiting for the condition lock %s\" % threading.current_thread())\n\n with self.cond:\n while self.state == external_state.RUNNING:\n self.cond.wait()\n\n if _debug:\n if self.state == external_state.EXITED:\n self._log.debug(\"Thread %s has exited from %s\" % (self.thread, threading.current_thread()))\n elif self.state == external_state.PAUSED:\n self._log.debug(\"Thread %s has called yield from %s\" % (self.thread, threading.current_thread()))\n elif self.state == external_state.RUNNING:\n self._log.debug(\"Thread %s is in RUNNING from %d\" % (self.thread, threading.current_thread()))\n\n if self.state == external_state.INIT:\n raise Exception(\"Thread %s state was not allowed from %s\" % (self.thread, threading.current_thread()))\n\n return self.state\n\nclass Scheduler(object):\n \"\"\"The main scheduler.\n\n Here we accept callbacks from the simulator and schedule the appropriate\n coroutines.\n\n A callback fires, causing the :any:`react` method to be called, with the\n trigger that caused the callback as the first argument.\n\n We look up a list of coroutines to schedule (indexed by the trigger) and\n schedule them in turn. NB implementors should not depend on the scheduling\n order!\n\n Some additional management is required since coroutines can return a list\n of triggers, to be scheduled when any one of the triggers fires. To\n ensure we don't receive spurious callbacks, we have to un-prime all the\n other triggers when any one fires.\n\n Due to the simulator nuances and fun with delta delays we have the\n following modes:\n\n Normal mode\n - Callbacks cause coroutines to be scheduled\n - Any pending writes are cached and do not happen immediately\n\n ReadOnly mode\n - Corresponds to cbReadOnlySynch (VPI) or vhpiCbLastKnownDeltaCycle\n (VHPI). 
In this state we are not allowed to perform writes.\n\n Write mode\n - Corresponds to cbReadWriteSynch (VPI) or vhpiCbEndOfProcesses (VHPI)\n In this mode we play back all the cached write updates.\n\n We can legally transition from normal->write by registering a ReadWrite\n callback, however usually once a simulator has entered the ReadOnly phase\n of a given timestep then we must move to a new timestep before performing\n any writes. The mechanism for moving to a new timestep may not be\n consistent across simulators and therefore we provide an abstraction to\n assist with compatibility.\n\n\n Unless a coroutine has explicitly requested to be scheduled in ReadOnly\n mode (for example wanting to sample the finally settled value after all\n delta delays) then it can reasonably be expected to be scheduled during\n \"normal mode\" i.e. where writes are permitted.\n \"\"\"\n\n _MODE_NORMAL = 1 # noqa\n _MODE_READONLY = 2 # noqa\n _MODE_WRITE = 3 # noqa\n _MODE_TERM = 4 # noqa\n\n # Singleton events, recycled to avoid spurious object creation\n _next_time_step = NextTimeStep()\n _read_write = ReadWrite()\n _read_only = ReadOnly()\n _timer1 = Timer(1)\n\n def __init__(self):\n\n self.log = SimLog(\"cocotb.scheduler\")\n if _debug:\n self.log.setLevel(logging.DEBUG)\n\n # Use OrderedDict here for deterministic behavior (gh-934)\n\n # A dictionary of pending coroutines for each trigger,\n # indexed by trigger\n self._trigger2coros = collections.OrderedDict()\n\n # A dictionary mapping coroutines to the trigger they are waiting for\n self._coro2trigger = collections.OrderedDict()\n\n # Our main state\n self._mode = Scheduler._MODE_NORMAL\n\n # A dictionary of pending writes\n self._writes = collections.OrderedDict()\n\n self._pending_coros = []\n self._pending_triggers = []\n self._pending_threads = []\n self._pending_events = [] # Events we need to call set on once we've unwound\n\n self._terminate = False\n self._test_result = None\n self._entrypoint = None\n self._main_thread = threading.current_thread()\n\n self._is_reacting = False\n\n self._write_coro_inst = None\n self._writes_pending = Event()\n\n @cocotb.decorators.coroutine\n def _do_writes(self):\n \"\"\" An internal coroutine that performs pending writes \"\"\"\n while True:\n yield self._writes_pending.wait()\n if self._mode != Scheduler._MODE_NORMAL:\n yield self._next_time_step\n\n yield self._read_write\n\n while self._writes:\n handle, value = self._writes.popitem()\n handle.setimmediatevalue(value)\n self._writes_pending.clear()\n\n def _check_termination(self):\n \"\"\"\n Handle a termination that causes us to move onto the next test.\n \"\"\"\n if self._terminate:\n if _debug:\n self.log.debug(\"Test terminating, scheduling Timer\")\n\n if self._write_coro_inst is not None:\n self._write_coro_inst.kill()\n self._write_coro_inst = None\n\n for t in self._trigger2coros:\n t.unprime()\n\n if self._timer1.primed:\n self._timer1.unprime()\n\n self._timer1.prime(self.begin_test)\n self._trigger2coros = collections.OrderedDict()\n self._coro2trigger = collections.OrderedDict()\n self._terminate = False\n self._writes = collections.OrderedDict()\n self._writes_pending.clear()\n self._mode = Scheduler._MODE_TERM\n\n def begin_test(self, trigger=None):\n \"\"\"Called to initiate a test.\n\n Could be called on start-up or from a callback.\n \"\"\"\n if _debug:\n self.log.debug(\"begin_test called with trigger: %s\" %\n (str(trigger)))\n if _profiling:\n ps = pstats.Stats(_profile).sort_stats('cumulative')\n 
ps.dump_stats(\"test_profile.pstat\")\n ctx = profiling_context()\n else:\n ctx = nullcontext()\n\n with ctx:\n self._mode = Scheduler._MODE_NORMAL\n if trigger is not None:\n trigger.unprime()\n\n # Issue previous test result, if there is one\n if self._test_result is not None:\n if _debug:\n self.log.debug(\"Issue test result to regression object\")\n cocotb.regression_manager.handle_result(self._test_result)\n self._test_result = None\n if self._entrypoint is not None:\n test = self._entrypoint\n self._entrypoint = None\n self.schedule(test)\n self._check_termination()\n\n def react(self, trigger):\n \"\"\"\n Called when a trigger fires.\n\n We ensure that we only start the event loop once, rather than\n letting it recurse.\n \"\"\"\n if self._is_reacting:\n # queue up the trigger, the event loop will get to it\n self._pending_triggers.append(trigger)\n return\n\n if self._pending_triggers:\n raise InternalError(\n \"Expected all triggers to be handled but found {}\"\n .format(self._pending_triggers)\n )\n\n # start the event loop\n self._is_reacting = True\n try:\n self._event_loop(trigger)\n finally:\n self._is_reacting = False\n\n\n def _event_loop(self, trigger):\n \"\"\"\n Run an event loop triggered by the given trigger.\n\n The loop will keep running until no further triggers fire.\n\n This should be triggered by only:\n * The beginning of a test, when there is no trigger to react to\n * A GPI trigger\n \"\"\"\n if _profiling:\n ctx = profiling_context()\n else:\n ctx = nullcontext()\n\n with ctx:\n # When a trigger fires it is unprimed internally\n if _debug:\n self.log.debug(\"Trigger fired: %s\" % str(trigger))\n # trigger.unprime()\n\n if self._mode == Scheduler._MODE_TERM:\n if _debug:\n self.log.debug(\"Ignoring trigger %s since we're terminating\" %\n str(trigger))\n return\n\n if trigger is self._read_only:\n self._mode = Scheduler._MODE_READONLY\n # Only GPI triggers affect the simulator scheduling mode\n elif isinstance(trigger, GPITrigger):\n self._mode = Scheduler._MODE_NORMAL\n\n # work through triggers one by one\n is_first = True\n self._pending_triggers.append(trigger)\n while self._pending_triggers:\n trigger = self._pending_triggers.pop(0)\n\n if not is_first and isinstance(trigger, GPITrigger):\n self.log.warning(\n \"A GPI trigger occurred after entering react - this \"\n \"should not happen.\"\n )\n assert False\n\n # this only exists to enable the warning above\n is_first = False\n\n if trigger not in self._trigger2coros:\n\n # GPI triggers should only be ever pending if there is an\n # associated coroutine waiting on that trigger, otherwise it would\n # have been unprimed already\n if isinstance(trigger, GPITrigger):\n self.log.critical(\n \"No coroutines waiting on trigger that fired: %s\" %\n str(trigger))\n\n trigger.log.info(\"I'm the culprit\")\n # For Python triggers this isn't actually an error - we might do\n # event.set() without knowing whether any coroutines are actually\n # waiting on this event, for example\n elif _debug:\n self.log.debug(\n \"No coroutines waiting on trigger that fired: %s\" %\n str(trigger))\n\n del trigger\n continue\n\n # Scheduled coroutines may append to our waiting list so the first\n # thing to do is pop all entries waiting on this trigger.\n scheduling = self._trigger2coros.pop(trigger)\n\n if _debug:\n debugstr = \"\\n\\t\".join([coro.__name__ for coro in scheduling])\n if len(scheduling):\n debugstr = \"\\n\\t\" + debugstr\n self.log.debug(\"%d pending coroutines for event %s%s\" %\n (len(scheduling), str(trigger), 
debugstr))\n\n # This trigger isn't needed any more\n trigger.unprime()\n\n for coro in scheduling:\n if _debug:\n self.log.debug(\"Scheduling coroutine %s\" % (coro.__name__))\n self.schedule(coro, trigger=trigger)\n if _debug:\n self.log.debug(\"Scheduled coroutine %s\" % (coro.__name__))\n\n # Schedule may have queued up some events so we'll burn through those\n while self._pending_events:\n if _debug:\n self.log.debug(\"Scheduling pending event %s\" %\n (str(self._pending_events[0])))\n self._pending_events.pop(0).set()\n\n # remove our reference to the objects at the end of each loop,\n # to try and avoid them being destroyed at a weird time (as\n # happened in gh-957)\n del trigger\n del coro\n del scheduling\n\n # no more pending triggers\n self._check_termination()\n if _debug:\n self.log.debug(\"All coroutines scheduled, handing control back\"\n \" to simulator\")\n\n\n def unschedule(self, coro):\n \"\"\"Unschedule a coroutine. Unprime any pending triggers\"\"\"\n\n # Unprime the trigger this coroutine is waiting on\n try:\n trigger = self._coro2trigger.pop(coro)\n except KeyError:\n # coroutine probably finished\n pass\n else:\n if coro in self._trigger2coros.setdefault(trigger, []):\n self._trigger2coros[trigger].remove(coro)\n if not self._trigger2coros[trigger]:\n trigger.unprime()\n del self._trigger2coros[trigger]\n\n if Join(coro) in self._trigger2coros:\n self.react(Join(coro))\n else:\n try:\n # throws an error if the background coroutine errored\n # and no one was monitoring it\n coro.retval\n except TestComplete as test_result:\n self.log.debug(\"TestComplete received: {}\".format(test_result.__class__.__name__))\n self.finish_test(test_result)\n except Exception as e:\n self.finish_test(create_error(self, \"Forked coroutine {} raised exception: {}\".format(coro, e)))\n\n def save_write(self, handle, value):\n if self._mode == Scheduler._MODE_READONLY:\n raise Exception(\"Write to object {0} was scheduled during a read-only sync phase.\".format(handle._name))\n\n # TODO: we should be able to better keep track of when this needs to\n # be scheduled\n if self._write_coro_inst is None:\n self._write_coro_inst = self._do_writes()\n self.schedule(self._write_coro_inst)\n\n self._writes[handle] = value\n self._writes_pending.set()\n\n def _coroutine_yielded(self, coro, trigger):\n \"\"\"Prime the trigger and update our internal mappings.\"\"\"\n self._coro2trigger[coro] = trigger\n\n trigger_coros = self._trigger2coros.setdefault(trigger, [])\n if coro is self._write_coro_inst:\n # Our internal write coroutine always runs before any user coroutines.\n # This preserves the behavior prior to the refactoring of writes to\n # this coroutine.\n trigger_coros.insert(0, coro)\n else:\n # Everything else joins the back of the queue\n trigger_coros.append(coro)\n\n if not trigger.primed:\n try:\n trigger.prime(self.react)\n except Exception as e:\n # Convert any exceptions into a test result\n self.finish_test(\n create_error(self, \"Unable to prime trigger %s: %s\" %\n (str(trigger), str(e))))\n\n def queue(self, coroutine):\n \"\"\"Queue a coroutine for execution\"\"\"\n self._pending_coros.append(coroutine)\n\n def queue_function(self, coroutine):\n \"\"\"Queue a coroutine for execution and move the containing thread\n so that it does not block execution of the main thread any longer.\n \"\"\"\n # We should be able to find ourselves inside the _pending_threads list\n matching_threads = [\n t\n for t in self._pending_threads\n if t.thread == threading.current_thread()\n ]\n if 
len(matching_threads) == 0:\n raise RuntimeError(\"queue_function called from unrecognized thread\")\n\n # Raises if there is more than one match. This can never happen, since\n # each entry always has a unique thread.\n t, = matching_threads\n\n t.thread_suspend()\n self._pending_coros.append(coroutine)\n return t\n\n def run_in_executor(self, func, *args, **kwargs):\n \"\"\"Run the coroutine in a separate execution thread\n and return a yieldable object for the caller.\n \"\"\"\n # Create a thread\n # Create a trigger that is called as a result of the thread finishing\n # Create an Event object that the caller can yield on\n # Event object set when the thread finishes execution, this blocks the\n # calling coroutine (but not the thread) until the external completes\n\n def execute_external(func, _waiter):\n _waiter._outcome = outcomes.capture(func, *args, **kwargs)\n if _debug:\n self.log.debug(\"Execution of external routine done %s\" % threading.current_thread())\n _waiter.thread_done()\n\n waiter = external_waiter()\n thread = threading.Thread(group=None, target=execute_external,\n name=func.__name__ + \"_thread\",\n args=([func, waiter]), kwargs={})\n\n waiter.thread = thread\n self._pending_threads.append(waiter)\n\n return waiter\n\n def add(self, coroutine):\n \"\"\"Add a new coroutine.\n\n Just a wrapper around self.schedule which provides some debug and\n useful error messages in the event of common gotchas.\n \"\"\"\n if isinstance(coroutine, cocotb.decorators.coroutine):\n self.log.critical(\n \"Attempt to schedule a coroutine that hasn't started\")\n coroutine.log.error(\"This is the failing coroutine\")\n self.log.warning(\n \"Did you forget to add parentheses to the @test decorator?\")\n self._test_result = TestError(\n \"Attempt to schedule a coroutine that hasn't started\")\n self._terminate = True\n return\n\n elif not isinstance(coroutine, cocotb.decorators.RunningCoroutine):\n self.log.critical(\n \"Attempt to add something to the scheduler which isn't a \"\n \"coroutine\")\n self.log.warning(\n \"Got: %s (%s)\" % (str(type(coroutine)), repr(coroutine)))\n self.log.warning(\"Did you use the @coroutine decorator?\")\n self._test_result = TestError(\n \"Attempt to schedule a coroutine that hasn't started\")\n self._terminate = True\n return\n\n if _debug:\n self.log.debug(\"Adding new coroutine %s\" % coroutine.__name__)\n\n self.schedule(coroutine)\n self._check_termination()\n return coroutine\n\n def new_test(self, coroutine):\n self._entrypoint = coroutine\n\n # This collection of functions parses a trigger out of the object\n # that was yielded by a coroutine, converting `list` -> `Waitable`,\n # `Waitable` -> `RunningCoroutine`, `RunningCoroutine` -> `Trigger`.\n # Doing them as separate functions allows us to avoid repeating unencessary\n # `isinstance` checks.\n\n def _trigger_from_started_coro(self, result):\n # type: (RunningCoroutine) -> Trigger\n if _debug:\n self.log.debug(\"Joining to already running coroutine: %s\" %\n result.__name__)\n return result.join()\n\n def _trigger_from_unstarted_coro(self, result):\n # type: (RunningCoroutine) -> Trigger\n self.queue(result)\n if _debug:\n self.log.debug(\"Scheduling nested coroutine: %s\" %\n result.__name__)\n return result.join()\n\n def _trigger_from_waitable(self, result):\n # type: (Waitable) -> Trigger\n return self._trigger_from_unstarted_coro(result._wait())\n\n def _trigger_from_list(self, result):\n # type: (list) -> Trigger\n return self._trigger_from_waitable(cocotb.triggers.First(*result))\n\n def 
_trigger_from_any(self, result):\n \"\"\"Convert a yielded object into a Trigger instance\"\"\"\n # note: the order of these can significantly impact performance\n\n if isinstance(result, Trigger):\n return result\n\n if isinstance(result, cocotb.decorators.RunningCoroutine):\n if not result.has_started():\n return self._trigger_from_unstarted_coro(result)\n else:\n return self._trigger_from_started_coro(result)\n\n if isinstance(result, list):\n return self._trigger_from_list(result)\n\n if isinstance(result, cocotb.triggers.Waitable):\n return self._trigger_from_waitable(result)\n\n raise TypeError\n\n def schedule(self, coroutine, trigger=None):\n \"\"\"Schedule a coroutine by calling the send method.\n\n Args:\n coroutine (cocotb.decorators.coroutine): The coroutine to schedule.\n trigger (cocotb.triggers.Trigger): The trigger that caused this\n coroutine to be scheduled.\n \"\"\"\n if trigger is None:\n send_outcome = outcomes.Value(None)\n else:\n send_outcome = trigger._outcome\n if _debug:\n self.log.debug(\"Scheduling with {}\".format(send_outcome))\n\n try:\n result = coroutine._advance(send_outcome)\n if _debug:\n self.log.debug(\"Coroutine %s yielded %s (mode %d)\" %\n (coroutine.__name__, str(result), self._mode))\n\n # TestComplete indication is game over, tidy up\n except TestComplete as test_result:\n # Tag that close down is needed, save the test_result\n # for later use in cleanup handler\n self.log.debug(\"TestComplete received: %s\" % test_result.__class__.__name__)\n self.finish_test(test_result)\n return\n\n # Normal coroutine completion\n except cocotb.decorators.CoroutineComplete as exc:\n if _debug:\n self.log.debug(\"Coroutine completed: %s\" % str(coroutine))\n self.unschedule(coroutine)\n return\n\n # Don't handle the result if we're shutting down\n if self._terminate:\n return\n\n try:\n result = self._trigger_from_any(result)\n except TypeError:\n msg = (\"Coroutine %s yielded something the scheduler can't handle\"\n % str(coroutine))\n msg += (\"\\nGot type: %s repr: %s str: %s\" %\n (type(result), repr(result), str(result)))\n msg += \"\\nDid you forget to decorate with @cocotb.coroutine?\"\n try:\n raise_error(self, msg)\n except Exception as e:\n self.finish_test(e)\n else:\n self._coroutine_yielded(coroutine, result)\n\n # We do not return from here until pending threads have completed, but only\n # from the main thread, this seems like it could be problematic in cases\n # where a sim might change what this thread is.\n def unblock_event(ext):\n @cocotb.coroutine\n def wrapper():\n ext.event.set()\n yield PythonTrigger()\n\n if self._main_thread is threading.current_thread():\n\n for ext in self._pending_threads:\n ext.thread_start()\n if _debug:\n self.log.debug(\"Blocking from %s on %s\" % (threading.current_thread(), ext.thread))\n state = ext.thread_wait()\n if _debug:\n self.log.debug(\"Back from wait on self %s with newstate %d\" % (threading.current_thread(), state))\n if state == external_state.EXITED:\n self._pending_threads.remove(ext)\n self._pending_events.append(ext.event)\n\n # Handle any newly queued coroutines that need to be scheduled\n while self._pending_coros:\n self.add(self._pending_coros.pop(0))\n\n def finish_test(self, test_result):\n \"\"\"Cache the test result and set the terminate flag.\"\"\"\n self.log.debug(\"finish_test called with %s\" % (repr(test_result)))\n if not self._terminate:\n self._terminate = True\n self._test_result = test_result\n self.cleanup()\n\n def finish_scheduler(self, test_result):\n \"\"\"Directly call 
into the regression manager and end test\n once we return the sim will close us so no cleanup is needed.\n \"\"\"\n self.log.debug(\"Issue sim closedown result to regression object\")\n cocotb.regression_manager.handle_result(test_result)\n\n def cleanup(self):\n \"\"\"Clear up all our state.\n\n Unprime all pending triggers and kill off any coroutines stop all externals.\n \"\"\"\n # copy since we modify this in kill\n items = list(self._trigger2coros.items())\n\n # reversing seems to fix gh-928, although the order is still somewhat\n # arbitrary.\n for trigger, waiting in items[::-1]:\n for coro in waiting:\n if _debug:\n self.log.debug(\"Killing %s\" % str(coro))\n coro.kill()\n\n if self._main_thread is not threading.current_thread():\n raise Exception(\"Cleanup() called outside of the main thread\")\n\n for ext in self._pending_threads:\n self.log.warn(\"Waiting for %s to exit\", ext.thread)\n", "path": "cocotb/scheduler.py" } ]
diff --git a/cocotb/scheduler.py b/cocotb/scheduler.py index d43c174018..de207cbec2 100755 --- a/cocotb/scheduler.py +++ b/cocotb/scheduler.py @@ -48,7 +48,7 @@ # Debug mode controlled by environment variables if "COCOTB_ENABLE_PROFILING" in os.environ: - import cProfile, StringIO, pstats + import cProfile, pstats _profile = cProfile.Profile() _profiling = True else:
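The hunk above removes `StringIO` from the scheduler's conditional profiling imports. For context, here is a minimal standalone sketch of the pattern that block implements (an environment-variable-gated `cProfile.Profile` plus a context manager). Names such as `run_step` and `dump_stats` are illustrative helpers only, not cocotb API, and this sketch uses `contextlib.nullcontext` where the scheduler imports its own `nullcontext` from `cocotb.utils`:

```python
import cProfile
import os
import pstats
from contextlib import nullcontext

# Profiling is opt-in, gated by an environment variable as in cocotb's scheduler.
_profiling = "COCOTB_ENABLE_PROFILING" in os.environ
_profile = cProfile.Profile()


class profiling_context:
    """Context manager that profiles whatever runs inside it."""

    def __enter__(self):
        _profile.enable()

    def __exit__(self, *excinfo):
        _profile.disable()


def run_step(step):
    # Callers pick a real profiling context or a no-op one, then run the step.
    ctx = profiling_context() if _profiling else nullcontext()
    with ctx:
        step()


def dump_stats(path="test_profile.pstat"):
    # Cumulative-time ordering, written out the way the scheduler's begin_test() does.
    pstats.Stats(_profile).sort_stats("cumulative").dump_stats(path)
```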
encode__starlette-1553
Route naming introspection always returns "method" for method endpoints

Discussion took place at https://gitter.im/encode/community. The bug was confirmed by @Kludex.

## Description
Method endpoints are not detected by the `inspect.isfunction` check, so we fall back to assuming that `<object>.__class__.__name__` will give the right name (`my_method` in the example below). For a bound method, however, that expression evaluates to the generic `"method"`, which is wrong.

The unexpected behaviour seems to originate from:
https://github.com/encode/starlette/blob/e086fc2da361767b532cf690e5203619bbae98aa/starlette/routing.py#L87

## Minimal example

```python
from starlette.responses import JSONResponse
from starlette.routing import Route


async def my_function(request):
    return JSONResponse({'endpoint_type': 'function'})


class MyClass:
    def __call__(self, request):
        return JSONResponse({'endpoint_type': 'class'})


class MySpecialEndpointObject:
    async def my_method(self, request):
        return JSONResponse({'endpoint_type': 'method'})


endpoint_obj = MySpecialEndpointObject()

function_route = Route('/functionEndpoint', my_function)
class_route = Route('/classEndpoint', MyClass())
method_route = Route('/methodEndpoint', endpoint_obj.my_method)

assert function_route.name == "my_function"
assert class_route.name == "MyClass"
assert method_route.name == "my_method"  # AssertionError
```

## Actual behavior
Value of `method_route.name` is `"method"`.

## Expected behavior
Value of `method_route.name` is `"my_method"`. It could also be `"MySpecialEndpointObject_my_method"`; the reason is to prevent ambiguity.
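## Possible fix (sketch)

The patched `starlette/routing.py` in this record switches the `get_name` check from `inspect.isfunction` to `inspect.isroutine`, which is true for plain functions and bound methods alike. Below is a small self-contained sketch of that behaviour, reusing the names from the minimal example above; it is illustrative only, not the full Starlette implementation:

```python
import inspect
import typing


def get_name(endpoint: typing.Callable) -> str:
    # isroutine() covers plain functions *and* bound methods, so a method
    # endpoint keeps its own __name__ instead of falling through to
    # endpoint.__class__.__name__, which is just "method" for bound methods.
    if inspect.isroutine(endpoint) or inspect.isclass(endpoint):
        return endpoint.__name__
    return endpoint.__class__.__name__


async def my_function(request):
    ...


class MyClass:
    def __call__(self, request):
        ...


class MySpecialEndpointObject:
    async def my_method(self, request):
        ...


assert get_name(my_function) == "my_function"
assert get_name(MyClass()) == "MyClass"
assert get_name(MySpecialEndpointObject().my_method) == "my_method"
```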
[ { "content": "import asyncio\nimport contextlib\nimport functools\nimport inspect\nimport re\nimport sys\nimport traceback\nimport types\nimport typing\nimport warnings\nfrom enum import Enum\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.convertors import CONVERTOR_TYPES, Convertor\nfrom starlette.datastructures import URL, Headers, URLPath\nfrom starlette.exceptions import HTTPException\nfrom starlette.requests import Request\nfrom starlette.responses import PlainTextResponse, RedirectResponse\nfrom starlette.types import ASGIApp, Receive, Scope, Send\nfrom starlette.websockets import WebSocket, WebSocketClose\n\nif sys.version_info >= (3, 7):\n from contextlib import asynccontextmanager # pragma: no cover\nelse:\n from contextlib2 import asynccontextmanager # pragma: no cover\n\n\nclass NoMatchFound(Exception):\n \"\"\"\n Raised by `.url_for(name, **path_params)` and `.url_path_for(name, **path_params)`\n if no matching route exists.\n \"\"\"\n\n def __init__(self, name: str, path_params: typing.Dict[str, typing.Any]) -> None:\n params = \", \".join(list(path_params.keys()))\n super().__init__(f'No route exists for name \"{name}\" and params \"{params}\".')\n\n\nclass Match(Enum):\n NONE = 0\n PARTIAL = 1\n FULL = 2\n\n\ndef iscoroutinefunction_or_partial(obj: typing.Any) -> bool:\n \"\"\"\n Correctly determines if an object is a coroutine function,\n including those wrapped in functools.partial objects.\n \"\"\"\n while isinstance(obj, functools.partial):\n obj = obj.func\n return inspect.iscoroutinefunction(obj)\n\n\ndef request_response(func: typing.Callable) -> ASGIApp:\n \"\"\"\n Takes a function or coroutine `func(request) -> response`,\n and returns an ASGI application.\n \"\"\"\n is_coroutine = iscoroutinefunction_or_partial(func)\n\n async def app(scope: Scope, receive: Receive, send: Send) -> None:\n request = Request(scope, receive=receive, send=send)\n if is_coroutine:\n response = await func(request)\n else:\n response = await run_in_threadpool(func, request)\n await response(scope, receive, send)\n\n return app\n\n\ndef websocket_session(func: typing.Callable) -> ASGIApp:\n \"\"\"\n Takes a coroutine `func(session)`, and returns an ASGI application.\n \"\"\"\n # assert asyncio.iscoroutinefunction(func), \"WebSocket endpoints must be async\"\n\n async def app(scope: Scope, receive: Receive, send: Send) -> None:\n session = WebSocket(scope, receive=receive, send=send)\n await func(session)\n\n return app\n\n\ndef get_name(endpoint: typing.Callable) -> str:\n if inspect.isfunction(endpoint) or inspect.isclass(endpoint):\n return endpoint.__name__\n return endpoint.__class__.__name__\n\n\ndef replace_params(\n path: str,\n param_convertors: typing.Dict[str, Convertor],\n path_params: typing.Dict[str, str],\n) -> typing.Tuple[str, dict]:\n for key, value in list(path_params.items()):\n if \"{\" + key + \"}\" in path:\n convertor = param_convertors[key]\n value = convertor.to_string(value)\n path = path.replace(\"{\" + key + \"}\", value)\n path_params.pop(key)\n return path, path_params\n\n\n# Match parameters in URL paths, eg. 
'{param}', and '{param:int}'\nPARAM_REGEX = re.compile(\"{([a-zA-Z_][a-zA-Z0-9_]*)(:[a-zA-Z_][a-zA-Z0-9_]*)?}\")\n\n\ndef compile_path(\n path: str,\n) -> typing.Tuple[typing.Pattern, str, typing.Dict[str, Convertor]]:\n \"\"\"\n Given a path string, like: \"/{username:str}\", return a three-tuple\n of (regex, format, {param_name:convertor}).\n\n regex: \"/(?P<username>[^/]+)\"\n format: \"/{username}\"\n convertors: {\"username\": StringConvertor()}\n \"\"\"\n path_regex = \"^\"\n path_format = \"\"\n duplicated_params = set()\n\n idx = 0\n param_convertors = {}\n for match in PARAM_REGEX.finditer(path):\n param_name, convertor_type = match.groups(\"str\")\n convertor_type = convertor_type.lstrip(\":\")\n assert (\n convertor_type in CONVERTOR_TYPES\n ), f\"Unknown path convertor '{convertor_type}'\"\n convertor = CONVERTOR_TYPES[convertor_type]\n\n path_regex += re.escape(path[idx : match.start()])\n path_regex += f\"(?P<{param_name}>{convertor.regex})\"\n\n path_format += path[idx : match.start()]\n path_format += \"{%s}\" % param_name\n\n if param_name in param_convertors:\n duplicated_params.add(param_name)\n\n param_convertors[param_name] = convertor\n\n idx = match.end()\n\n if duplicated_params:\n names = \", \".join(sorted(duplicated_params))\n ending = \"s\" if len(duplicated_params) > 1 else \"\"\n raise ValueError(f\"Duplicated param name{ending} {names} at path {path}\")\n\n path_regex += re.escape(path[idx:].split(\":\")[0]) + \"$\"\n path_format += path[idx:]\n\n return re.compile(path_regex), path_format, param_convertors\n\n\nclass BaseRoute:\n def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]:\n raise NotImplementedError() # pragma: no cover\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n raise NotImplementedError() # pragma: no cover\n\n async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:\n raise NotImplementedError() # pragma: no cover\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n A route may be used in isolation as a stand-alone ASGI app.\n This is a somewhat contrived case, as they'll almost always be used\n within a Router, but could be useful for some tooling and minimal apps.\n \"\"\"\n match, child_scope = self.matches(scope)\n if match == Match.NONE:\n if scope[\"type\"] == \"http\":\n response = PlainTextResponse(\"Not Found\", status_code=404)\n await response(scope, receive, send)\n elif scope[\"type\"] == \"websocket\":\n websocket_close = WebSocketClose()\n await websocket_close(scope, receive, send)\n return\n\n scope.update(child_scope)\n await self.handle(scope, receive, send)\n\n\nclass Route(BaseRoute):\n def __init__(\n self,\n path: str,\n endpoint: typing.Callable,\n *,\n methods: typing.List[str] = None,\n name: str = None,\n include_in_schema: bool = True,\n ) -> None:\n assert path.startswith(\"/\"), \"Routed paths must start with '/'\"\n self.path = path\n self.endpoint = endpoint\n self.name = get_name(endpoint) if name is None else name\n self.include_in_schema = include_in_schema\n\n endpoint_handler = endpoint\n while isinstance(endpoint_handler, functools.partial):\n endpoint_handler = endpoint_handler.func\n if inspect.isfunction(endpoint_handler) or inspect.ismethod(endpoint_handler):\n # Endpoint is function or method. Treat it as `func(request) -> response`.\n self.app = request_response(endpoint)\n if methods is None:\n methods = [\"GET\"]\n else:\n # Endpoint is a class. 
Treat it as ASGI.\n self.app = endpoint\n\n if methods is None:\n self.methods = None\n else:\n self.methods = {method.upper() for method in methods}\n if \"GET\" in self.methods:\n self.methods.add(\"HEAD\")\n\n self.path_regex, self.path_format, self.param_convertors = compile_path(path)\n\n def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]:\n if scope[\"type\"] == \"http\":\n match = self.path_regex.match(scope[\"path\"])\n if match:\n matched_params = match.groupdict()\n for key, value in matched_params.items():\n matched_params[key] = self.param_convertors[key].convert(value)\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(matched_params)\n child_scope = {\"endpoint\": self.endpoint, \"path_params\": path_params}\n if self.methods and scope[\"method\"] not in self.methods:\n return Match.PARTIAL, child_scope\n else:\n return Match.FULL, child_scope\n return Match.NONE, {}\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n seen_params = set(path_params.keys())\n expected_params = set(self.param_convertors.keys())\n\n if name != self.name or seen_params != expected_params:\n raise NoMatchFound(name, path_params)\n\n path, remaining_params = replace_params(\n self.path_format, self.param_convertors, path_params\n )\n assert not remaining_params\n return URLPath(path=path, protocol=\"http\")\n\n async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:\n if self.methods and scope[\"method\"] not in self.methods:\n headers = {\"Allow\": \", \".join(self.methods)}\n if \"app\" in scope:\n raise HTTPException(status_code=405, headers=headers)\n else:\n response = PlainTextResponse(\n \"Method Not Allowed\", status_code=405, headers=headers\n )\n await response(scope, receive, send)\n else:\n await self.app(scope, receive, send)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, Route)\n and self.path == other.path\n and self.endpoint == other.endpoint\n and self.methods == other.methods\n )\n\n\nclass WebSocketRoute(BaseRoute):\n def __init__(\n self, path: str, endpoint: typing.Callable, *, name: str = None\n ) -> None:\n assert path.startswith(\"/\"), \"Routed paths must start with '/'\"\n self.path = path\n self.endpoint = endpoint\n self.name = get_name(endpoint) if name is None else name\n\n endpoint_handler = endpoint\n while isinstance(endpoint_handler, functools.partial):\n endpoint_handler = endpoint_handler.func\n if inspect.isfunction(endpoint_handler) or inspect.ismethod(endpoint_handler):\n # Endpoint is function or method. Treat it as `func(websocket)`.\n self.app = websocket_session(endpoint)\n else:\n # Endpoint is a class. 
Treat it as ASGI.\n self.app = endpoint\n\n self.path_regex, self.path_format, self.param_convertors = compile_path(path)\n\n def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]:\n if scope[\"type\"] == \"websocket\":\n match = self.path_regex.match(scope[\"path\"])\n if match:\n matched_params = match.groupdict()\n for key, value in matched_params.items():\n matched_params[key] = self.param_convertors[key].convert(value)\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(matched_params)\n child_scope = {\"endpoint\": self.endpoint, \"path_params\": path_params}\n return Match.FULL, child_scope\n return Match.NONE, {}\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n seen_params = set(path_params.keys())\n expected_params = set(self.param_convertors.keys())\n\n if name != self.name or seen_params != expected_params:\n raise NoMatchFound(name, path_params)\n\n path, remaining_params = replace_params(\n self.path_format, self.param_convertors, path_params\n )\n assert not remaining_params\n return URLPath(path=path, protocol=\"websocket\")\n\n async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:\n await self.app(scope, receive, send)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, WebSocketRoute)\n and self.path == other.path\n and self.endpoint == other.endpoint\n )\n\n\nclass Mount(BaseRoute):\n def __init__(\n self,\n path: str,\n app: ASGIApp = None,\n routes: typing.Sequence[BaseRoute] = None,\n name: str = None,\n ) -> None:\n assert path == \"\" or path.startswith(\"/\"), \"Routed paths must start with '/'\"\n assert (\n app is not None or routes is not None\n ), \"Either 'app=...', or 'routes=' must be specified\"\n self.path = path.rstrip(\"/\")\n if app is not None:\n self.app: ASGIApp = app\n else:\n self.app = Router(routes=routes)\n self.name = name\n self.path_regex, self.path_format, self.param_convertors = compile_path(\n self.path + \"/{path:path}\"\n )\n\n @property\n def routes(self) -> typing.List[BaseRoute]:\n return getattr(self.app, \"routes\", [])\n\n def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]:\n if scope[\"type\"] in (\"http\", \"websocket\"):\n path = scope[\"path\"]\n match = self.path_regex.match(path)\n if match:\n matched_params = match.groupdict()\n for key, value in matched_params.items():\n matched_params[key] = self.param_convertors[key].convert(value)\n remaining_path = \"/\" + matched_params.pop(\"path\")\n matched_path = path[: -len(remaining_path)]\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(matched_params)\n root_path = scope.get(\"root_path\", \"\")\n child_scope = {\n \"path_params\": path_params,\n \"app_root_path\": scope.get(\"app_root_path\", root_path),\n \"root_path\": root_path + matched_path,\n \"path\": remaining_path,\n \"endpoint\": self.app,\n }\n return Match.FULL, child_scope\n return Match.NONE, {}\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n if self.name is not None and name == self.name and \"path\" in path_params:\n # 'name' matches \"<mount_name>\".\n path_params[\"path\"] = path_params[\"path\"].lstrip(\"/\")\n path, remaining_params = replace_params(\n self.path_format, self.param_convertors, path_params\n )\n if not remaining_params:\n return URLPath(path=path)\n elif self.name is None or name.startswith(self.name + \":\"):\n if self.name is None:\n # No mount name.\n remaining_name = name\n else:\n # 'name' matches 
\"<mount_name>:<child_name>\".\n remaining_name = name[len(self.name) + 1 :]\n path_kwarg = path_params.get(\"path\")\n path_params[\"path\"] = \"\"\n path_prefix, remaining_params = replace_params(\n self.path_format, self.param_convertors, path_params\n )\n if path_kwarg is not None:\n remaining_params[\"path\"] = path_kwarg\n for route in self.routes or []:\n try:\n url = route.url_path_for(remaining_name, **remaining_params)\n return URLPath(\n path=path_prefix.rstrip(\"/\") + str(url), protocol=url.protocol\n )\n except NoMatchFound:\n pass\n raise NoMatchFound(name, path_params)\n\n async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:\n await self.app(scope, receive, send)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, Mount)\n and self.path == other.path\n and self.app == other.app\n )\n\n\nclass Host(BaseRoute):\n def __init__(self, host: str, app: ASGIApp, name: str = None) -> None:\n self.host = host\n self.app = app\n self.name = name\n self.host_regex, self.host_format, self.param_convertors = compile_path(host)\n\n @property\n def routes(self) -> typing.List[BaseRoute]:\n return getattr(self.app, \"routes\", [])\n\n def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]:\n if scope[\"type\"] in (\"http\", \"websocket\"):\n headers = Headers(scope=scope)\n host = headers.get(\"host\", \"\").split(\":\")[0]\n match = self.host_regex.match(host)\n if match:\n matched_params = match.groupdict()\n for key, value in matched_params.items():\n matched_params[key] = self.param_convertors[key].convert(value)\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(matched_params)\n child_scope = {\"path_params\": path_params, \"endpoint\": self.app}\n return Match.FULL, child_scope\n return Match.NONE, {}\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n if self.name is not None and name == self.name and \"path\" in path_params:\n # 'name' matches \"<mount_name>\".\n path = path_params.pop(\"path\")\n host, remaining_params = replace_params(\n self.host_format, self.param_convertors, path_params\n )\n if not remaining_params:\n return URLPath(path=path, host=host)\n elif self.name is None or name.startswith(self.name + \":\"):\n if self.name is None:\n # No mount name.\n remaining_name = name\n else:\n # 'name' matches \"<mount_name>:<child_name>\".\n remaining_name = name[len(self.name) + 1 :]\n host, remaining_params = replace_params(\n self.host_format, self.param_convertors, path_params\n )\n for route in self.routes or []:\n try:\n url = route.url_path_for(remaining_name, **remaining_params)\n return URLPath(path=str(url), protocol=url.protocol, host=host)\n except NoMatchFound:\n pass\n raise NoMatchFound(name, path_params)\n\n async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:\n await self.app(scope, receive, send)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, Host)\n and self.host == other.host\n and self.app == other.app\n )\n\n\n_T = typing.TypeVar(\"_T\")\n\n\nclass _AsyncLiftContextManager(typing.AsyncContextManager[_T]):\n def __init__(self, cm: typing.ContextManager[_T]):\n self._cm = cm\n\n async def __aenter__(self) -> _T:\n return self._cm.__enter__()\n\n async def __aexit__(\n self,\n exc_type: typing.Optional[typing.Type[BaseException]],\n exc_value: typing.Optional[BaseException],\n traceback: typing.Optional[types.TracebackType],\n ) -> typing.Optional[bool]:\n return self._cm.__exit__(exc_type, 
exc_value, traceback)\n\n\ndef _wrap_gen_lifespan_context(\n lifespan_context: typing.Callable[[typing.Any], typing.Generator]\n) -> typing.Callable[[typing.Any], typing.AsyncContextManager]:\n cmgr = contextlib.contextmanager(lifespan_context)\n\n @functools.wraps(cmgr)\n def wrapper(app: typing.Any) -> _AsyncLiftContextManager:\n return _AsyncLiftContextManager(cmgr(app))\n\n return wrapper\n\n\nclass _DefaultLifespan:\n def __init__(self, router: \"Router\"):\n self._router = router\n\n async def __aenter__(self) -> None:\n await self._router.startup()\n\n async def __aexit__(self, *exc_info: object) -> None:\n await self._router.shutdown()\n\n def __call__(self: _T, app: object) -> _T:\n return self\n\n\nclass Router:\n def __init__(\n self,\n routes: typing.Sequence[BaseRoute] = None,\n redirect_slashes: bool = True,\n default: ASGIApp = None,\n on_startup: typing.Sequence[typing.Callable] = None,\n on_shutdown: typing.Sequence[typing.Callable] = None,\n lifespan: typing.Callable[[typing.Any], typing.AsyncContextManager] = None,\n ) -> None:\n self.routes = [] if routes is None else list(routes)\n self.redirect_slashes = redirect_slashes\n self.default = self.not_found if default is None else default\n self.on_startup = [] if on_startup is None else list(on_startup)\n self.on_shutdown = [] if on_shutdown is None else list(on_shutdown)\n\n if lifespan is None:\n self.lifespan_context: typing.Callable[\n [typing.Any], typing.AsyncContextManager\n ] = _DefaultLifespan(self)\n\n elif inspect.isasyncgenfunction(lifespan):\n warnings.warn(\n \"async generator function lifespans are deprecated, \"\n \"use an @contextlib.asynccontextmanager function instead\",\n DeprecationWarning,\n )\n self.lifespan_context = asynccontextmanager(\n lifespan, # type: ignore[arg-type]\n )\n elif inspect.isgeneratorfunction(lifespan):\n warnings.warn(\n \"generator function lifespans are deprecated, \"\n \"use an @contextlib.asynccontextmanager function instead\",\n DeprecationWarning,\n )\n self.lifespan_context = _wrap_gen_lifespan_context(\n lifespan, # type: ignore[arg-type]\n )\n else:\n self.lifespan_context = lifespan\n\n async def not_found(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] == \"websocket\":\n websocket_close = WebSocketClose()\n await websocket_close(scope, receive, send)\n return\n\n # If we're running inside a starlette application then raise an\n # exception, so that the configurable exception handler can deal with\n # returning the response. 
For plain ASGI apps, just return the response.\n if \"app\" in scope:\n raise HTTPException(status_code=404)\n else:\n response = PlainTextResponse(\"Not Found\", status_code=404)\n await response(scope, receive, send)\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n for route in self.routes:\n try:\n return route.url_path_for(name, **path_params)\n except NoMatchFound:\n pass\n raise NoMatchFound(name, path_params)\n\n async def startup(self) -> None:\n \"\"\"\n Run any `.on_startup` event handlers.\n \"\"\"\n for handler in self.on_startup:\n if asyncio.iscoroutinefunction(handler):\n await handler()\n else:\n handler()\n\n async def shutdown(self) -> None:\n \"\"\"\n Run any `.on_shutdown` event handlers.\n \"\"\"\n for handler in self.on_shutdown:\n if asyncio.iscoroutinefunction(handler):\n await handler()\n else:\n handler()\n\n async def lifespan(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n Handle ASGI lifespan messages, which allows us to manage application\n startup and shutdown events.\n \"\"\"\n started = False\n app = scope.get(\"app\")\n await receive()\n try:\n async with self.lifespan_context(app):\n await send({\"type\": \"lifespan.startup.complete\"})\n started = True\n await receive()\n except BaseException:\n exc_text = traceback.format_exc()\n if started:\n await send({\"type\": \"lifespan.shutdown.failed\", \"message\": exc_text})\n else:\n await send({\"type\": \"lifespan.startup.failed\", \"message\": exc_text})\n raise\n else:\n await send({\"type\": \"lifespan.shutdown.complete\"})\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n The main entry point to the Router class.\n \"\"\"\n assert scope[\"type\"] in (\"http\", \"websocket\", \"lifespan\")\n\n if \"router\" not in scope:\n scope[\"router\"] = self\n\n if scope[\"type\"] == \"lifespan\":\n await self.lifespan(scope, receive, send)\n return\n\n partial = None\n\n for route in self.routes:\n # Determine if any route matches the incoming scope,\n # and hand over to the matching route if found.\n match, child_scope = route.matches(scope)\n if match == Match.FULL:\n scope.update(child_scope)\n await route.handle(scope, receive, send)\n return\n elif match == Match.PARTIAL and partial is None:\n partial = route\n partial_scope = child_scope\n\n if partial is not None:\n #  Handle partial matches. 
These are cases where an endpoint is\n # able to handle the request, but is not a preferred option.\n # We use this in particular to deal with \"405 Method Not Allowed\".\n scope.update(partial_scope)\n await partial.handle(scope, receive, send)\n return\n\n if scope[\"type\"] == \"http\" and self.redirect_slashes and scope[\"path\"] != \"/\":\n redirect_scope = dict(scope)\n if scope[\"path\"].endswith(\"/\"):\n redirect_scope[\"path\"] = redirect_scope[\"path\"].rstrip(\"/\")\n else:\n redirect_scope[\"path\"] = redirect_scope[\"path\"] + \"/\"\n\n for route in self.routes:\n match, child_scope = route.matches(redirect_scope)\n if match != Match.NONE:\n redirect_url = URL(scope=redirect_scope)\n response = RedirectResponse(url=str(redirect_url))\n await response(scope, receive, send)\n return\n\n await self.default(scope, receive, send)\n\n def __eq__(self, other: typing.Any) -> bool:\n return isinstance(other, Router) and self.routes == other.routes\n\n # The following usages are now discouraged in favour of configuration\n #  during Router.__init__(...)\n def mount(\n self, path: str, app: ASGIApp, name: str = None\n ) -> None: # pragma: nocover\n \"\"\"\n We no longer document this API, and its usage is discouraged.\n Instead you should use the following approach:\n\n routes = [\n Mount(path, ...),\n ...\n ]\n\n app = Starlette(routes=routes)\n \"\"\"\n\n route = Mount(path, app=app, name=name)\n self.routes.append(route)\n\n def host(\n self, host: str, app: ASGIApp, name: str = None\n ) -> None: # pragma: no cover\n \"\"\"\n We no longer document this API, and its usage is discouraged.\n Instead you should use the following approach:\n\n routes = [\n Host(path, ...),\n ...\n ]\n\n app = Starlette(routes=routes)\n \"\"\"\n\n route = Host(host, app=app, name=name)\n self.routes.append(route)\n\n def add_route(\n self,\n path: str,\n endpoint: typing.Callable,\n methods: typing.List[str] = None,\n name: str = None,\n include_in_schema: bool = True,\n ) -> None: # pragma: nocover\n route = Route(\n path,\n endpoint=endpoint,\n methods=methods,\n name=name,\n include_in_schema=include_in_schema,\n )\n self.routes.append(route)\n\n def add_websocket_route(\n self, path: str, endpoint: typing.Callable, name: str = None\n ) -> None: # pragma: no cover\n route = WebSocketRoute(path, endpoint=endpoint, name=name)\n self.routes.append(route)\n\n def route(\n self,\n path: str,\n methods: typing.List[str] = None,\n name: str = None,\n include_in_schema: bool = True,\n ) -> typing.Callable: # pragma: nocover\n \"\"\"\n We no longer document this decorator style API, and its usage is discouraged.\n Instead you should use the following approach:\n\n routes = [\n Route(path, endpoint=..., ...),\n ...\n ]\n\n app = Starlette(routes=routes)\n \"\"\"\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_route(\n path,\n func,\n methods=methods,\n name=name,\n include_in_schema=include_in_schema,\n )\n return func\n\n return decorator\n\n def websocket_route(\n self, path: str, name: str = None\n ) -> typing.Callable: # pragma: nocover\n \"\"\"\n We no longer document this decorator style API, and its usage is discouraged.\n Instead you should use the following approach:\n\n routes = [\n WebSocketRoute(path, endpoint=..., ...),\n ...\n ]\n\n app = Starlette(routes=routes)\n \"\"\"\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_websocket_route(path, func, name=name)\n return func\n\n return decorator\n\n def add_event_handler(\n self, event_type: str, func: 
typing.Callable\n ) -> None: # pragma: no cover\n assert event_type in (\"startup\", \"shutdown\")\n\n if event_type == \"startup\":\n self.on_startup.append(func)\n else:\n self.on_shutdown.append(func)\n\n def on_event(self, event_type: str) -> typing.Callable: # pragma: nocover\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_event_handler(event_type, func)\n return func\n\n return decorator\n", "path": "starlette/routing.py" } ]
[ { "content": "import asyncio\nimport contextlib\nimport functools\nimport inspect\nimport re\nimport sys\nimport traceback\nimport types\nimport typing\nimport warnings\nfrom enum import Enum\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.convertors import CONVERTOR_TYPES, Convertor\nfrom starlette.datastructures import URL, Headers, URLPath\nfrom starlette.exceptions import HTTPException\nfrom starlette.requests import Request\nfrom starlette.responses import PlainTextResponse, RedirectResponse\nfrom starlette.types import ASGIApp, Receive, Scope, Send\nfrom starlette.websockets import WebSocket, WebSocketClose\n\nif sys.version_info >= (3, 7):\n from contextlib import asynccontextmanager # pragma: no cover\nelse:\n from contextlib2 import asynccontextmanager # pragma: no cover\n\n\nclass NoMatchFound(Exception):\n \"\"\"\n Raised by `.url_for(name, **path_params)` and `.url_path_for(name, **path_params)`\n if no matching route exists.\n \"\"\"\n\n def __init__(self, name: str, path_params: typing.Dict[str, typing.Any]) -> None:\n params = \", \".join(list(path_params.keys()))\n super().__init__(f'No route exists for name \"{name}\" and params \"{params}\".')\n\n\nclass Match(Enum):\n NONE = 0\n PARTIAL = 1\n FULL = 2\n\n\ndef iscoroutinefunction_or_partial(obj: typing.Any) -> bool:\n \"\"\"\n Correctly determines if an object is a coroutine function,\n including those wrapped in functools.partial objects.\n \"\"\"\n while isinstance(obj, functools.partial):\n obj = obj.func\n return inspect.iscoroutinefunction(obj)\n\n\ndef request_response(func: typing.Callable) -> ASGIApp:\n \"\"\"\n Takes a function or coroutine `func(request) -> response`,\n and returns an ASGI application.\n \"\"\"\n is_coroutine = iscoroutinefunction_or_partial(func)\n\n async def app(scope: Scope, receive: Receive, send: Send) -> None:\n request = Request(scope, receive=receive, send=send)\n if is_coroutine:\n response = await func(request)\n else:\n response = await run_in_threadpool(func, request)\n await response(scope, receive, send)\n\n return app\n\n\ndef websocket_session(func: typing.Callable) -> ASGIApp:\n \"\"\"\n Takes a coroutine `func(session)`, and returns an ASGI application.\n \"\"\"\n # assert asyncio.iscoroutinefunction(func), \"WebSocket endpoints must be async\"\n\n async def app(scope: Scope, receive: Receive, send: Send) -> None:\n session = WebSocket(scope, receive=receive, send=send)\n await func(session)\n\n return app\n\n\ndef get_name(endpoint: typing.Callable) -> str:\n if inspect.isroutine(endpoint) or inspect.isclass(endpoint):\n return endpoint.__name__\n return endpoint.__class__.__name__\n\n\ndef replace_params(\n path: str,\n param_convertors: typing.Dict[str, Convertor],\n path_params: typing.Dict[str, str],\n) -> typing.Tuple[str, dict]:\n for key, value in list(path_params.items()):\n if \"{\" + key + \"}\" in path:\n convertor = param_convertors[key]\n value = convertor.to_string(value)\n path = path.replace(\"{\" + key + \"}\", value)\n path_params.pop(key)\n return path, path_params\n\n\n# Match parameters in URL paths, eg. 
'{param}', and '{param:int}'\nPARAM_REGEX = re.compile(\"{([a-zA-Z_][a-zA-Z0-9_]*)(:[a-zA-Z_][a-zA-Z0-9_]*)?}\")\n\n\ndef compile_path(\n path: str,\n) -> typing.Tuple[typing.Pattern, str, typing.Dict[str, Convertor]]:\n \"\"\"\n Given a path string, like: \"/{username:str}\", return a three-tuple\n of (regex, format, {param_name:convertor}).\n\n regex: \"/(?P<username>[^/]+)\"\n format: \"/{username}\"\n convertors: {\"username\": StringConvertor()}\n \"\"\"\n path_regex = \"^\"\n path_format = \"\"\n duplicated_params = set()\n\n idx = 0\n param_convertors = {}\n for match in PARAM_REGEX.finditer(path):\n param_name, convertor_type = match.groups(\"str\")\n convertor_type = convertor_type.lstrip(\":\")\n assert (\n convertor_type in CONVERTOR_TYPES\n ), f\"Unknown path convertor '{convertor_type}'\"\n convertor = CONVERTOR_TYPES[convertor_type]\n\n path_regex += re.escape(path[idx : match.start()])\n path_regex += f\"(?P<{param_name}>{convertor.regex})\"\n\n path_format += path[idx : match.start()]\n path_format += \"{%s}\" % param_name\n\n if param_name in param_convertors:\n duplicated_params.add(param_name)\n\n param_convertors[param_name] = convertor\n\n idx = match.end()\n\n if duplicated_params:\n names = \", \".join(sorted(duplicated_params))\n ending = \"s\" if len(duplicated_params) > 1 else \"\"\n raise ValueError(f\"Duplicated param name{ending} {names} at path {path}\")\n\n path_regex += re.escape(path[idx:].split(\":\")[0]) + \"$\"\n path_format += path[idx:]\n\n return re.compile(path_regex), path_format, param_convertors\n\n\nclass BaseRoute:\n def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]:\n raise NotImplementedError() # pragma: no cover\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n raise NotImplementedError() # pragma: no cover\n\n async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:\n raise NotImplementedError() # pragma: no cover\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n A route may be used in isolation as a stand-alone ASGI app.\n This is a somewhat contrived case, as they'll almost always be used\n within a Router, but could be useful for some tooling and minimal apps.\n \"\"\"\n match, child_scope = self.matches(scope)\n if match == Match.NONE:\n if scope[\"type\"] == \"http\":\n response = PlainTextResponse(\"Not Found\", status_code=404)\n await response(scope, receive, send)\n elif scope[\"type\"] == \"websocket\":\n websocket_close = WebSocketClose()\n await websocket_close(scope, receive, send)\n return\n\n scope.update(child_scope)\n await self.handle(scope, receive, send)\n\n\nclass Route(BaseRoute):\n def __init__(\n self,\n path: str,\n endpoint: typing.Callable,\n *,\n methods: typing.List[str] = None,\n name: str = None,\n include_in_schema: bool = True,\n ) -> None:\n assert path.startswith(\"/\"), \"Routed paths must start with '/'\"\n self.path = path\n self.endpoint = endpoint\n self.name = get_name(endpoint) if name is None else name\n self.include_in_schema = include_in_schema\n\n endpoint_handler = endpoint\n while isinstance(endpoint_handler, functools.partial):\n endpoint_handler = endpoint_handler.func\n if inspect.isfunction(endpoint_handler) or inspect.ismethod(endpoint_handler):\n # Endpoint is function or method. Treat it as `func(request) -> response`.\n self.app = request_response(endpoint)\n if methods is None:\n methods = [\"GET\"]\n else:\n # Endpoint is a class. 
Treat it as ASGI.\n self.app = endpoint\n\n if methods is None:\n self.methods = None\n else:\n self.methods = {method.upper() for method in methods}\n if \"GET\" in self.methods:\n self.methods.add(\"HEAD\")\n\n self.path_regex, self.path_format, self.param_convertors = compile_path(path)\n\n def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]:\n if scope[\"type\"] == \"http\":\n match = self.path_regex.match(scope[\"path\"])\n if match:\n matched_params = match.groupdict()\n for key, value in matched_params.items():\n matched_params[key] = self.param_convertors[key].convert(value)\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(matched_params)\n child_scope = {\"endpoint\": self.endpoint, \"path_params\": path_params}\n if self.methods and scope[\"method\"] not in self.methods:\n return Match.PARTIAL, child_scope\n else:\n return Match.FULL, child_scope\n return Match.NONE, {}\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n seen_params = set(path_params.keys())\n expected_params = set(self.param_convertors.keys())\n\n if name != self.name or seen_params != expected_params:\n raise NoMatchFound(name, path_params)\n\n path, remaining_params = replace_params(\n self.path_format, self.param_convertors, path_params\n )\n assert not remaining_params\n return URLPath(path=path, protocol=\"http\")\n\n async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:\n if self.methods and scope[\"method\"] not in self.methods:\n headers = {\"Allow\": \", \".join(self.methods)}\n if \"app\" in scope:\n raise HTTPException(status_code=405, headers=headers)\n else:\n response = PlainTextResponse(\n \"Method Not Allowed\", status_code=405, headers=headers\n )\n await response(scope, receive, send)\n else:\n await self.app(scope, receive, send)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, Route)\n and self.path == other.path\n and self.endpoint == other.endpoint\n and self.methods == other.methods\n )\n\n\nclass WebSocketRoute(BaseRoute):\n def __init__(\n self, path: str, endpoint: typing.Callable, *, name: str = None\n ) -> None:\n assert path.startswith(\"/\"), \"Routed paths must start with '/'\"\n self.path = path\n self.endpoint = endpoint\n self.name = get_name(endpoint) if name is None else name\n\n endpoint_handler = endpoint\n while isinstance(endpoint_handler, functools.partial):\n endpoint_handler = endpoint_handler.func\n if inspect.isfunction(endpoint_handler) or inspect.ismethod(endpoint_handler):\n # Endpoint is function or method. Treat it as `func(websocket)`.\n self.app = websocket_session(endpoint)\n else:\n # Endpoint is a class. 
Treat it as ASGI.\n self.app = endpoint\n\n self.path_regex, self.path_format, self.param_convertors = compile_path(path)\n\n def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]:\n if scope[\"type\"] == \"websocket\":\n match = self.path_regex.match(scope[\"path\"])\n if match:\n matched_params = match.groupdict()\n for key, value in matched_params.items():\n matched_params[key] = self.param_convertors[key].convert(value)\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(matched_params)\n child_scope = {\"endpoint\": self.endpoint, \"path_params\": path_params}\n return Match.FULL, child_scope\n return Match.NONE, {}\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n seen_params = set(path_params.keys())\n expected_params = set(self.param_convertors.keys())\n\n if name != self.name or seen_params != expected_params:\n raise NoMatchFound(name, path_params)\n\n path, remaining_params = replace_params(\n self.path_format, self.param_convertors, path_params\n )\n assert not remaining_params\n return URLPath(path=path, protocol=\"websocket\")\n\n async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:\n await self.app(scope, receive, send)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, WebSocketRoute)\n and self.path == other.path\n and self.endpoint == other.endpoint\n )\n\n\nclass Mount(BaseRoute):\n def __init__(\n self,\n path: str,\n app: ASGIApp = None,\n routes: typing.Sequence[BaseRoute] = None,\n name: str = None,\n ) -> None:\n assert path == \"\" or path.startswith(\"/\"), \"Routed paths must start with '/'\"\n assert (\n app is not None or routes is not None\n ), \"Either 'app=...', or 'routes=' must be specified\"\n self.path = path.rstrip(\"/\")\n if app is not None:\n self.app: ASGIApp = app\n else:\n self.app = Router(routes=routes)\n self.name = name\n self.path_regex, self.path_format, self.param_convertors = compile_path(\n self.path + \"/{path:path}\"\n )\n\n @property\n def routes(self) -> typing.List[BaseRoute]:\n return getattr(self.app, \"routes\", [])\n\n def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]:\n if scope[\"type\"] in (\"http\", \"websocket\"):\n path = scope[\"path\"]\n match = self.path_regex.match(path)\n if match:\n matched_params = match.groupdict()\n for key, value in matched_params.items():\n matched_params[key] = self.param_convertors[key].convert(value)\n remaining_path = \"/\" + matched_params.pop(\"path\")\n matched_path = path[: -len(remaining_path)]\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(matched_params)\n root_path = scope.get(\"root_path\", \"\")\n child_scope = {\n \"path_params\": path_params,\n \"app_root_path\": scope.get(\"app_root_path\", root_path),\n \"root_path\": root_path + matched_path,\n \"path\": remaining_path,\n \"endpoint\": self.app,\n }\n return Match.FULL, child_scope\n return Match.NONE, {}\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n if self.name is not None and name == self.name and \"path\" in path_params:\n # 'name' matches \"<mount_name>\".\n path_params[\"path\"] = path_params[\"path\"].lstrip(\"/\")\n path, remaining_params = replace_params(\n self.path_format, self.param_convertors, path_params\n )\n if not remaining_params:\n return URLPath(path=path)\n elif self.name is None or name.startswith(self.name + \":\"):\n if self.name is None:\n # No mount name.\n remaining_name = name\n else:\n # 'name' matches 
\"<mount_name>:<child_name>\".\n remaining_name = name[len(self.name) + 1 :]\n path_kwarg = path_params.get(\"path\")\n path_params[\"path\"] = \"\"\n path_prefix, remaining_params = replace_params(\n self.path_format, self.param_convertors, path_params\n )\n if path_kwarg is not None:\n remaining_params[\"path\"] = path_kwarg\n for route in self.routes or []:\n try:\n url = route.url_path_for(remaining_name, **remaining_params)\n return URLPath(\n path=path_prefix.rstrip(\"/\") + str(url), protocol=url.protocol\n )\n except NoMatchFound:\n pass\n raise NoMatchFound(name, path_params)\n\n async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:\n await self.app(scope, receive, send)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, Mount)\n and self.path == other.path\n and self.app == other.app\n )\n\n\nclass Host(BaseRoute):\n def __init__(self, host: str, app: ASGIApp, name: str = None) -> None:\n self.host = host\n self.app = app\n self.name = name\n self.host_regex, self.host_format, self.param_convertors = compile_path(host)\n\n @property\n def routes(self) -> typing.List[BaseRoute]:\n return getattr(self.app, \"routes\", [])\n\n def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]:\n if scope[\"type\"] in (\"http\", \"websocket\"):\n headers = Headers(scope=scope)\n host = headers.get(\"host\", \"\").split(\":\")[0]\n match = self.host_regex.match(host)\n if match:\n matched_params = match.groupdict()\n for key, value in matched_params.items():\n matched_params[key] = self.param_convertors[key].convert(value)\n path_params = dict(scope.get(\"path_params\", {}))\n path_params.update(matched_params)\n child_scope = {\"path_params\": path_params, \"endpoint\": self.app}\n return Match.FULL, child_scope\n return Match.NONE, {}\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n if self.name is not None and name == self.name and \"path\" in path_params:\n # 'name' matches \"<mount_name>\".\n path = path_params.pop(\"path\")\n host, remaining_params = replace_params(\n self.host_format, self.param_convertors, path_params\n )\n if not remaining_params:\n return URLPath(path=path, host=host)\n elif self.name is None or name.startswith(self.name + \":\"):\n if self.name is None:\n # No mount name.\n remaining_name = name\n else:\n # 'name' matches \"<mount_name>:<child_name>\".\n remaining_name = name[len(self.name) + 1 :]\n host, remaining_params = replace_params(\n self.host_format, self.param_convertors, path_params\n )\n for route in self.routes or []:\n try:\n url = route.url_path_for(remaining_name, **remaining_params)\n return URLPath(path=str(url), protocol=url.protocol, host=host)\n except NoMatchFound:\n pass\n raise NoMatchFound(name, path_params)\n\n async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:\n await self.app(scope, receive, send)\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, Host)\n and self.host == other.host\n and self.app == other.app\n )\n\n\n_T = typing.TypeVar(\"_T\")\n\n\nclass _AsyncLiftContextManager(typing.AsyncContextManager[_T]):\n def __init__(self, cm: typing.ContextManager[_T]):\n self._cm = cm\n\n async def __aenter__(self) -> _T:\n return self._cm.__enter__()\n\n async def __aexit__(\n self,\n exc_type: typing.Optional[typing.Type[BaseException]],\n exc_value: typing.Optional[BaseException],\n traceback: typing.Optional[types.TracebackType],\n ) -> typing.Optional[bool]:\n return self._cm.__exit__(exc_type, 
exc_value, traceback)\n\n\ndef _wrap_gen_lifespan_context(\n lifespan_context: typing.Callable[[typing.Any], typing.Generator]\n) -> typing.Callable[[typing.Any], typing.AsyncContextManager]:\n cmgr = contextlib.contextmanager(lifespan_context)\n\n @functools.wraps(cmgr)\n def wrapper(app: typing.Any) -> _AsyncLiftContextManager:\n return _AsyncLiftContextManager(cmgr(app))\n\n return wrapper\n\n\nclass _DefaultLifespan:\n def __init__(self, router: \"Router\"):\n self._router = router\n\n async def __aenter__(self) -> None:\n await self._router.startup()\n\n async def __aexit__(self, *exc_info: object) -> None:\n await self._router.shutdown()\n\n def __call__(self: _T, app: object) -> _T:\n return self\n\n\nclass Router:\n def __init__(\n self,\n routes: typing.Sequence[BaseRoute] = None,\n redirect_slashes: bool = True,\n default: ASGIApp = None,\n on_startup: typing.Sequence[typing.Callable] = None,\n on_shutdown: typing.Sequence[typing.Callable] = None,\n lifespan: typing.Callable[[typing.Any], typing.AsyncContextManager] = None,\n ) -> None:\n self.routes = [] if routes is None else list(routes)\n self.redirect_slashes = redirect_slashes\n self.default = self.not_found if default is None else default\n self.on_startup = [] if on_startup is None else list(on_startup)\n self.on_shutdown = [] if on_shutdown is None else list(on_shutdown)\n\n if lifespan is None:\n self.lifespan_context: typing.Callable[\n [typing.Any], typing.AsyncContextManager\n ] = _DefaultLifespan(self)\n\n elif inspect.isasyncgenfunction(lifespan):\n warnings.warn(\n \"async generator function lifespans are deprecated, \"\n \"use an @contextlib.asynccontextmanager function instead\",\n DeprecationWarning,\n )\n self.lifespan_context = asynccontextmanager(\n lifespan, # type: ignore[arg-type]\n )\n elif inspect.isgeneratorfunction(lifespan):\n warnings.warn(\n \"generator function lifespans are deprecated, \"\n \"use an @contextlib.asynccontextmanager function instead\",\n DeprecationWarning,\n )\n self.lifespan_context = _wrap_gen_lifespan_context(\n lifespan, # type: ignore[arg-type]\n )\n else:\n self.lifespan_context = lifespan\n\n async def not_found(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] == \"websocket\":\n websocket_close = WebSocketClose()\n await websocket_close(scope, receive, send)\n return\n\n # If we're running inside a starlette application then raise an\n # exception, so that the configurable exception handler can deal with\n # returning the response. 
For plain ASGI apps, just return the response.\n if \"app\" in scope:\n raise HTTPException(status_code=404)\n else:\n response = PlainTextResponse(\"Not Found\", status_code=404)\n await response(scope, receive, send)\n\n def url_path_for(self, name: str, **path_params: typing.Any) -> URLPath:\n for route in self.routes:\n try:\n return route.url_path_for(name, **path_params)\n except NoMatchFound:\n pass\n raise NoMatchFound(name, path_params)\n\n async def startup(self) -> None:\n \"\"\"\n Run any `.on_startup` event handlers.\n \"\"\"\n for handler in self.on_startup:\n if asyncio.iscoroutinefunction(handler):\n await handler()\n else:\n handler()\n\n async def shutdown(self) -> None:\n \"\"\"\n Run any `.on_shutdown` event handlers.\n \"\"\"\n for handler in self.on_shutdown:\n if asyncio.iscoroutinefunction(handler):\n await handler()\n else:\n handler()\n\n async def lifespan(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n Handle ASGI lifespan messages, which allows us to manage application\n startup and shutdown events.\n \"\"\"\n started = False\n app = scope.get(\"app\")\n await receive()\n try:\n async with self.lifespan_context(app):\n await send({\"type\": \"lifespan.startup.complete\"})\n started = True\n await receive()\n except BaseException:\n exc_text = traceback.format_exc()\n if started:\n await send({\"type\": \"lifespan.shutdown.failed\", \"message\": exc_text})\n else:\n await send({\"type\": \"lifespan.startup.failed\", \"message\": exc_text})\n raise\n else:\n await send({\"type\": \"lifespan.shutdown.complete\"})\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n The main entry point to the Router class.\n \"\"\"\n assert scope[\"type\"] in (\"http\", \"websocket\", \"lifespan\")\n\n if \"router\" not in scope:\n scope[\"router\"] = self\n\n if scope[\"type\"] == \"lifespan\":\n await self.lifespan(scope, receive, send)\n return\n\n partial = None\n\n for route in self.routes:\n # Determine if any route matches the incoming scope,\n # and hand over to the matching route if found.\n match, child_scope = route.matches(scope)\n if match == Match.FULL:\n scope.update(child_scope)\n await route.handle(scope, receive, send)\n return\n elif match == Match.PARTIAL and partial is None:\n partial = route\n partial_scope = child_scope\n\n if partial is not None:\n #  Handle partial matches. 
These are cases where an endpoint is\n # able to handle the request, but is not a preferred option.\n # We use this in particular to deal with \"405 Method Not Allowed\".\n scope.update(partial_scope)\n await partial.handle(scope, receive, send)\n return\n\n if scope[\"type\"] == \"http\" and self.redirect_slashes and scope[\"path\"] != \"/\":\n redirect_scope = dict(scope)\n if scope[\"path\"].endswith(\"/\"):\n redirect_scope[\"path\"] = redirect_scope[\"path\"].rstrip(\"/\")\n else:\n redirect_scope[\"path\"] = redirect_scope[\"path\"] + \"/\"\n\n for route in self.routes:\n match, child_scope = route.matches(redirect_scope)\n if match != Match.NONE:\n redirect_url = URL(scope=redirect_scope)\n response = RedirectResponse(url=str(redirect_url))\n await response(scope, receive, send)\n return\n\n await self.default(scope, receive, send)\n\n def __eq__(self, other: typing.Any) -> bool:\n return isinstance(other, Router) and self.routes == other.routes\n\n # The following usages are now discouraged in favour of configuration\n #  during Router.__init__(...)\n def mount(\n self, path: str, app: ASGIApp, name: str = None\n ) -> None: # pragma: nocover\n \"\"\"\n We no longer document this API, and its usage is discouraged.\n Instead you should use the following approach:\n\n routes = [\n Mount(path, ...),\n ...\n ]\n\n app = Starlette(routes=routes)\n \"\"\"\n\n route = Mount(path, app=app, name=name)\n self.routes.append(route)\n\n def host(\n self, host: str, app: ASGIApp, name: str = None\n ) -> None: # pragma: no cover\n \"\"\"\n We no longer document this API, and its usage is discouraged.\n Instead you should use the following approach:\n\n routes = [\n Host(path, ...),\n ...\n ]\n\n app = Starlette(routes=routes)\n \"\"\"\n\n route = Host(host, app=app, name=name)\n self.routes.append(route)\n\n def add_route(\n self,\n path: str,\n endpoint: typing.Callable,\n methods: typing.List[str] = None,\n name: str = None,\n include_in_schema: bool = True,\n ) -> None: # pragma: nocover\n route = Route(\n path,\n endpoint=endpoint,\n methods=methods,\n name=name,\n include_in_schema=include_in_schema,\n )\n self.routes.append(route)\n\n def add_websocket_route(\n self, path: str, endpoint: typing.Callable, name: str = None\n ) -> None: # pragma: no cover\n route = WebSocketRoute(path, endpoint=endpoint, name=name)\n self.routes.append(route)\n\n def route(\n self,\n path: str,\n methods: typing.List[str] = None,\n name: str = None,\n include_in_schema: bool = True,\n ) -> typing.Callable: # pragma: nocover\n \"\"\"\n We no longer document this decorator style API, and its usage is discouraged.\n Instead you should use the following approach:\n\n routes = [\n Route(path, endpoint=..., ...),\n ...\n ]\n\n app = Starlette(routes=routes)\n \"\"\"\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_route(\n path,\n func,\n methods=methods,\n name=name,\n include_in_schema=include_in_schema,\n )\n return func\n\n return decorator\n\n def websocket_route(\n self, path: str, name: str = None\n ) -> typing.Callable: # pragma: nocover\n \"\"\"\n We no longer document this decorator style API, and its usage is discouraged.\n Instead you should use the following approach:\n\n routes = [\n WebSocketRoute(path, endpoint=..., ...),\n ...\n ]\n\n app = Starlette(routes=routes)\n \"\"\"\n\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_websocket_route(path, func, name=name)\n return func\n\n return decorator\n\n def add_event_handler(\n self, event_type: str, func: 
typing.Callable\n ) -> None: # pragma: no cover\n assert event_type in (\"startup\", \"shutdown\")\n\n if event_type == \"startup\":\n self.on_startup.append(func)\n else:\n self.on_shutdown.append(func)\n\n def on_event(self, event_type: str) -> typing.Callable: # pragma: nocover\n def decorator(func: typing.Callable) -> typing.Callable:\n self.add_event_handler(event_type, func)\n return func\n\n return decorator\n", "path": "starlette/routing.py" } ]
diff --git a/starlette/routing.py b/starlette/routing.py index 0388304c9..ea6ec2117 100644 --- a/starlette/routing.py +++ b/starlette/routing.py @@ -84,7 +84,7 @@ async def app(scope: Scope, receive: Receive, send: Send) -> None: def get_name(endpoint: typing.Callable) -> str: - if inspect.isfunction(endpoint) or inspect.isclass(endpoint): + if inspect.isroutine(endpoint) or inspect.isclass(endpoint): return endpoint.__name__ return endpoint.__class__.__name__ diff --git a/tests/test_routing.py b/tests/test_routing.py index 7077c5616..e8adaca48 100644 --- a/tests/test_routing.py +++ b/tests/test_routing.py @@ -1,4 +1,5 @@ import functools +import typing import uuid import pytest @@ -710,3 +711,38 @@ def test_duplicated_param_names(): match="Duplicated param names id, name at path /{id}/{name}/{id}/{name}", ): Route("/{id}/{name}/{id}/{name}", user) + + +class Endpoint: + async def my_method(self, request): + ... # pragma: no cover + + @classmethod + async def my_classmethod(cls, request): + ... # pragma: no cover + + @staticmethod + async def my_staticmethod(request): + ... # pragma: no cover + + def __call__(self, request): + ... # pragma: no cover + + [email protected]( + "endpoint, expected_name", + [ + pytest.param(func_homepage, "func_homepage", id="function"), + pytest.param(Endpoint().my_method, "my_method", id="method"), + pytest.param(Endpoint.my_classmethod, "my_classmethod", id="classmethod"), + pytest.param( + Endpoint.my_staticmethod, + "my_staticmethod", + id="staticmethod", + ), + pytest.param(Endpoint(), "Endpoint", id="object"), + pytest.param(lambda request: ..., "<lambda>", id="lambda"), + ], +) +def test_route_name(endpoint: typing.Callable, expected_name: str): + assert Route(path="/", endpoint=endpoint).name == expected_name
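As an illustration of why the diff above swaps `inspect.isfunction` for `inspect.isroutine` in `get_name`, here is a minimal sketch (the `Endpoint`/`my_method` names simply mirror the test added in the diff): bound methods and classmethods are routines but not plain functions, so the old check made `get_name` fall back to the endpoint's class name instead of the method name.

```python
import inspect


class Endpoint:
    async def my_method(self, request):
        ...


bound = Endpoint().my_method

# A bound method is not a plain function ...
print(inspect.isfunction(bound))  # False
# ... but it is a routine, so the patched check recognises it.
print(inspect.isroutine(bound))   # True


# Plain functions satisfy both predicates, so existing behaviour is unchanged.
def func_homepage(request):
    ...


print(inspect.isfunction(func_homepage))  # True
print(inspect.isroutine(func_homepage))   # True
```

With `isroutine`, `Route(path="/", endpoint=Endpoint().my_method).name` resolves to `"my_method"` rather than the old fallback `"method"` (the bound method's class name), which is exactly what the new `test_route_name` cases assert.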
pydantic__pydantic-3473
pydantic.utils.to_camel() is actually to_pascal()
### Checks

* [ y ] I added a descriptive title to this issue
* [ y ] I have searched (google, github) for similar issues and couldn't find anything
* [ y ] I have read and followed [the docs](https://pydantic-docs.helpmanual.io/) and still think this is a bug

# Bug

Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`:
```
pydantic version: 1.8.1
pydantic compiled: True
install path: /home/schlerp/projects/pelt-studio/venv/lib/python3.8/site-packages/pydantic
python version: 3.8.5 (default, Sep 4 2020, 07:30:14) [GCC 7.3.0]
platform: Linux-5.15.2-zen1-1-zen-x86_64-with-glibc2.10
optional deps. installed: ['typing-extensions']
```

Camel case and Pascal case are similar; however, they differ in the capitalisation of the first letter. The current `to_camel()` implementation in pydantic is actually Pascal case, not camel case at all. I suggest renaming it and also implementing a true camel case conversion. See below for code expressing the issue and a suggested fix.

**Pascal Case** (aka init case or upper camel case): all spaces/underscores are removed and the start of every word is capitalised.

**Camel Case** (aka lower camel case): all spaces/underscores are removed and the start of every word is capitalised, except the first word, which is always lower case.

Issue:

```py
from pydantic.utils import to_camel

valid_pascal = "PascalCase"
valid_camel = "camelCase"

example = to_camel("i_shouldnt_be_capitalised")

assert valid_pascal == to_camel("pascal_case")
assert valid_camel != to_camel("camel_case")
```

Suggested fix: rename `to_camel()` to `to_pascal()` and write a new `to_camel()` function:

```py
def to_pascal(string: str) -> str:
    return "".join(word.capitalize() for word in string.split("_"))


def to_camel(string: str) -> str:
    if len(string) >= 1:
        pascal_string = to_pascal(string)
        return pascal_string[0].lower() + pascal_string[1:]
    return string.lower()
```

Alternatively, if existing code would break because it depends on `to_camel()` remaining Pascal case, then I propose we implement a new function called `to_lower_camel()` for the lower-first-letter variant:

```py
def to_camel(string: str) -> str:
    return "".join(word.capitalize() for word in string.split("_"))


def to_lower_camel(string: str) -> str:
    if len(string) >= 1:
        pascal_string = to_camel(string)
        return pascal_string[0].lower() + pascal_string[1:]
    return string.lower()
```
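For reference, a small usage sketch of the behaviour described above, assuming pydantic 1.8.x as in the report; `to_lower_camel` here is the variant the report proposes and that the accompanying change adds to `pydantic/utils.py`:

```python
from pydantic.utils import to_camel  # pydantic 1.x


def to_lower_camel(string: str) -> str:
    # Proposed lower-camel variant: reuse to_camel, then lower-case the first letter.
    if len(string) >= 1:
        pascal_string = to_camel(string)
        return pascal_string[0].lower() + pascal_string[1:]
    return string.lower()


# Current behaviour: the result is PascalCase, not camelCase.
assert to_camel("snake_case_field") == "SnakeCaseField"

# The proposed variant produces true (lower) camelCase.
assert to_lower_camel("snake_case_field") == "snakeCaseField"
```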
[ { "content": "import warnings\nimport weakref\nfrom collections import OrderedDict, defaultdict, deque\nfrom copy import deepcopy\nfrom itertools import islice, zip_longest\nfrom types import BuiltinFunctionType, CodeType, FunctionType, GeneratorType, LambdaType, ModuleType\nfrom typing import (\n TYPE_CHECKING,\n AbstractSet,\n Any,\n Callable,\n Collection,\n Dict,\n Generator,\n Iterable,\n Iterator,\n List,\n Mapping,\n MutableMapping,\n Optional,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nfrom typing_extensions import Annotated\n\nfrom .errors import ConfigError\nfrom .typing import (\n NoneType,\n WithArgsTypes,\n all_literal_values,\n display_as_type,\n get_args,\n get_origin,\n is_literal_type,\n is_union,\n)\nfrom .version import version_info\n\nif TYPE_CHECKING:\n from inspect import Signature\n from pathlib import Path\n\n from .config import BaseConfig\n from .dataclasses import Dataclass\n from .fields import ModelField\n from .main import BaseModel\n from .typing import AbstractSetIntStr, DictIntStrAny, IntStr, MappingIntStrAny, ReprArgs\n\n RichReprResult = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]]\n\n__all__ = (\n 'import_string',\n 'sequence_like',\n 'validate_field_name',\n 'lenient_isinstance',\n 'lenient_issubclass',\n 'in_ipython',\n 'deep_update',\n 'update_not_none',\n 'almost_equal_floats',\n 'get_model',\n 'to_camel',\n 'is_valid_field',\n 'smart_deepcopy',\n 'PyObjectStr',\n 'Representation',\n 'GetterDict',\n 'ValueItems',\n 'version_info', # required here to match behaviour in v1.3\n 'ClassAttribute',\n 'path_type',\n 'ROOT_KEY',\n 'get_unique_discriminator_alias',\n 'get_discriminator_alias_and_values',\n 'LimitedDict',\n)\n\nROOT_KEY = '__root__'\n# these are types that are returned unchanged by deepcopy\nIMMUTABLE_NON_COLLECTIONS_TYPES: Set[Type[Any]] = {\n int,\n float,\n complex,\n str,\n bool,\n bytes,\n type,\n NoneType,\n FunctionType,\n BuiltinFunctionType,\n LambdaType,\n weakref.ref,\n CodeType,\n # note: including ModuleType will differ from behaviour of deepcopy by not producing error.\n # It might be not a good idea in general, but considering that this function used only internally\n # against default values of fields, this will allow to actually have a field with module as default value\n ModuleType,\n NotImplemented.__class__,\n Ellipsis.__class__,\n}\n\n# these are types that if empty, might be copied with simple copy() instead of deepcopy()\nBUILTIN_COLLECTIONS: Set[Type[Any]] = {\n list,\n set,\n tuple,\n frozenset,\n dict,\n OrderedDict,\n defaultdict,\n deque,\n}\n\n\ndef import_string(dotted_path: str) -> Any:\n \"\"\"\n Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the\n last name in the path. 
Raise ImportError if the import fails.\n \"\"\"\n from importlib import import_module\n\n try:\n module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)\n except ValueError as e:\n raise ImportError(f'\"{dotted_path}\" doesn\\'t look like a module path') from e\n\n module = import_module(module_path)\n try:\n return getattr(module, class_name)\n except AttributeError as e:\n raise ImportError(f'Module \"{module_path}\" does not define a \"{class_name}\" attribute') from e\n\n\ndef truncate(v: Union[str], *, max_len: int = 80) -> str:\n \"\"\"\n Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long\n \"\"\"\n warnings.warn('`truncate` is no-longer used by pydantic and is deprecated', DeprecationWarning)\n if isinstance(v, str) and len(v) > (max_len - 2):\n # -3 so quote + string + … + quote has correct length\n return (v[: (max_len - 3)] + '…').__repr__()\n try:\n v = v.__repr__()\n except TypeError:\n v = v.__class__.__repr__(v) # in case v is a type\n if len(v) > max_len:\n v = v[: max_len - 1] + '…'\n return v\n\n\ndef sequence_like(v: Any) -> bool:\n return isinstance(v, (list, tuple, set, frozenset, GeneratorType, deque))\n\n\ndef validate_field_name(bases: List[Type['BaseModel']], field_name: str) -> None:\n \"\"\"\n Ensure that the field's name does not shadow an existing attribute of the model.\n \"\"\"\n for base in bases:\n if getattr(base, field_name, None):\n raise NameError(\n f'Field name \"{field_name}\" shadows a BaseModel attribute; '\n f'use a different field name with \"alias=\\'{field_name}\\'\".'\n )\n\n\ndef lenient_isinstance(o: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...], None]) -> bool:\n try:\n return isinstance(o, class_or_tuple) # type: ignore[arg-type]\n except TypeError:\n return False\n\n\ndef lenient_issubclass(cls: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...], None]) -> bool:\n try:\n return isinstance(cls, type) and issubclass(cls, class_or_tuple) # type: ignore[arg-type]\n except TypeError:\n if isinstance(cls, WithArgsTypes):\n return False\n raise # pragma: no cover\n\n\ndef in_ipython() -> bool:\n \"\"\"\n Check whether we're in an ipython environment, including jupyter notebooks.\n \"\"\"\n try:\n eval('__IPYTHON__')\n except NameError:\n return False\n else: # pragma: no cover\n return True\n\n\nKeyType = TypeVar('KeyType')\n\n\ndef deep_update(mapping: Dict[KeyType, Any], *updating_mappings: Dict[KeyType, Any]) -> Dict[KeyType, Any]:\n updated_mapping = mapping.copy()\n for updating_mapping in updating_mappings:\n for k, v in updating_mapping.items():\n if k in updated_mapping and isinstance(updated_mapping[k], dict) and isinstance(v, dict):\n updated_mapping[k] = deep_update(updated_mapping[k], v)\n else:\n updated_mapping[k] = v\n return updated_mapping\n\n\ndef update_not_none(mapping: Dict[Any, Any], **update: Any) -> None:\n mapping.update({k: v for k, v in update.items() if v is not None})\n\n\ndef almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool:\n \"\"\"\n Return True if two floats are almost equal\n \"\"\"\n return abs(value_1 - value_2) <= delta\n\n\ndef generate_model_signature(\n init: Callable[..., None], fields: Dict[str, 'ModelField'], config: Type['BaseConfig']\n) -> 'Signature':\n \"\"\"\n Generate signature for model based on its fields\n \"\"\"\n from inspect import Parameter, Signature, signature\n\n from .config import Extra\n\n present_params = signature(init).parameters.values()\n merged_params: Dict[str, Parameter] = {}\n 
var_kw = None\n use_var_kw = False\n\n for param in islice(present_params, 1, None): # skip self arg\n if param.kind is param.VAR_KEYWORD:\n var_kw = param\n continue\n merged_params[param.name] = param\n\n if var_kw: # if custom init has no var_kw, fields which are not declared in it cannot be passed through\n allow_names = config.allow_population_by_field_name\n for field_name, field in fields.items():\n param_name = field.alias\n if field_name in merged_params or param_name in merged_params:\n continue\n elif not param_name.isidentifier():\n if allow_names and field_name.isidentifier():\n param_name = field_name\n else:\n use_var_kw = True\n continue\n\n # TODO: replace annotation with actual expected types once #1055 solved\n kwargs = {'default': field.default} if not field.required else {}\n merged_params[param_name] = Parameter(\n param_name, Parameter.KEYWORD_ONLY, annotation=field.outer_type_, **kwargs\n )\n\n if config.extra is Extra.allow:\n use_var_kw = True\n\n if var_kw and use_var_kw:\n # Make sure the parameter for extra kwargs\n # does not have the same name as a field\n default_model_signature = [\n ('__pydantic_self__', Parameter.POSITIONAL_OR_KEYWORD),\n ('data', Parameter.VAR_KEYWORD),\n ]\n if [(p.name, p.kind) for p in present_params] == default_model_signature:\n # if this is the standard model signature, use extra_data as the extra args name\n var_kw_name = 'extra_data'\n else:\n # else start from var_kw\n var_kw_name = var_kw.name\n\n # generate a name that's definitely unique\n while var_kw_name in fields:\n var_kw_name += '_'\n merged_params[var_kw_name] = var_kw.replace(name=var_kw_name)\n\n return Signature(parameters=list(merged_params.values()), return_annotation=None)\n\n\ndef get_model(obj: Union[Type['BaseModel'], Type['Dataclass']]) -> Type['BaseModel']:\n from .main import BaseModel\n\n try:\n model_cls = obj.__pydantic_model__ # type: ignore\n except AttributeError:\n model_cls = obj\n\n if not issubclass(model_cls, BaseModel):\n raise TypeError('Unsupported type, must be either BaseModel or dataclass')\n return model_cls\n\n\ndef to_camel(string: str) -> str:\n return ''.join(word.capitalize() for word in string.split('_'))\n\n\nT = TypeVar('T')\n\n\ndef unique_list(\n input_list: Union[List[T], Tuple[T, ...]],\n *,\n name_factory: Callable[[T], str] = str,\n) -> List[T]:\n \"\"\"\n Make a list unique while maintaining order.\n We update the list if another one with the same name is set\n (e.g. root validator overridden in subclass)\n \"\"\"\n result: List[T] = []\n result_names: List[str] = []\n for v in input_list:\n v_name = name_factory(v)\n if v_name not in result_names:\n result_names.append(v_name)\n result.append(v)\n else:\n result[result_names.index(v_name)] = v\n\n return result\n\n\nclass PyObjectStr(str):\n \"\"\"\n String class where repr doesn't include quotes. Useful with Representation when you want to return a string\n representation of something that valid (or pseudo-valid) python.\n \"\"\"\n\n def __repr__(self) -> str:\n return str(self)\n\n\nclass Representation:\n \"\"\"\n Mixin to provide __str__, __repr__, and __pretty__ methods. See #884 for more details.\n\n __pretty__ is used by [devtools](https://python-devtools.helpmanual.io/) to provide human readable representations\n of objects.\n \"\"\"\n\n __slots__: Tuple[str, ...] 
= tuple()\n\n def __repr_args__(self) -> 'ReprArgs':\n \"\"\"\n Returns the attributes to show in __str__, __repr__, and __pretty__ this is generally overridden.\n\n Can either return:\n * name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]`\n * or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]`\n \"\"\"\n attrs = ((s, getattr(self, s)) for s in self.__slots__)\n return [(a, v) for a, v in attrs if v is not None]\n\n def __repr_name__(self) -> str:\n \"\"\"\n Name of the instance's class, used in __repr__.\n \"\"\"\n return self.__class__.__name__\n\n def __repr_str__(self, join_str: str) -> str:\n return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__())\n\n def __pretty__(self, fmt: Callable[[Any], Any], **kwargs: Any) -> Generator[Any, None, None]:\n \"\"\"\n Used by devtools (https://python-devtools.helpmanual.io/) to provide a human readable representations of objects\n \"\"\"\n yield self.__repr_name__() + '('\n yield 1\n for name, value in self.__repr_args__():\n if name is not None:\n yield name + '='\n yield fmt(value)\n yield ','\n yield 0\n yield -1\n yield ')'\n\n def __str__(self) -> str:\n return self.__repr_str__(' ')\n\n def __repr__(self) -> str:\n return f'{self.__repr_name__()}({self.__repr_str__(\", \")})'\n\n def __rich_repr__(self) -> 'RichReprResult':\n \"\"\"Get fields for Rich library\"\"\"\n for name, field_repr in self.__repr_args__():\n if name is None:\n yield field_repr\n else:\n yield name, field_repr\n\n\nclass GetterDict(Representation):\n \"\"\"\n Hack to make object's smell just enough like dicts for validate_model.\n\n We can't inherit from Mapping[str, Any] because it upsets cython so we have to implement all methods ourselves.\n \"\"\"\n\n __slots__ = ('_obj',)\n\n def __init__(self, obj: Any):\n self._obj = obj\n\n def __getitem__(self, key: str) -> Any:\n try:\n return getattr(self._obj, key)\n except AttributeError as e:\n raise KeyError(key) from e\n\n def get(self, key: Any, default: Any = None) -> Any:\n return getattr(self._obj, key, default)\n\n def extra_keys(self) -> Set[Any]:\n \"\"\"\n We don't want to get any other attributes of obj if the model didn't explicitly ask for them\n \"\"\"\n return set()\n\n def keys(self) -> List[Any]:\n \"\"\"\n Keys of the pseudo dictionary, uses a list not set so order information can be maintained like python\n dictionaries.\n \"\"\"\n return list(self)\n\n def values(self) -> List[Any]:\n return [self[k] for k in self]\n\n def items(self) -> Iterator[Tuple[str, Any]]:\n for k in self:\n yield k, self.get(k)\n\n def __iter__(self) -> Iterator[str]:\n for name in dir(self._obj):\n if not name.startswith('_'):\n yield name\n\n def __len__(self) -> int:\n return sum(1 for _ in self)\n\n def __contains__(self, item: Any) -> bool:\n return item in self.keys()\n\n def __eq__(self, other: Any) -> bool:\n return dict(self) == dict(other.items())\n\n def __repr_args__(self) -> 'ReprArgs':\n return [(None, dict(self))]\n\n def __repr_name__(self) -> str:\n return f'GetterDict[{display_as_type(self._obj)}]'\n\n\nclass ValueItems(Representation):\n \"\"\"\n Class for more convenient calculation of excluded or included fields on values.\n \"\"\"\n\n __slots__ = ('_items', '_type')\n\n def __init__(self, value: Any, items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> None:\n items = self._coerce_items(items)\n\n if isinstance(value, (list, tuple)):\n items = self._normalize_indexes(items, len(value))\n\n self._items: 'MappingIntStrAny' 
= items\n\n def is_excluded(self, item: Any) -> bool:\n \"\"\"\n Check if item is fully excluded.\n\n :param item: key or index of a value\n \"\"\"\n return self.is_true(self._items.get(item))\n\n def is_included(self, item: Any) -> bool:\n \"\"\"\n Check if value is contained in self._items\n\n :param item: key or index of value\n \"\"\"\n return item in self._items\n\n def for_element(self, e: 'IntStr') -> Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']]:\n \"\"\"\n :param e: key or index of element on value\n :return: raw values for element if self._items is dict and contain needed element\n \"\"\"\n\n item = self._items.get(e)\n return item if not self.is_true(item) else None\n\n def _normalize_indexes(self, items: 'MappingIntStrAny', v_length: int) -> 'DictIntStrAny':\n \"\"\"\n :param items: dict or set of indexes which will be normalized\n :param v_length: length of sequence indexes of which will be\n\n >>> self._normalize_indexes({0: True, -2: True, -1: True}, 4)\n {0: True, 2: True, 3: True}\n >>> self._normalize_indexes({'__all__': True}, 4)\n {0: True, 1: True, 2: True, 3: True}\n \"\"\"\n\n normalized_items: 'DictIntStrAny' = {}\n all_items = None\n for i, v in items.items():\n if not (isinstance(v, Mapping) or isinstance(v, AbstractSet) or self.is_true(v)):\n raise TypeError(f'Unexpected type of exclude value for index \"{i}\" {v.__class__}')\n if i == '__all__':\n all_items = self._coerce_value(v)\n continue\n if not isinstance(i, int):\n raise TypeError(\n 'Excluding fields from a sequence of sub-models or dicts must be performed index-wise: '\n 'expected integer keys or keyword \"__all__\"'\n )\n normalized_i = v_length + i if i < 0 else i\n normalized_items[normalized_i] = self.merge(v, normalized_items.get(normalized_i))\n\n if not all_items:\n return normalized_items\n if self.is_true(all_items):\n for i in range(v_length):\n normalized_items.setdefault(i, ...)\n return normalized_items\n for i in range(v_length):\n normalized_item = normalized_items.setdefault(i, {})\n if not self.is_true(normalized_item):\n normalized_items[i] = self.merge(all_items, normalized_item)\n return normalized_items\n\n @classmethod\n def merge(cls, base: Any, override: Any, intersect: bool = False) -> Any:\n \"\"\"\n Merge a ``base`` item with an ``override`` item.\n\n Both ``base`` and ``override`` are converted to dictionaries if possible.\n Sets are converted to dictionaries with the sets entries as keys and\n Ellipsis as values.\n\n Each key-value pair existing in ``base`` is merged with ``override``,\n while the rest of the key-value pairs are updated recursively with this function.\n\n Merging takes place based on the \"union\" of keys if ``intersect`` is\n set to ``False`` (default) and on the intersection of keys if\n ``intersect`` is set to ``True``.\n \"\"\"\n override = cls._coerce_value(override)\n base = cls._coerce_value(base)\n if override is None:\n return base\n if cls.is_true(base) or base is None:\n return override\n if cls.is_true(override):\n return base if intersect else override\n\n # intersection or union of keys while preserving ordering:\n if intersect:\n merge_keys = [k for k in base if k in override] + [k for k in override if k in base]\n else:\n merge_keys = list(base) + [k for k in override if k not in base]\n\n merged: 'DictIntStrAny' = {}\n for k in merge_keys:\n merged_item = cls.merge(base.get(k), override.get(k), intersect=intersect)\n if merged_item is not None:\n merged[k] = merged_item\n\n return merged\n\n @staticmethod\n def 
_coerce_items(items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> 'MappingIntStrAny':\n if isinstance(items, Mapping):\n pass\n elif isinstance(items, AbstractSet):\n items = dict.fromkeys(items, ...)\n else:\n class_name = getattr(items, '__class__', '???')\n raise TypeError(f'Unexpected type of exclude value {class_name}')\n return items\n\n @classmethod\n def _coerce_value(cls, value: Any) -> Any:\n if value is None or cls.is_true(value):\n return value\n return cls._coerce_items(value)\n\n @staticmethod\n def is_true(v: Any) -> bool:\n return v is True or v is ...\n\n def __repr_args__(self) -> 'ReprArgs':\n return [(None, self._items)]\n\n\nclass ClassAttribute:\n \"\"\"\n Hide class attribute from its instances\n \"\"\"\n\n __slots__ = (\n 'name',\n 'value',\n )\n\n def __init__(self, name: str, value: Any) -> None:\n self.name = name\n self.value = value\n\n def __get__(self, instance: Any, owner: Type[Any]) -> None:\n if instance is None:\n return self.value\n raise AttributeError(f'{self.name!r} attribute of {owner.__name__!r} is class-only')\n\n\npath_types = {\n 'is_dir': 'directory',\n 'is_file': 'file',\n 'is_mount': 'mount point',\n 'is_symlink': 'symlink',\n 'is_block_device': 'block device',\n 'is_char_device': 'char device',\n 'is_fifo': 'FIFO',\n 'is_socket': 'socket',\n}\n\n\ndef path_type(p: 'Path') -> str:\n \"\"\"\n Find out what sort of thing a path is.\n \"\"\"\n assert p.exists(), 'path does not exist'\n for method, name in path_types.items():\n if getattr(p, method)():\n return name\n\n return 'unknown'\n\n\nObj = TypeVar('Obj')\n\n\ndef smart_deepcopy(obj: Obj) -> Obj:\n \"\"\"\n Return type as is for immutable built-in types\n Use obj.copy() for built-in empty collections\n Use copy.deepcopy() for non-empty collections and unknown objects\n \"\"\"\n\n obj_type = obj.__class__\n if obj_type in IMMUTABLE_NON_COLLECTIONS_TYPES:\n return obj # fastest case: obj is immutable and not collection therefore will not be copied anyway\n elif not obj and obj_type in BUILTIN_COLLECTIONS:\n # faster way for empty collections, no need to copy its members\n return obj if obj_type is tuple else obj.copy() # type: ignore # tuple doesn't have copy method\n return deepcopy(obj) # slowest way when we actually might need a deepcopy\n\n\ndef is_valid_field(name: str) -> bool:\n if not name.startswith('_'):\n return True\n return ROOT_KEY == name\n\n\ndef is_valid_private_name(name: str) -> bool:\n return not is_valid_field(name) and name not in {\n '__annotations__',\n '__classcell__',\n '__doc__',\n '__module__',\n '__orig_bases__',\n '__qualname__',\n }\n\n\n_EMPTY = object()\n\n\ndef all_identical(left: Iterable[Any], right: Iterable[Any]) -> bool:\n \"\"\"\n Check that the items of `left` are the same objects as those in `right`.\n\n >>> a, b = object(), object()\n >>> all_identical([a, b, a], [a, b, a])\n True\n >>> all_identical([a, b, [a]], [a, b, [a]]) # new list object, while \"equal\" is not \"identical\"\n False\n \"\"\"\n for left_item, right_item in zip_longest(left, right, fillvalue=_EMPTY):\n if left_item is not right_item:\n return False\n return True\n\n\ndef get_unique_discriminator_alias(all_aliases: Collection[str], discriminator_key: str) -> str:\n \"\"\"Validate that all aliases are the same and if that's the case return the alias\"\"\"\n unique_aliases = set(all_aliases)\n if len(unique_aliases) > 1:\n raise ConfigError(\n f'Aliases for discriminator {discriminator_key!r} must be the same (got {\", \".join(sorted(all_aliases))})'\n )\n return 
unique_aliases.pop()\n\n\ndef get_discriminator_alias_and_values(tp: Any, discriminator_key: str) -> Tuple[str, Tuple[str, ...]]:\n \"\"\"\n Get alias and all valid values in the `Literal` type of the discriminator field\n `tp` can be a `BaseModel` class or directly an `Annotated` `Union` of many.\n \"\"\"\n is_root_model = getattr(tp, '__custom_root_type__', False)\n\n if get_origin(tp) is Annotated:\n tp = get_args(tp)[0]\n\n if hasattr(tp, '__pydantic_model__'):\n tp = tp.__pydantic_model__\n\n if is_union(get_origin(tp)):\n alias, all_values = _get_union_alias_and_all_values(tp, discriminator_key)\n return alias, tuple(v for values in all_values for v in values)\n elif is_root_model:\n union_type = tp.__fields__[ROOT_KEY].type_\n alias, all_values = _get_union_alias_and_all_values(union_type, discriminator_key)\n\n if len(set(all_values)) > 1:\n raise ConfigError(\n f'Field {discriminator_key!r} is not the same for all submodels of {display_as_type(tp)!r}'\n )\n\n return alias, all_values[0]\n\n else:\n try:\n t_discriminator_type = tp.__fields__[discriminator_key].type_\n except AttributeError as e:\n raise TypeError(f'Type {tp.__name__!r} is not a valid `BaseModel` or `dataclass`') from e\n except KeyError as e:\n raise ConfigError(f'Model {tp.__name__!r} needs a discriminator field for key {discriminator_key!r}') from e\n\n if not is_literal_type(t_discriminator_type):\n raise ConfigError(f'Field {discriminator_key!r} of model {tp.__name__!r} needs to be a `Literal`')\n\n return tp.__fields__[discriminator_key].alias, all_literal_values(t_discriminator_type)\n\n\ndef _get_union_alias_and_all_values(\n union_type: Type[Any], discriminator_key: str\n) -> Tuple[str, Tuple[Tuple[str, ...], ...]]:\n zipped_aliases_values = [get_discriminator_alias_and_values(t, discriminator_key) for t in get_args(union_type)]\n # unzip: [('alias_a',('v1', 'v2)), ('alias_b', ('v3',))] => [('alias_a', 'alias_b'), (('v1', 'v2'), ('v3',))]\n all_aliases, all_values = zip(*zipped_aliases_values)\n return get_unique_discriminator_alias(all_aliases, discriminator_key), all_values\n\n\nKT = TypeVar('KT')\nVT = TypeVar('VT')\nif TYPE_CHECKING:\n # Annoying inheriting from `MutableMapping` and `dict` breaks cython, hence this work around\n class LimitedDict(dict, MutableMapping[KT, VT]): # type: ignore[type-arg]\n def __init__(self, size_limit: int = 1000):\n ...\n\nelse:\n\n class LimitedDict(dict):\n \"\"\"\n Limit the size/length of a dict used for caching to avoid unlimited increase in memory usage.\n\n Since the dict is ordered, and we always remove elements from the beginning, this is effectively a FIFO cache.\n\n Annoying inheriting from `MutableMapping` breaks cython.\n \"\"\"\n\n def __init__(self, size_limit: int = 1000):\n self.size_limit = size_limit\n super().__init__()\n\n def __setitem__(self, __key: Any, __value: Any) -> None:\n super().__setitem__(__key, __value)\n if len(self) > self.size_limit:\n excess = len(self) - self.size_limit + self.size_limit // 10\n to_remove = list(self.keys())[:excess]\n for key in to_remove:\n del self[key]\n\n def __class_getitem__(cls, *args: Any) -> Any:\n # to avoid errors with 3.7\n pass\n", "path": "pydantic/utils.py" } ]
[ { "content": "import warnings\nimport weakref\nfrom collections import OrderedDict, defaultdict, deque\nfrom copy import deepcopy\nfrom itertools import islice, zip_longest\nfrom types import BuiltinFunctionType, CodeType, FunctionType, GeneratorType, LambdaType, ModuleType\nfrom typing import (\n TYPE_CHECKING,\n AbstractSet,\n Any,\n Callable,\n Collection,\n Dict,\n Generator,\n Iterable,\n Iterator,\n List,\n Mapping,\n MutableMapping,\n Optional,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nfrom typing_extensions import Annotated\n\nfrom .errors import ConfigError\nfrom .typing import (\n NoneType,\n WithArgsTypes,\n all_literal_values,\n display_as_type,\n get_args,\n get_origin,\n is_literal_type,\n is_union,\n)\nfrom .version import version_info\n\nif TYPE_CHECKING:\n from inspect import Signature\n from pathlib import Path\n\n from .config import BaseConfig\n from .dataclasses import Dataclass\n from .fields import ModelField\n from .main import BaseModel\n from .typing import AbstractSetIntStr, DictIntStrAny, IntStr, MappingIntStrAny, ReprArgs\n\n RichReprResult = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]]\n\n__all__ = (\n 'import_string',\n 'sequence_like',\n 'validate_field_name',\n 'lenient_isinstance',\n 'lenient_issubclass',\n 'in_ipython',\n 'deep_update',\n 'update_not_none',\n 'almost_equal_floats',\n 'get_model',\n 'to_camel',\n 'is_valid_field',\n 'smart_deepcopy',\n 'PyObjectStr',\n 'Representation',\n 'GetterDict',\n 'ValueItems',\n 'version_info', # required here to match behaviour in v1.3\n 'ClassAttribute',\n 'path_type',\n 'ROOT_KEY',\n 'get_unique_discriminator_alias',\n 'get_discriminator_alias_and_values',\n 'LimitedDict',\n)\n\nROOT_KEY = '__root__'\n# these are types that are returned unchanged by deepcopy\nIMMUTABLE_NON_COLLECTIONS_TYPES: Set[Type[Any]] = {\n int,\n float,\n complex,\n str,\n bool,\n bytes,\n type,\n NoneType,\n FunctionType,\n BuiltinFunctionType,\n LambdaType,\n weakref.ref,\n CodeType,\n # note: including ModuleType will differ from behaviour of deepcopy by not producing error.\n # It might be not a good idea in general, but considering that this function used only internally\n # against default values of fields, this will allow to actually have a field with module as default value\n ModuleType,\n NotImplemented.__class__,\n Ellipsis.__class__,\n}\n\n# these are types that if empty, might be copied with simple copy() instead of deepcopy()\nBUILTIN_COLLECTIONS: Set[Type[Any]] = {\n list,\n set,\n tuple,\n frozenset,\n dict,\n OrderedDict,\n defaultdict,\n deque,\n}\n\n\ndef import_string(dotted_path: str) -> Any:\n \"\"\"\n Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the\n last name in the path. 
Raise ImportError if the import fails.\n \"\"\"\n from importlib import import_module\n\n try:\n module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)\n except ValueError as e:\n raise ImportError(f'\"{dotted_path}\" doesn\\'t look like a module path') from e\n\n module = import_module(module_path)\n try:\n return getattr(module, class_name)\n except AttributeError as e:\n raise ImportError(f'Module \"{module_path}\" does not define a \"{class_name}\" attribute') from e\n\n\ndef truncate(v: Union[str], *, max_len: int = 80) -> str:\n \"\"\"\n Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long\n \"\"\"\n warnings.warn('`truncate` is no-longer used by pydantic and is deprecated', DeprecationWarning)\n if isinstance(v, str) and len(v) > (max_len - 2):\n # -3 so quote + string + … + quote has correct length\n return (v[: (max_len - 3)] + '…').__repr__()\n try:\n v = v.__repr__()\n except TypeError:\n v = v.__class__.__repr__(v) # in case v is a type\n if len(v) > max_len:\n v = v[: max_len - 1] + '…'\n return v\n\n\ndef sequence_like(v: Any) -> bool:\n return isinstance(v, (list, tuple, set, frozenset, GeneratorType, deque))\n\n\ndef validate_field_name(bases: List[Type['BaseModel']], field_name: str) -> None:\n \"\"\"\n Ensure that the field's name does not shadow an existing attribute of the model.\n \"\"\"\n for base in bases:\n if getattr(base, field_name, None):\n raise NameError(\n f'Field name \"{field_name}\" shadows a BaseModel attribute; '\n f'use a different field name with \"alias=\\'{field_name}\\'\".'\n )\n\n\ndef lenient_isinstance(o: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...], None]) -> bool:\n try:\n return isinstance(o, class_or_tuple) # type: ignore[arg-type]\n except TypeError:\n return False\n\n\ndef lenient_issubclass(cls: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...], None]) -> bool:\n try:\n return isinstance(cls, type) and issubclass(cls, class_or_tuple) # type: ignore[arg-type]\n except TypeError:\n if isinstance(cls, WithArgsTypes):\n return False\n raise # pragma: no cover\n\n\ndef in_ipython() -> bool:\n \"\"\"\n Check whether we're in an ipython environment, including jupyter notebooks.\n \"\"\"\n try:\n eval('__IPYTHON__')\n except NameError:\n return False\n else: # pragma: no cover\n return True\n\n\nKeyType = TypeVar('KeyType')\n\n\ndef deep_update(mapping: Dict[KeyType, Any], *updating_mappings: Dict[KeyType, Any]) -> Dict[KeyType, Any]:\n updated_mapping = mapping.copy()\n for updating_mapping in updating_mappings:\n for k, v in updating_mapping.items():\n if k in updated_mapping and isinstance(updated_mapping[k], dict) and isinstance(v, dict):\n updated_mapping[k] = deep_update(updated_mapping[k], v)\n else:\n updated_mapping[k] = v\n return updated_mapping\n\n\ndef update_not_none(mapping: Dict[Any, Any], **update: Any) -> None:\n mapping.update({k: v for k, v in update.items() if v is not None})\n\n\ndef almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool:\n \"\"\"\n Return True if two floats are almost equal\n \"\"\"\n return abs(value_1 - value_2) <= delta\n\n\ndef generate_model_signature(\n init: Callable[..., None], fields: Dict[str, 'ModelField'], config: Type['BaseConfig']\n) -> 'Signature':\n \"\"\"\n Generate signature for model based on its fields\n \"\"\"\n from inspect import Parameter, Signature, signature\n\n from .config import Extra\n\n present_params = signature(init).parameters.values()\n merged_params: Dict[str, Parameter] = {}\n 
var_kw = None\n use_var_kw = False\n\n for param in islice(present_params, 1, None): # skip self arg\n if param.kind is param.VAR_KEYWORD:\n var_kw = param\n continue\n merged_params[param.name] = param\n\n if var_kw: # if custom init has no var_kw, fields which are not declared in it cannot be passed through\n allow_names = config.allow_population_by_field_name\n for field_name, field in fields.items():\n param_name = field.alias\n if field_name in merged_params or param_name in merged_params:\n continue\n elif not param_name.isidentifier():\n if allow_names and field_name.isidentifier():\n param_name = field_name\n else:\n use_var_kw = True\n continue\n\n # TODO: replace annotation with actual expected types once #1055 solved\n kwargs = {'default': field.default} if not field.required else {}\n merged_params[param_name] = Parameter(\n param_name, Parameter.KEYWORD_ONLY, annotation=field.outer_type_, **kwargs\n )\n\n if config.extra is Extra.allow:\n use_var_kw = True\n\n if var_kw and use_var_kw:\n # Make sure the parameter for extra kwargs\n # does not have the same name as a field\n default_model_signature = [\n ('__pydantic_self__', Parameter.POSITIONAL_OR_KEYWORD),\n ('data', Parameter.VAR_KEYWORD),\n ]\n if [(p.name, p.kind) for p in present_params] == default_model_signature:\n # if this is the standard model signature, use extra_data as the extra args name\n var_kw_name = 'extra_data'\n else:\n # else start from var_kw\n var_kw_name = var_kw.name\n\n # generate a name that's definitely unique\n while var_kw_name in fields:\n var_kw_name += '_'\n merged_params[var_kw_name] = var_kw.replace(name=var_kw_name)\n\n return Signature(parameters=list(merged_params.values()), return_annotation=None)\n\n\ndef get_model(obj: Union[Type['BaseModel'], Type['Dataclass']]) -> Type['BaseModel']:\n from .main import BaseModel\n\n try:\n model_cls = obj.__pydantic_model__ # type: ignore\n except AttributeError:\n model_cls = obj\n\n if not issubclass(model_cls, BaseModel):\n raise TypeError('Unsupported type, must be either BaseModel or dataclass')\n return model_cls\n\n\ndef to_camel(string: str) -> str:\n return ''.join(word.capitalize() for word in string.split('_'))\n\n\ndef to_lower_camel(string: str) -> str:\n if len(string) >= 1:\n pascal_string = to_camel(string)\n return pascal_string[0].lower() + pascal_string[1:]\n return string.lower()\n\n\nT = TypeVar('T')\n\n\ndef unique_list(\n input_list: Union[List[T], Tuple[T, ...]],\n *,\n name_factory: Callable[[T], str] = str,\n) -> List[T]:\n \"\"\"\n Make a list unique while maintaining order.\n We update the list if another one with the same name is set\n (e.g. root validator overridden in subclass)\n \"\"\"\n result: List[T] = []\n result_names: List[str] = []\n for v in input_list:\n v_name = name_factory(v)\n if v_name not in result_names:\n result_names.append(v_name)\n result.append(v)\n else:\n result[result_names.index(v_name)] = v\n\n return result\n\n\nclass PyObjectStr(str):\n \"\"\"\n String class where repr doesn't include quotes. Useful with Representation when you want to return a string\n representation of something that valid (or pseudo-valid) python.\n \"\"\"\n\n def __repr__(self) -> str:\n return str(self)\n\n\nclass Representation:\n \"\"\"\n Mixin to provide __str__, __repr__, and __pretty__ methods. See #884 for more details.\n\n __pretty__ is used by [devtools](https://python-devtools.helpmanual.io/) to provide human readable representations\n of objects.\n \"\"\"\n\n __slots__: Tuple[str, ...] 
= tuple()\n\n def __repr_args__(self) -> 'ReprArgs':\n \"\"\"\n Returns the attributes to show in __str__, __repr__, and __pretty__ this is generally overridden.\n\n Can either return:\n * name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]`\n * or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]`\n \"\"\"\n attrs = ((s, getattr(self, s)) for s in self.__slots__)\n return [(a, v) for a, v in attrs if v is not None]\n\n def __repr_name__(self) -> str:\n \"\"\"\n Name of the instance's class, used in __repr__.\n \"\"\"\n return self.__class__.__name__\n\n def __repr_str__(self, join_str: str) -> str:\n return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__())\n\n def __pretty__(self, fmt: Callable[[Any], Any], **kwargs: Any) -> Generator[Any, None, None]:\n \"\"\"\n Used by devtools (https://python-devtools.helpmanual.io/) to provide a human readable representations of objects\n \"\"\"\n yield self.__repr_name__() + '('\n yield 1\n for name, value in self.__repr_args__():\n if name is not None:\n yield name + '='\n yield fmt(value)\n yield ','\n yield 0\n yield -1\n yield ')'\n\n def __str__(self) -> str:\n return self.__repr_str__(' ')\n\n def __repr__(self) -> str:\n return f'{self.__repr_name__()}({self.__repr_str__(\", \")})'\n\n def __rich_repr__(self) -> 'RichReprResult':\n \"\"\"Get fields for Rich library\"\"\"\n for name, field_repr in self.__repr_args__():\n if name is None:\n yield field_repr\n else:\n yield name, field_repr\n\n\nclass GetterDict(Representation):\n \"\"\"\n Hack to make object's smell just enough like dicts for validate_model.\n\n We can't inherit from Mapping[str, Any] because it upsets cython so we have to implement all methods ourselves.\n \"\"\"\n\n __slots__ = ('_obj',)\n\n def __init__(self, obj: Any):\n self._obj = obj\n\n def __getitem__(self, key: str) -> Any:\n try:\n return getattr(self._obj, key)\n except AttributeError as e:\n raise KeyError(key) from e\n\n def get(self, key: Any, default: Any = None) -> Any:\n return getattr(self._obj, key, default)\n\n def extra_keys(self) -> Set[Any]:\n \"\"\"\n We don't want to get any other attributes of obj if the model didn't explicitly ask for them\n \"\"\"\n return set()\n\n def keys(self) -> List[Any]:\n \"\"\"\n Keys of the pseudo dictionary, uses a list not set so order information can be maintained like python\n dictionaries.\n \"\"\"\n return list(self)\n\n def values(self) -> List[Any]:\n return [self[k] for k in self]\n\n def items(self) -> Iterator[Tuple[str, Any]]:\n for k in self:\n yield k, self.get(k)\n\n def __iter__(self) -> Iterator[str]:\n for name in dir(self._obj):\n if not name.startswith('_'):\n yield name\n\n def __len__(self) -> int:\n return sum(1 for _ in self)\n\n def __contains__(self, item: Any) -> bool:\n return item in self.keys()\n\n def __eq__(self, other: Any) -> bool:\n return dict(self) == dict(other.items())\n\n def __repr_args__(self) -> 'ReprArgs':\n return [(None, dict(self))]\n\n def __repr_name__(self) -> str:\n return f'GetterDict[{display_as_type(self._obj)}]'\n\n\nclass ValueItems(Representation):\n \"\"\"\n Class for more convenient calculation of excluded or included fields on values.\n \"\"\"\n\n __slots__ = ('_items', '_type')\n\n def __init__(self, value: Any, items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> None:\n items = self._coerce_items(items)\n\n if isinstance(value, (list, tuple)):\n items = self._normalize_indexes(items, len(value))\n\n self._items: 'MappingIntStrAny' 
= items\n\n def is_excluded(self, item: Any) -> bool:\n \"\"\"\n Check if item is fully excluded.\n\n :param item: key or index of a value\n \"\"\"\n return self.is_true(self._items.get(item))\n\n def is_included(self, item: Any) -> bool:\n \"\"\"\n Check if value is contained in self._items\n\n :param item: key or index of value\n \"\"\"\n return item in self._items\n\n def for_element(self, e: 'IntStr') -> Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']]:\n \"\"\"\n :param e: key or index of element on value\n :return: raw values for element if self._items is dict and contain needed element\n \"\"\"\n\n item = self._items.get(e)\n return item if not self.is_true(item) else None\n\n def _normalize_indexes(self, items: 'MappingIntStrAny', v_length: int) -> 'DictIntStrAny':\n \"\"\"\n :param items: dict or set of indexes which will be normalized\n :param v_length: length of sequence indexes of which will be\n\n >>> self._normalize_indexes({0: True, -2: True, -1: True}, 4)\n {0: True, 2: True, 3: True}\n >>> self._normalize_indexes({'__all__': True}, 4)\n {0: True, 1: True, 2: True, 3: True}\n \"\"\"\n\n normalized_items: 'DictIntStrAny' = {}\n all_items = None\n for i, v in items.items():\n if not (isinstance(v, Mapping) or isinstance(v, AbstractSet) or self.is_true(v)):\n raise TypeError(f'Unexpected type of exclude value for index \"{i}\" {v.__class__}')\n if i == '__all__':\n all_items = self._coerce_value(v)\n continue\n if not isinstance(i, int):\n raise TypeError(\n 'Excluding fields from a sequence of sub-models or dicts must be performed index-wise: '\n 'expected integer keys or keyword \"__all__\"'\n )\n normalized_i = v_length + i if i < 0 else i\n normalized_items[normalized_i] = self.merge(v, normalized_items.get(normalized_i))\n\n if not all_items:\n return normalized_items\n if self.is_true(all_items):\n for i in range(v_length):\n normalized_items.setdefault(i, ...)\n return normalized_items\n for i in range(v_length):\n normalized_item = normalized_items.setdefault(i, {})\n if not self.is_true(normalized_item):\n normalized_items[i] = self.merge(all_items, normalized_item)\n return normalized_items\n\n @classmethod\n def merge(cls, base: Any, override: Any, intersect: bool = False) -> Any:\n \"\"\"\n Merge a ``base`` item with an ``override`` item.\n\n Both ``base`` and ``override`` are converted to dictionaries if possible.\n Sets are converted to dictionaries with the sets entries as keys and\n Ellipsis as values.\n\n Each key-value pair existing in ``base`` is merged with ``override``,\n while the rest of the key-value pairs are updated recursively with this function.\n\n Merging takes place based on the \"union\" of keys if ``intersect`` is\n set to ``False`` (default) and on the intersection of keys if\n ``intersect`` is set to ``True``.\n \"\"\"\n override = cls._coerce_value(override)\n base = cls._coerce_value(base)\n if override is None:\n return base\n if cls.is_true(base) or base is None:\n return override\n if cls.is_true(override):\n return base if intersect else override\n\n # intersection or union of keys while preserving ordering:\n if intersect:\n merge_keys = [k for k in base if k in override] + [k for k in override if k in base]\n else:\n merge_keys = list(base) + [k for k in override if k not in base]\n\n merged: 'DictIntStrAny' = {}\n for k in merge_keys:\n merged_item = cls.merge(base.get(k), override.get(k), intersect=intersect)\n if merged_item is not None:\n merged[k] = merged_item\n\n return merged\n\n @staticmethod\n def 
_coerce_items(items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> 'MappingIntStrAny':\n if isinstance(items, Mapping):\n pass\n elif isinstance(items, AbstractSet):\n items = dict.fromkeys(items, ...)\n else:\n class_name = getattr(items, '__class__', '???')\n raise TypeError(f'Unexpected type of exclude value {class_name}')\n return items\n\n @classmethod\n def _coerce_value(cls, value: Any) -> Any:\n if value is None or cls.is_true(value):\n return value\n return cls._coerce_items(value)\n\n @staticmethod\n def is_true(v: Any) -> bool:\n return v is True or v is ...\n\n def __repr_args__(self) -> 'ReprArgs':\n return [(None, self._items)]\n\n\nclass ClassAttribute:\n \"\"\"\n Hide class attribute from its instances\n \"\"\"\n\n __slots__ = (\n 'name',\n 'value',\n )\n\n def __init__(self, name: str, value: Any) -> None:\n self.name = name\n self.value = value\n\n def __get__(self, instance: Any, owner: Type[Any]) -> None:\n if instance is None:\n return self.value\n raise AttributeError(f'{self.name!r} attribute of {owner.__name__!r} is class-only')\n\n\npath_types = {\n 'is_dir': 'directory',\n 'is_file': 'file',\n 'is_mount': 'mount point',\n 'is_symlink': 'symlink',\n 'is_block_device': 'block device',\n 'is_char_device': 'char device',\n 'is_fifo': 'FIFO',\n 'is_socket': 'socket',\n}\n\n\ndef path_type(p: 'Path') -> str:\n \"\"\"\n Find out what sort of thing a path is.\n \"\"\"\n assert p.exists(), 'path does not exist'\n for method, name in path_types.items():\n if getattr(p, method)():\n return name\n\n return 'unknown'\n\n\nObj = TypeVar('Obj')\n\n\ndef smart_deepcopy(obj: Obj) -> Obj:\n \"\"\"\n Return type as is for immutable built-in types\n Use obj.copy() for built-in empty collections\n Use copy.deepcopy() for non-empty collections and unknown objects\n \"\"\"\n\n obj_type = obj.__class__\n if obj_type in IMMUTABLE_NON_COLLECTIONS_TYPES:\n return obj # fastest case: obj is immutable and not collection therefore will not be copied anyway\n elif not obj and obj_type in BUILTIN_COLLECTIONS:\n # faster way for empty collections, no need to copy its members\n return obj if obj_type is tuple else obj.copy() # type: ignore # tuple doesn't have copy method\n return deepcopy(obj) # slowest way when we actually might need a deepcopy\n\n\ndef is_valid_field(name: str) -> bool:\n if not name.startswith('_'):\n return True\n return ROOT_KEY == name\n\n\ndef is_valid_private_name(name: str) -> bool:\n return not is_valid_field(name) and name not in {\n '__annotations__',\n '__classcell__',\n '__doc__',\n '__module__',\n '__orig_bases__',\n '__qualname__',\n }\n\n\n_EMPTY = object()\n\n\ndef all_identical(left: Iterable[Any], right: Iterable[Any]) -> bool:\n \"\"\"\n Check that the items of `left` are the same objects as those in `right`.\n\n >>> a, b = object(), object()\n >>> all_identical([a, b, a], [a, b, a])\n True\n >>> all_identical([a, b, [a]], [a, b, [a]]) # new list object, while \"equal\" is not \"identical\"\n False\n \"\"\"\n for left_item, right_item in zip_longest(left, right, fillvalue=_EMPTY):\n if left_item is not right_item:\n return False\n return True\n\n\ndef get_unique_discriminator_alias(all_aliases: Collection[str], discriminator_key: str) -> str:\n \"\"\"Validate that all aliases are the same and if that's the case return the alias\"\"\"\n unique_aliases = set(all_aliases)\n if len(unique_aliases) > 1:\n raise ConfigError(\n f'Aliases for discriminator {discriminator_key!r} must be the same (got {\", \".join(sorted(all_aliases))})'\n )\n return 
unique_aliases.pop()\n\n\ndef get_discriminator_alias_and_values(tp: Any, discriminator_key: str) -> Tuple[str, Tuple[str, ...]]:\n \"\"\"\n Get alias and all valid values in the `Literal` type of the discriminator field\n `tp` can be a `BaseModel` class or directly an `Annotated` `Union` of many.\n \"\"\"\n is_root_model = getattr(tp, '__custom_root_type__', False)\n\n if get_origin(tp) is Annotated:\n tp = get_args(tp)[0]\n\n if hasattr(tp, '__pydantic_model__'):\n tp = tp.__pydantic_model__\n\n if is_union(get_origin(tp)):\n alias, all_values = _get_union_alias_and_all_values(tp, discriminator_key)\n return alias, tuple(v for values in all_values for v in values)\n elif is_root_model:\n union_type = tp.__fields__[ROOT_KEY].type_\n alias, all_values = _get_union_alias_and_all_values(union_type, discriminator_key)\n\n if len(set(all_values)) > 1:\n raise ConfigError(\n f'Field {discriminator_key!r} is not the same for all submodels of {display_as_type(tp)!r}'\n )\n\n return alias, all_values[0]\n\n else:\n try:\n t_discriminator_type = tp.__fields__[discriminator_key].type_\n except AttributeError as e:\n raise TypeError(f'Type {tp.__name__!r} is not a valid `BaseModel` or `dataclass`') from e\n except KeyError as e:\n raise ConfigError(f'Model {tp.__name__!r} needs a discriminator field for key {discriminator_key!r}') from e\n\n if not is_literal_type(t_discriminator_type):\n raise ConfigError(f'Field {discriminator_key!r} of model {tp.__name__!r} needs to be a `Literal`')\n\n return tp.__fields__[discriminator_key].alias, all_literal_values(t_discriminator_type)\n\n\ndef _get_union_alias_and_all_values(\n union_type: Type[Any], discriminator_key: str\n) -> Tuple[str, Tuple[Tuple[str, ...], ...]]:\n zipped_aliases_values = [get_discriminator_alias_and_values(t, discriminator_key) for t in get_args(union_type)]\n # unzip: [('alias_a',('v1', 'v2)), ('alias_b', ('v3',))] => [('alias_a', 'alias_b'), (('v1', 'v2'), ('v3',))]\n all_aliases, all_values = zip(*zipped_aliases_values)\n return get_unique_discriminator_alias(all_aliases, discriminator_key), all_values\n\n\nKT = TypeVar('KT')\nVT = TypeVar('VT')\nif TYPE_CHECKING:\n # Annoying inheriting from `MutableMapping` and `dict` breaks cython, hence this work around\n class LimitedDict(dict, MutableMapping[KT, VT]): # type: ignore[type-arg]\n def __init__(self, size_limit: int = 1000):\n ...\n\nelse:\n\n class LimitedDict(dict):\n \"\"\"\n Limit the size/length of a dict used for caching to avoid unlimited increase in memory usage.\n\n Since the dict is ordered, and we always remove elements from the beginning, this is effectively a FIFO cache.\n\n Annoying inheriting from `MutableMapping` breaks cython.\n \"\"\"\n\n def __init__(self, size_limit: int = 1000):\n self.size_limit = size_limit\n super().__init__()\n\n def __setitem__(self, __key: Any, __value: Any) -> None:\n super().__setitem__(__key, __value)\n if len(self) > self.size_limit:\n excess = len(self) - self.size_limit + self.size_limit // 10\n to_remove = list(self.keys())[:excess]\n for key in to_remove:\n del self[key]\n\n def __class_getitem__(cls, *args: Any) -> Any:\n # to avoid errors with 3.7\n pass\n", "path": "pydantic/utils.py" } ]
diff --git a/changes/3463-schlerp.md b/changes/3463-schlerp.md new file mode 100644 index 00000000000..bb1fb75d3e4 --- /dev/null +++ b/changes/3463-schlerp.md @@ -0,0 +1 @@ +created new function `to_lower_camel()` for "non pascal case" camel case. diff --git a/docs/usage/model_config.md b/docs/usage/model_config.md index b6c15e06c1c..8f05fa31566 100644 --- a/docs/usage/model_config.md +++ b/docs/usage/model_config.md @@ -143,7 +143,7 @@ _(This script is complete, it should run "as is")_ Here camel case refers to ["upper camel case"](https://en.wikipedia.org/wiki/Camel_case) aka pascal case e.g. `CamelCase`. If you'd like instead to use lower camel case e.g. `camelCase`, -it should be trivial to modify the `to_camel` function above. +instead use the `to_lower_camel` function. ## Alias Precedence diff --git a/pydantic/utils.py b/pydantic/utils.py index 31e74771387..62ec3fede18 100644 --- a/pydantic/utils.py +++ b/pydantic/utils.py @@ -303,6 +303,13 @@ def to_camel(string: str) -> str: return ''.join(word.capitalize() for word in string.split('_')) +def to_lower_camel(string: str) -> str: + if len(string) >= 1: + pascal_string = to_camel(string) + return pascal_string[0].lower() + pascal_string[1:] + return string.lower() + + T = TypeVar('T') diff --git a/tests/test_utils.py b/tests/test_utils.py index d7eaa74c697..eb7caa95fdb 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -37,6 +37,7 @@ lenient_issubclass, path_type, smart_deepcopy, + to_lower_camel, truncate, unique_list, ) @@ -528,6 +529,18 @@ def test_undefined_pickle(): assert undefined2 is Undefined +def test_on_lower_camel_zero_length(): + assert to_lower_camel('') == '' + + +def test_on_lower_camel_one_length(): + assert to_lower_camel('a') == 'a' + + +def test_on_lower_camel_many_length(): + assert to_lower_camel('i_like_turtles') == 'iLikeTurtles' + + def test_limited_dict(): d = LimitedDict(10) d[1] = '1'
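For readers skimming the record above: the diff introduces a `to_lower_camel` helper next to pydantic's existing `to_camel`. The following is a self-contained sketch restating both helpers as they appear in the diff (the assertions mirror the added tests); it is an illustration of the conversion, not a substitute for the library code.

```python
def to_camel(string: str) -> str:
    # PascalCase: capitalize every underscore-separated word ("i_like_turtles" -> "ILikeTurtles")
    return "".join(word.capitalize() for word in string.split("_"))


def to_lower_camel(string: str) -> str:
    # lowerCamelCase: build PascalCase first, then lower-case only the leading character
    if len(string) >= 1:
        pascal_string = to_camel(string)
        return pascal_string[0].lower() + pascal_string[1:]
    return string.lower()


# Mirrors the tests added in tests/test_utils.py above.
assert to_lower_camel("") == ""
assert to_lower_camel("a") == "a"
assert to_lower_camel("i_like_turtles") == "iLikeTurtles"
```

As the docs change in the same diff notes, `to_lower_camel` is meant for `camelCase` aliases, where previously users were told to modify `to_camel` by hand.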
pyodide__pyodide-4554
`OSError: [Errno 9] Bad file descriptor` when trying to load `.npy` files, works with `.npz` file format

## 🐛 Bug

NumPy is unable to load arrays from `.npy` binaries, but it can read from compressed `.npz` archives.

### To Reproduce

I noticed this error when compiling PyWavelets (`pywt`) from source via the Emscripten toolchain. In an activated virtual environment created by Pyodide, run the following:

```bash
git clone https://github.com/PyWavelets/pywt.git
pip install .
```

and then

```python
import pywt
import numpy as np

aero = pywt.data.aero()
ref = np.array([[178, 178, 179], [170, 173, 171], [185, 174, 171]])
np.testing.assert_allclose(aero[:3, :3], ref)
```

should fail. However, [after converting](https://github.com/PyWavelets/pywt/pull/701/files#diff-86b5b5c7cbe8cc8368f6991c914b7263019507351ce567543cbf2b627b91aa57) these `.npy` files to `.npz`, NumPy can safely load the arrays from the files as requested. Here is an example of conversion from `.npy` to `.npz`:

```python
import numpy as np

ecg = np.load("pywt/data/ecg.npy")
np.savez(file="ecg.npz", data=ecg)
```

after which `ecg.npz` can be loaded as follows:

```python
import numpy as np

loaded = np.load("ecg.npz")
print(loaded["data"])
```

### Expected behavior

The Pyodide environment should be able to load the `.npy` file format stored in a directory, but [fails with multiple `OSError`s](https://github.com/agriyakhetarpal/pywt/actions/runs/7993252511/job/21828629911), possibly due to the lack of a server for filesystem access, as the Pyodide documentation mentions – but that doesn't explain why `.npz` files work. The expected behaviour is that all file formats work.

### Environment

- Pyodide Version: `pyodide-build` version 0.25.0
- Browser version: N/A

### Additional context

xref: https://github.com/PyWavelets/pywt/pull/701
[ { "content": "\"\"\"\nVarious common utilities for testing.\n\"\"\"\n\nimport contextlib\nimport os\nimport pathlib\nimport re\nimport sys\nfrom collections.abc import Sequence\n\nimport pytest\n\nROOT_PATH = pathlib.Path(__file__).parents[0].resolve()\nDIST_PATH = ROOT_PATH / \"dist\"\n\nsys.path.append(str(ROOT_PATH / \"pyodide-build\"))\nsys.path.append(str(ROOT_PATH / \"src\" / \"py\"))\n\nimport pytest_pyodide.runner\n\n# importing this fixture has a side effect of making the safari webdriver reused during the session\nfrom pytest_pyodide.runner import use_global_safari_service # noqa: F401\nfrom pytest_pyodide.utils import package_is_built as _package_is_built\n\nos.environ[\"IN_PYTEST\"] = \"1\"\npytest_pyodide.runner.CHROME_FLAGS.extend(\n [\n \"--enable-features=WebAssemblyExperimentalJSPI\",\n \"--enable-experimental-webassembly-features\",\n ]\n)\npytest_pyodide.runner.NODE_FLAGS.extend([\"--experimental-wasm-stack-switching\"])\n\n# There are a bunch of global objects that occasionally enter the hiwire cache\n# but never leave. The refcount checks get angry about them if they aren't preloaded.\n# We need to go through and touch them all once to keep everything okay.\npytest_pyodide.runner.INITIALIZE_SCRIPT = \"\"\"\n pyodide.globals.get;\n pyodide.runPython(\"import pyodide_js._api; del pyodide_js\");\n pyodide._api.importlib.invalidate_caches;\n pyodide._api.package_loader.unpack_buffer;\n pyodide._api.package_loader.get_dynlibs;\n pyodide._api.pyodide_code.eval_code;\n pyodide._api.pyodide_code.eval_code_async;\n pyodide._api.pyodide_code.find_imports;\n pyodide._api.pyodide_ffi.register_js_module;\n pyodide._api.pyodide_ffi.unregister_js_module;\n pyodide.pyimport(\"pyodide.ffi.wrappers\").destroy();\n pyodide.pyimport(\"pyodide.http\").destroy();\n pyodide.pyimport(\"pyodide_js._api\");\n\"\"\"\n\nonly_node = pytest.mark.xfail_browsers(\n chrome=\"node only\", firefox=\"node only\", safari=\"node only\"\n)\n\n\ndef pytest_addoption(parser):\n group = parser.getgroup(\"general\")\n group.addoption(\n \"--run-xfail\",\n action=\"store_true\",\n help=\"If provided, tests marked as xfail will be run\",\n )\n group.addoption(\n \"--skip-passed\",\n action=\"store_true\",\n help=(\n \"If provided, tests that passed on the last run will be skipped. \"\n \"CAUTION: this will skip tests even if tests are modified\"\n ),\n )\n\n\ndef maybe_skip_test(item, delayed=False):\n \"\"\"If necessary skip test at the fixture level, to avoid\n loading the selenium_standalone fixture which takes a long time.\n \"\"\"\n browsers = \"|\".join([\"firefox\", \"chrome\", \"node\", \"safari\"])\n is_common_test = str(item.fspath).endswith(\"test_packages_common.py\")\n\n skip_msg = None\n # Testing a package. Skip the test if the package is not built.\n match = re.match(\n r\".*/packages/(?P<name>[\\w\\-]+)/test_[\\w\\-]+\\.py\", str(item.parent.fspath)\n )\n if match and not is_common_test:\n package_name = match.group(\"name\")\n if not package_is_built(package_name) and re.match(\n rf\"test_[\\w\\-\\.]+\\[({browsers})[^\\]]*\\]\", item.name\n ):\n skip_msg = f\"package '{package_name}' is not built.\"\n\n # Common package import test. 
Skip it if the package is not built.\n if skip_msg is None and is_common_test and item.name.startswith(\"test_import\"):\n if not pytest.pyodide_runtimes: # type:ignore[attr-defined]\n skip_msg = \"Not running browser tests\"\n\n else:\n match = re.match(\n rf\"test_import\\[({browsers})-(?P<name>[\\w\\-\\.]+)\\]\", item.name\n )\n if match:\n package_name = match.group(\"name\")\n if not package_is_built(package_name):\n # selenium_standalone as it takes a long time to initialize\n skip_msg = f\"package '{package_name}' is not built.\"\n else:\n raise AssertionError(\n f\"Couldn't parse package name from {item.name}. This should not happen!\"\n ) # If the test is going to be skipped remove the\n\n # TODO: also use this hook to skip doctests we cannot run (or run them\n # inside the selenium wrapper)\n\n if skip_msg is not None:\n if delayed:\n item.add_marker(pytest.mark.skip(reason=skip_msg))\n else:\n pytest.skip(skip_msg)\n\n\ndef pytest_configure(config):\n \"\"\"Monkey patch the function cwd_relative_nodeid\n\n returns the description of a test for the short summary table. Monkey patch\n it to reduce the verbosity of the test names in the table. This leaves\n enough room to see the information about the test failure in the summary.\n \"\"\"\n global CONFIG\n\n old_cwd_relative_nodeid = config.cwd_relative_nodeid\n\n def cwd_relative_nodeid(*args):\n result = old_cwd_relative_nodeid(*args)\n result = result.replace(\"src/tests/\", \"\")\n result = result.replace(\"packages/\", \"\")\n result = result.replace(\"::test_\", \"::\")\n return result\n\n config.cwd_relative_nodeid = cwd_relative_nodeid\n\n pytest.pyodide_dist_dir = config.getoption(\"--dist-dir\") # type:ignore[attr-defined]\n\n\ndef pytest_collection_modifyitems(config, items):\n \"\"\"Called after collect is completed.\n Parameters\n ----------\n config : pytest config\n items : list of collected items\n \"\"\"\n prev_test_result = {}\n if config.getoption(\"--skip-passed\"):\n cache = config.cache\n prev_test_result = cache.get(\"cache/lasttestresult\", {})\n\n for item in items:\n if prev_test_result.get(item.nodeid) in (\"passed\", \"warnings\", \"skip_passed\"):\n item.add_marker(pytest.mark.skip(reason=\"previously passed\"))\n continue\n\n maybe_skip_test(item, delayed=True)\n\n\n# Save test results to a cache\n# Code adapted from: https://github.com/pytest-dev/pytest/blob/main/src/_pytest/pastebin.py\[email protected](trylast=True)\ndef pytest_terminal_summary(terminalreporter):\n tr = terminalreporter\n cache = tr.config.cache\n assert cache\n\n test_result = {}\n for status in tr.stats:\n if status in (\"warnings\", \"deselected\"):\n continue\n\n for test in tr.stats[status]:\n if test.when != \"call\": # discard results from setup/teardown\n continue\n\n try:\n if test.longrepr and test.longrepr[2] in \"previously passed\":\n test_result[test.nodeid] = \"skip_passed\"\n else:\n test_result[test.nodeid] = test.outcome\n except Exception:\n pass\n\n cache.set(\"cache/lasttestresult\", test_result)\n\n\[email protected](wrapper=True)\ndef pytest_runtest_call(item):\n \"\"\"We want to run extra verification at the start and end of each test to\n check that we haven't leaked memory. According to pytest issue #5044, it's\n not possible to \"Fail\" a test from a fixture (no matter what you do, pytest\n sets the test status to \"Error\"). The approach suggested there is hook\n pytest_runtest_call as we do here. 
To get access to the selenium fixture, we\n imitate the definition of pytest_pyfunc_call:\n https://github.com/pytest-dev/pytest/blob/6.2.2/src/_pytest/python.py#L177\n\n Pytest issue #5044:\n https://github.com/pytest-dev/pytest/issues/5044\n \"\"\"\n browser = None\n for fixture in item._fixtureinfo.argnames:\n if fixture.startswith(\"selenium\"):\n browser = item.funcargs[fixture]\n break\n\n if not browser or not browser.pyodide_loaded:\n result = yield\n return result\n\n trace_pyproxies = pytest.mark.skip_pyproxy_check.mark not in item.own_markers\n trace_hiwire_refs = (\n trace_pyproxies and pytest.mark.skip_refcount_check.mark not in item.own_markers\n )\n yield from extra_checks_test_wrapper(\n browser, trace_hiwire_refs, trace_pyproxies, item\n )\n\n\ndef extra_checks_test_wrapper(browser, trace_hiwire_refs, trace_pyproxies, item):\n \"\"\"Extra conditions for test to pass:\n 1. No explicit request for test to fail\n 2. No leaked JsRefs\n 3. No leaked PyProxys\n \"\"\"\n browser.clear_force_test_fail()\n init_num_keys = browser.get_num_hiwire_keys()\n if trace_pyproxies:\n browser.enable_pyproxy_tracing()\n init_num_proxies = browser.get_num_proxies()\n err = False\n try:\n result = yield\n except Exception:\n err = True\n raise\n finally:\n # Suppress any errors if an error was raised so we keep the original error\n with contextlib.suppress(Exception) if err else contextlib.nullcontext():\n browser.disable_pyproxy_tracing()\n browser.restore_state()\n\n if browser.force_test_fail:\n raise Exception(\"Test failure explicitly requested but no error was raised.\")\n if trace_pyproxies and trace_hiwire_refs:\n delta_proxies = browser.get_num_proxies() - init_num_proxies\n delta_keys = browser.get_num_hiwire_keys() - init_num_keys\n if delta_proxies > 0:\n pxs = browser.run_js(\n \"\"\"\n return Array.from(pyodide._module.pyproxy_alloc_map.entries(), ([x, s]) => [x.type, x.toString(), \"Traceback at creation:\" + s.replace(\"Error\", \"\")])\n \"\"\"\n )\n capman = item.config.pluginmanager.getplugin(\"capturemanager\")\n with capman.item_capture(\"call\", item):\n print(\"\\n\" + \"!\" * 40)\n print(\"leaked proxies:\")\n for row in pxs:\n print(*row)\n\n assert (delta_proxies, delta_keys) == (0, 0) or delta_keys < 0\n if trace_hiwire_refs:\n delta_keys = browser.get_num_hiwire_keys() - init_num_keys\n assert delta_keys <= 0\n return result\n\n\ndef package_is_built(package_name):\n return _package_is_built(package_name, pytest.pyodide_dist_dir) # type:ignore[attr-defined]\n\n\ndef strip_assertions_stderr(messages: Sequence[str]) -> list[str]:\n \"\"\"Strip additional messages on stderr included when ASSERTIONS=1\"\"\"\n res = []\n for msg in messages:\n if msg.strip() in [\n \"sigaction: signal type not supported: this is a no-op.\",\n \"Calling stub instead of siginterrupt()\",\n \"warning: no blob constructor, cannot create blobs with mimetypes\",\n \"warning: no BlobBuilder\",\n ]:\n continue\n res.append(msg)\n return res\n", "path": "conftest.py" } ]
[ { "content": "\"\"\"\nVarious common utilities for testing.\n\"\"\"\n\nimport contextlib\nimport os\nimport pathlib\nimport re\nimport sys\nfrom collections.abc import Sequence\n\nimport pytest\n\nROOT_PATH = pathlib.Path(__file__).parents[0].resolve()\nDIST_PATH = ROOT_PATH / \"dist\"\n\nsys.path.append(str(ROOT_PATH / \"pyodide-build\"))\nsys.path.append(str(ROOT_PATH / \"src\" / \"py\"))\n\nimport pytest_pyodide.runner\n\n# importing this fixture has a side effect of making the safari webdriver reused during the session\nfrom pytest_pyodide.runner import use_global_safari_service # noqa: F401\nfrom pytest_pyodide.utils import package_is_built as _package_is_built\n\nos.environ[\"IN_PYTEST\"] = \"1\"\npytest_pyodide.runner.CHROME_FLAGS.extend(\n [\n \"--enable-features=WebAssemblyExperimentalJSPI\",\n \"--enable-experimental-webassembly-features\",\n ]\n)\npytest_pyodide.runner.NODE_FLAGS.extend([\"--experimental-wasm-stack-switching\"])\n\n# There are a bunch of global objects that occasionally enter the hiwire cache\n# but never leave. The refcount checks get angry about them if they aren't preloaded.\n# We need to go through and touch them all once to keep everything okay.\npytest_pyodide.runner.INITIALIZE_SCRIPT = \"\"\"\n pyodide.globals.get;\n pyodide.runPython(\"import pyodide_js._api; del pyodide_js\");\n pyodide._api.importlib.invalidate_caches;\n pyodide._api.package_loader.unpack_buffer;\n pyodide._api.package_loader.get_dynlibs;\n pyodide._api.pyodide_code.eval_code;\n pyodide._api.pyodide_code.eval_code_async;\n pyodide._api.pyodide_code.find_imports;\n pyodide._api.pyodide_ffi.register_js_module;\n pyodide._api.pyodide_ffi.unregister_js_module;\n pyodide.pyimport(\"pyodide.ffi.wrappers\").destroy();\n pyodide.pyimport(\"pyodide.http\").destroy();\n pyodide.pyimport(\"pyodide_js._api\");\n\"\"\"\n\nonly_node = pytest.mark.xfail_browsers(\n chrome=\"node only\", firefox=\"node only\", safari=\"node only\"\n)\nonly_chrome = pytest.mark.xfail_browsers(\n node=\"chrome only\", firefox=\"chrome only\", safari=\"chrome only\"\n)\n\n\ndef pytest_addoption(parser):\n group = parser.getgroup(\"general\")\n group.addoption(\n \"--run-xfail\",\n action=\"store_true\",\n help=\"If provided, tests marked as xfail will be run\",\n )\n group.addoption(\n \"--skip-passed\",\n action=\"store_true\",\n help=(\n \"If provided, tests that passed on the last run will be skipped. \"\n \"CAUTION: this will skip tests even if tests are modified\"\n ),\n )\n\n\ndef maybe_skip_test(item, delayed=False):\n \"\"\"If necessary skip test at the fixture level, to avoid\n loading the selenium_standalone fixture which takes a long time.\n \"\"\"\n browsers = \"|\".join([\"firefox\", \"chrome\", \"node\", \"safari\"])\n is_common_test = str(item.fspath).endswith(\"test_packages_common.py\")\n\n skip_msg = None\n # Testing a package. Skip the test if the package is not built.\n match = re.match(\n r\".*/packages/(?P<name>[\\w\\-]+)/test_[\\w\\-]+\\.py\", str(item.parent.fspath)\n )\n if match and not is_common_test:\n package_name = match.group(\"name\")\n if not package_is_built(package_name) and re.match(\n rf\"test_[\\w\\-\\.]+\\[({browsers})[^\\]]*\\]\", item.name\n ):\n skip_msg = f\"package '{package_name}' is not built.\"\n\n # Common package import test. 
Skip it if the package is not built.\n if skip_msg is None and is_common_test and item.name.startswith(\"test_import\"):\n if not pytest.pyodide_runtimes: # type:ignore[attr-defined]\n skip_msg = \"Not running browser tests\"\n\n else:\n match = re.match(\n rf\"test_import\\[({browsers})-(?P<name>[\\w\\-\\.]+)\\]\", item.name\n )\n if match:\n package_name = match.group(\"name\")\n if not package_is_built(package_name):\n # selenium_standalone as it takes a long time to initialize\n skip_msg = f\"package '{package_name}' is not built.\"\n else:\n raise AssertionError(\n f\"Couldn't parse package name from {item.name}. This should not happen!\"\n ) # If the test is going to be skipped remove the\n\n # TODO: also use this hook to skip doctests we cannot run (or run them\n # inside the selenium wrapper)\n\n if skip_msg is not None:\n if delayed:\n item.add_marker(pytest.mark.skip(reason=skip_msg))\n else:\n pytest.skip(skip_msg)\n\n\ndef pytest_configure(config):\n \"\"\"Monkey patch the function cwd_relative_nodeid\n\n returns the description of a test for the short summary table. Monkey patch\n it to reduce the verbosity of the test names in the table. This leaves\n enough room to see the information about the test failure in the summary.\n \"\"\"\n global CONFIG\n\n old_cwd_relative_nodeid = config.cwd_relative_nodeid\n\n def cwd_relative_nodeid(*args):\n result = old_cwd_relative_nodeid(*args)\n result = result.replace(\"src/tests/\", \"\")\n result = result.replace(\"packages/\", \"\")\n result = result.replace(\"::test_\", \"::\")\n return result\n\n config.cwd_relative_nodeid = cwd_relative_nodeid\n\n pytest.pyodide_dist_dir = config.getoption(\"--dist-dir\") # type:ignore[attr-defined]\n\n\ndef pytest_collection_modifyitems(config, items):\n \"\"\"Called after collect is completed.\n Parameters\n ----------\n config : pytest config\n items : list of collected items\n \"\"\"\n prev_test_result = {}\n if config.getoption(\"--skip-passed\"):\n cache = config.cache\n prev_test_result = cache.get(\"cache/lasttestresult\", {})\n\n for item in items:\n if prev_test_result.get(item.nodeid) in (\"passed\", \"warnings\", \"skip_passed\"):\n item.add_marker(pytest.mark.skip(reason=\"previously passed\"))\n continue\n\n maybe_skip_test(item, delayed=True)\n\n\n# Save test results to a cache\n# Code adapted from: https://github.com/pytest-dev/pytest/blob/main/src/_pytest/pastebin.py\[email protected](trylast=True)\ndef pytest_terminal_summary(terminalreporter):\n tr = terminalreporter\n cache = tr.config.cache\n assert cache\n\n test_result = {}\n for status in tr.stats:\n if status in (\"warnings\", \"deselected\"):\n continue\n\n for test in tr.stats[status]:\n if test.when != \"call\": # discard results from setup/teardown\n continue\n\n try:\n if test.longrepr and test.longrepr[2] in \"previously passed\":\n test_result[test.nodeid] = \"skip_passed\"\n else:\n test_result[test.nodeid] = test.outcome\n except Exception:\n pass\n\n cache.set(\"cache/lasttestresult\", test_result)\n\n\[email protected](wrapper=True)\ndef pytest_runtest_call(item):\n \"\"\"We want to run extra verification at the start and end of each test to\n check that we haven't leaked memory. According to pytest issue #5044, it's\n not possible to \"Fail\" a test from a fixture (no matter what you do, pytest\n sets the test status to \"Error\"). The approach suggested there is hook\n pytest_runtest_call as we do here. 
To get access to the selenium fixture, we\n imitate the definition of pytest_pyfunc_call:\n https://github.com/pytest-dev/pytest/blob/6.2.2/src/_pytest/python.py#L177\n\n Pytest issue #5044:\n https://github.com/pytest-dev/pytest/issues/5044\n \"\"\"\n browser = None\n for fixture in item._fixtureinfo.argnames:\n if fixture.startswith(\"selenium\"):\n browser = item.funcargs[fixture]\n break\n\n if not browser or not browser.pyodide_loaded:\n result = yield\n return result\n\n trace_pyproxies = pytest.mark.skip_pyproxy_check.mark not in item.own_markers\n trace_hiwire_refs = (\n trace_pyproxies and pytest.mark.skip_refcount_check.mark not in item.own_markers\n )\n yield from extra_checks_test_wrapper(\n browser, trace_hiwire_refs, trace_pyproxies, item\n )\n\n\ndef extra_checks_test_wrapper(browser, trace_hiwire_refs, trace_pyproxies, item):\n \"\"\"Extra conditions for test to pass:\n 1. No explicit request for test to fail\n 2. No leaked JsRefs\n 3. No leaked PyProxys\n \"\"\"\n browser.clear_force_test_fail()\n init_num_keys = browser.get_num_hiwire_keys()\n if trace_pyproxies:\n browser.enable_pyproxy_tracing()\n init_num_proxies = browser.get_num_proxies()\n err = False\n try:\n result = yield\n except Exception:\n err = True\n raise\n finally:\n # Suppress any errors if an error was raised so we keep the original error\n with contextlib.suppress(Exception) if err else contextlib.nullcontext():\n browser.disable_pyproxy_tracing()\n browser.restore_state()\n\n if browser.force_test_fail:\n raise Exception(\"Test failure explicitly requested but no error was raised.\")\n if trace_pyproxies and trace_hiwire_refs:\n delta_proxies = browser.get_num_proxies() - init_num_proxies\n delta_keys = browser.get_num_hiwire_keys() - init_num_keys\n if delta_proxies > 0:\n pxs = browser.run_js(\n \"\"\"\n return Array.from(pyodide._module.pyproxy_alloc_map.entries(), ([x, s]) => [x.type, x.toString(), \"Traceback at creation:\" + s.replace(\"Error\", \"\")])\n \"\"\"\n )\n capman = item.config.pluginmanager.getplugin(\"capturemanager\")\n with capman.item_capture(\"call\", item):\n print(\"\\n\" + \"!\" * 40)\n print(\"leaked proxies:\")\n for row in pxs:\n print(*row)\n\n assert (delta_proxies, delta_keys) == (0, 0) or delta_keys < 0\n if trace_hiwire_refs:\n delta_keys = browser.get_num_hiwire_keys() - init_num_keys\n assert delta_keys <= 0\n return result\n\n\ndef package_is_built(package_name):\n return _package_is_built(package_name, pytest.pyodide_dist_dir) # type:ignore[attr-defined]\n\n\ndef strip_assertions_stderr(messages: Sequence[str]) -> list[str]:\n \"\"\"Strip additional messages on stderr included when ASSERTIONS=1\"\"\"\n res = []\n for msg in messages:\n if msg.strip() in [\n \"sigaction: signal type not supported: this is a no-op.\",\n \"Calling stub instead of siginterrupt()\",\n \"warning: no blob constructor, cannot create blobs with mimetypes\",\n \"warning: no BlobBuilder\",\n ]:\n continue\n res.append(msg)\n return res\n", "path": "conftest.py" } ]
diff --git a/conftest.py b/conftest.py index bda0bcbb4d8..fb1cb42996a 100644 --- a/conftest.py +++ b/conftest.py @@ -54,6 +54,9 @@ only_node = pytest.mark.xfail_browsers( chrome="node only", firefox="node only", safari="node only" ) +only_chrome = pytest.mark.xfail_browsers( + node="chrome only", firefox="chrome only", safari="chrome only" +) def pytest_addoption(parser): diff --git a/docs/project/changelog.md b/docs/project/changelog.md index 6ef873c4dec..1e760490cec 100644 --- a/docs/project/changelog.md +++ b/docs/project/changelog.md @@ -45,6 +45,10 @@ myst: - {{ Enhancement }} The `build/post` script now runs under the directory where the built wheel is unpacked. + {pr}`4481` + +- {{ Fix }} `dup` now works correctly in the Node filesystem. + {pr}`4554` ### Packages diff --git a/emsdk/patches/0001-Fix-dup-in-nodefs-by-refcounting-nodefs-file-descrip.patch b/emsdk/patches/0001-Fix-dup-in-nodefs-by-refcounting-nodefs-file-descrip.patch new file mode 100644 index 00000000000..ebef6f3e2bb --- /dev/null +++ b/emsdk/patches/0001-Fix-dup-in-nodefs-by-refcounting-nodefs-file-descrip.patch @@ -0,0 +1,166 @@ +From 135baa3d4cc211c0145d2e1df116e39f60df4f91 Mon Sep 17 00:00:00 2001 +From: Hood Chatham <[email protected]> +Date: Thu, 22 Feb 2024 21:47:13 -0800 +Subject: [PATCH] Fix dup in nodefs by refcounting nodefs file descriptors + (#21399) + +--- + src/library_fs.js | 5 +++++ + src/library_nodefs.js | 6 +++++- + src/library_syscall.js | 6 +++--- + test/fs/test_nodefs_dup.c | 45 +++++++++++++++++++++++++++++++++++++++ + test/test_core.py | 8 +++++++ + 5 files changed, 66 insertions(+), 4 deletions(-) + create mode 100644 test/fs/test_nodefs_dup.c + +diff --git a/src/library_fs.js b/src/library_fs.js +index f5d16b86c..379e65268 100644 +--- a/src/library_fs.js ++++ b/src/library_fs.js +@@ -471,6 +471,11 @@ FS.staticInit();` + + closeStream(fd) { + FS.streams[fd] = null; + }, ++ dupStream(origStream, fd = -1) { ++ var stream = FS.createStream(origStream, fd); ++ stream.stream_ops?.dup?.(stream); ++ return stream; ++ }, + + // + // devices +diff --git a/src/library_nodefs.js b/src/library_nodefs.js +index 81864ffcc..ace50458c 100644 +--- a/src/library_nodefs.js ++++ b/src/library_nodefs.js +@@ -251,6 +251,7 @@ addToLibrary({ + var path = NODEFS.realPath(stream.node); + try { + if (FS.isFile(stream.node.mode)) { ++ stream.shared.refcount = 1; + stream.nfd = fs.openSync(path, NODEFS.flagsForNode(stream.flags)); + } + } catch (e) { +@@ -260,7 +261,7 @@ addToLibrary({ + }, + close(stream) { + try { +- if (FS.isFile(stream.node.mode) && stream.nfd) { ++ if (FS.isFile(stream.node.mode) && stream.nfd && --stream.shared.refcount === 0) { + fs.closeSync(stream.nfd); + } + } catch (e) { +@@ -268,6 +269,9 @@ addToLibrary({ + throw new FS.ErrnoError(NODEFS.convertNodeCode(e)); + } + }, ++ dup(stream) { ++ stream.shared.refcount++; ++ }, + read(stream, buffer, offset, length, position) { + // Node.js < 6 compatibility: node errors on 0 length reads + if (length === 0) return 0; +diff --git a/src/library_syscall.js b/src/library_syscall.js +index b078bd71c..69ea6f8b2 100644 +--- a/src/library_syscall.js ++++ b/src/library_syscall.js +@@ -183,7 +183,7 @@ var SyscallsLibrary = { + }, + __syscall_dup: (fd) => { + var old = SYSCALLS.getStreamFromFD(fd); +- return FS.createStream(old).fd; ++ return FS.dupStream(old).fd; + }, + __syscall_pipe__deps: ['$PIPEFS'], + __syscall_pipe: (fdPtr) => { +@@ -760,7 +760,7 @@ var SyscallsLibrary = { + arg++; + } + var newStream; +- newStream = FS.createStream(stream, arg); ++ 
newStream = FS.dupStream(stream, arg); + return newStream.fd; + } + case {{{ cDefs.F_GETFD }}}: +@@ -1007,7 +1007,7 @@ var SyscallsLibrary = { + if (old.fd === newfd) return -{{{ cDefs.EINVAL }}}; + var existing = FS.getStream(newfd); + if (existing) FS.close(existing); +- return FS.createStream(old, newfd).fd; ++ return FS.dupStream(old, newfd).fd; + }, + }; + +diff --git a/test/fs/test_nodefs_dup.c b/test/fs/test_nodefs_dup.c +new file mode 100644 +index 000000000..abf34935b +--- /dev/null ++++ b/test/fs/test_nodefs_dup.c +@@ -0,0 +1,45 @@ ++/* ++ * Copyright 2018 The Emscripten Authors. All rights reserved. ++ * Emscripten is available under two separate licenses, the MIT license and the ++ * University of Illinois/NCSA Open Source License. Both these licenses can be ++ * found in the LICENSE file. ++ */ ++ ++#include <assert.h> ++#include <emscripten.h> ++#include <fcntl.h> ++#include <stdio.h> ++#include <unistd.h> ++ ++#ifdef NODERAWFS ++#define CWD "" ++#else ++#define CWD "/working/" ++#endif ++ ++int main(void) ++{ ++ EM_ASM( ++#ifdef NODERAWFS ++ FS.close(FS.open('test.txt', 'w')); ++#else ++ FS.mkdir('/working'); ++ FS.mount(NODEFS, {root: '.'}, '/working'); ++ FS.close(FS.open('/working/test.txt', 'w')); ++#endif ++ ); ++ int fd1 = open(CWD "test.txt", O_WRONLY); ++ int fd2 = dup(fd1); ++ int fd3 = fcntl(fd1, F_DUPFD_CLOEXEC, 0); ++ ++ assert(fd1 == 3); ++ assert(fd2 == 4); ++ assert(fd3 == 5); ++ assert(close(fd1) == 0); ++ assert(write(fd2, "abcdef", 6) == 6); ++ assert(close(fd2) == 0); ++ assert(write(fd3, "ghijkl", 6) == 6); ++ assert(close(fd3) == 0); ++ printf("success\n"); ++ return 0; ++} +diff --git a/test/test_core.py b/test/test_core.py +index 3f21eb5ef..f304f1366 100644 +--- a/test/test_core.py ++++ b/test/test_core.py +@@ -5745,6 +5745,14 @@ Module = { + self.emcc_args += ['-lnodefs.js'] + self.do_runf('fs/test_nodefs_cloexec.c', 'success') + ++ @also_with_noderawfs ++ @requires_node ++ def test_fs_nodefs_dup(self): ++ if self.get_setting('WASMFS'): ++ self.set_setting('FORCE_FILESYSTEM') ++ self.emcc_args += ['-lnodefs.js'] ++ self.do_runf('fs/test_nodefs_dup.c', 'success') ++ + @requires_node + def test_fs_nodefs_home(self): + self.set_setting('FORCE_FILESYSTEM') +-- +2.34.1 + diff --git a/src/tests/test_filesystem.py b/src/tests/test_filesystem.py index 036d778a16e..a0decd0ad0c 100644 --- a/src/tests/test_filesystem.py +++ b/src/tests/test_filesystem.py @@ -4,6 +4,9 @@ """ import pytest +from pytest_pyodide import run_in_pyodide + +from conftest import only_chrome @pytest.mark.skip_refcount_check @@ -22,7 +25,7 @@ def test_idbfs_persist_code(selenium_standalone): f""" let mountDir = '{mount_dir}'; pyodide.FS.mkdir(mountDir); - pyodide.FS.mount(pyodide.FS.filesystems.{fstype}, {{root : "."}}, "{mount_dir}"); + pyodide.FS.mount(pyodide.FS.filesystems.{fstype}, {{root : "."}}, mountDir); """ ) # create file in mount @@ -109,9 +112,7 @@ def test_idbfs_persist_code(selenium_standalone): @pytest.mark.requires_dynamic_linking [email protected]_browsers( - node="Not available", firefox="Not available", safari="Not available" -) +@only_chrome def test_nativefs_dir(request, selenium_standalone): # Note: Using *real* native file system requires # user interaction so it is not available in headless mode. 
@@ -254,3 +255,78 @@ def test_nativefs_dir(request, selenium_standalone): pyodide.FS.unmount("/mnt/nativefs"); """ ) + + [email protected] +def browser(selenium): + return selenium.browser + + [email protected] +def runner(request): + return request.config.option.runner + + +@run_in_pyodide +def test_fs_dup(selenium, browser): + from os import close, dup + from pathlib import Path + + from pyodide.code import run_js + + if browser == "node": + fstype = "NODEFS" + else: + fstype = "IDBFS" + + mount_dir = Path("/mount_test") + mount_dir.mkdir(exist_ok=True) + run_js( + """ + (fstype, mountDir) => + pyodide.FS.mount(pyodide.FS.filesystems[fstype], {root : "."}, mountDir); + """ + )(fstype, str(mount_dir)) + + file = open("/mount_test/a.txt", "w") + fd2 = dup(file.fileno()) + close(fd2) + file.write("abcd") + file.close() + + [email protected]_dynamic_linking +@only_chrome +@run_in_pyodide +async def test_nativefs_dup(selenium, runner): + from os import close, dup + + import pytest + + from pyodide.code import run_js + + # Note: Using *real* native file system requires + # user interaction so it is not available in headless mode. + # So in this test we use OPFS (Origin Private File System) + # which is part of File System Access API but uses indexDB as a backend. + + if runner == "playwright": + pytest.xfail("Playwright doesn't support file system access APIs") + + await run_js( + """ + async () => { + root = await navigator.storage.getDirectory(); + testFileHandle = await root.getFileHandle('test_read', { create: true }); + writable = await testFileHandle.createWritable(); + await writable.write("hello_read"); + await writable.close(); + await pyodide.mountNativeFS("/mnt/nativefs", root); + } + """ + )() + file = open("/mnt/nativefs/test_read") + fd2 = dup(file.fileno()) + close(fd2) + assert file.read() == "hello_read" + file.close()
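The fix above is an Emscripten patch that reference-counts NODEFS file descriptors so that `dup` no longer prematurely closes the shared underlying Node.js fd; the added `test_fs_dup` reduces the original `.npy` report to a bare `os.dup` round-trip. A hedged sketch of that reduced check, as it might be run inside a Pyodide session with a NODEFS/IDBFS mount (the mount point and file name are illustrative), is:

```python
import os

# Assumes something like /mount_test is already mounted via pyodide.FS.mount(NODEFS or IDBFS, ...),
# as in the test_fs_dup test added in the diff above.
f = open("/mount_test/a.txt", "w")
fd2 = os.dup(f.fileno())
os.close(fd2)    # before the refcount fix this also closed the shared Node fd...
f.write("abcd")  # ...so writing/flushing the original stream then raised
f.close()        #    OSError: [Errno 9] Bad file descriptor
```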
mozilla__bugbug-331
Figure out what to do with http_service on CI

We have two options:

- build the http_service with fake models and don't push it on CI. Build it with real models and push it after training;
- build the http_service without models and let it download models at runtime.
[ { "content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nimport sys\n\nfrom bugbug.models.component import ComponentModel\nfrom bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel\nfrom bugbug.models.regression import RegressionModel\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS = {\n \"defectenhancementtask\": DefectEnhancementTaskModel,\n \"component\": ComponentModel,\n \"regression\": RegressionModel,\n}\nMODELS_DIR = os.path.join(os.path.dirname(__file__), \"models\")\n\n\ndef load_model(model):\n model_file_path = os.path.join(MODELS_DIR, f\"{model}model\")\n LOGGER.info(f\"Lookup model in {model_file_path}\")\n model = MODELS[model].load(model_file_path)\n return model\n\n\ndef check_models():\n for model_name in MODELS.keys():\n # Try loading the model\n load_model(model_name)\n\n\nif __name__ == \"__main__\":\n try:\n check_models()\n except Exception:\n LOGGER.warning(\n \"Failed to validate the models, please run `python models.py download`\",\n exc_info=True,\n )\n sys.exit(1)\n", "path": "http_service/check_models.py" } ]
[ { "content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nimport sys\n\nfrom bugbug.models.component import ComponentModel\nfrom bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel\nfrom bugbug.models.regression import RegressionModel\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS = {\n \"defectenhancementtask\": DefectEnhancementTaskModel,\n \"component\": ComponentModel,\n \"regression\": RegressionModel,\n}\nMODELS_DIR = os.path.join(os.path.dirname(__file__), \"models\")\n\n\ndef load_model(model):\n model_file_path = os.path.join(MODELS_DIR, f\"{model}model\")\n LOGGER.info(f\"Lookup model in {model_file_path}\")\n model = MODELS[model].load(model_file_path)\n return model\n\n\ndef check_models():\n for model_name in MODELS.keys():\n # Try loading the model\n load_model(model_name)\n\n\nif __name__ == \"__main__\":\n\n should_check_models = os.environ.get(\"CHECK_MODELS\", \"1\")\n\n if should_check_models == \"0\":\n print(\"Skipping checking models as instructed by env var $CHECK_MODELS\")\n sys.exit(0)\n\n try:\n check_models()\n except Exception:\n LOGGER.warning(\n \"Failed to validate the models, please run `python models.py download`\",\n exc_info=True,\n )\n sys.exit(1)\n", "path": "http_service/check_models.py" } ]
diff --git a/.taskcluster.yml b/.taskcluster.yml index 8eede7e15a..6caaaefb5a 100644 --- a/.taskcluster.yml +++ b/.taskcluster.yml @@ -153,14 +153,14 @@ tasks: capabilities: privileged: true maxRunTime: 10800 - image: mozilla/taskboot:0.1.0 + image: mozilla/taskboot:0.1.1 command: - "/bin/sh" - "-lcxe" - "git clone ${repository} /code && cd /code && git checkout ${head_rev} && - taskboot --cache /cache --target /code build-compose --registry=registry.hub.docker.com --write /images" + taskboot --cache /cache --target /code build-compose --registry=registry.hub.docker.com --write /images --build-arg CHECK_MODELS=0" artifacts: public/bugbug: expires: {$fromNow: '2 weeks'} @@ -229,12 +229,13 @@ tasks: taskclusterProxy: true maxRunTime: 3600 - image: mozilla/taskboot:0.1.0 + image: mozilla/taskboot:0.1.1 env: TASKCLUSTER_SECRET: project/relman/bugbug/deploy command: - taskboot - push-artifact + - --exclude-filter *http-service* routes: - project.relman.bugbug.deploy_ending metadata: @@ -260,7 +261,7 @@ tasks: taskclusterProxy: true maxRunTime: 3600 - image: mozilla/taskboot:0.1.0 + image: mozilla/taskboot:0.1.1 env: GIT_REPOSITORY: ${repository} GIT_REVISION: ${head_rev} diff --git a/docker-compose.yml b/docker-compose.yml index 7e320e9f9b..e7582f5542 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -56,11 +56,17 @@ services: dockerfile: infra/dockerfile.train_tracking image: mozilla/bugbug-train-tracking - bugbug-http-service-base: + bugbug-http-service: build: context: http_service - dockerfile: Dockerfile.base - image: mozilla/bugbug-http-service-base + image: mozilla/bugbug-http-service + environment: + - BUGBUG_BUGZILLA_TOKEN + ports: + - target: 8000 + published: 8000 + protocol: tcp + mode: host bugbug-spawn-data-pipeline: build: diff --git a/http_service/Dockerfile b/http_service/Dockerfile index 565fb75a18..f050d8cbb3 100644 --- a/http_service/Dockerfile +++ b/http_service/Dockerfile @@ -1,10 +1,21 @@ -FROM mozilla/bugbug-http-service-base:latest +FROM mozilla/bugbug-base:latest + +RUN env + +COPY requirements.txt /code/bugbug_http_service/ + +RUN pip install -r /code/bugbug_http_service/requirements.txt + +COPY . /code/bugbug_http_service/ # Load the models WORKDIR /code/ COPY ./models /code/models +ARG CHECK_MODELS +ENV CHECK_MODELS="${CHECK_MODELS}" + RUN python /code/bugbug_http_service/check_models.py CMD ["gunicorn", "-b", "0.0.0.0:8000", "bugbug_http_service.app", "--preload", "--timeout", "30", "-w", "3"] diff --git a/http_service/Dockerfile.base b/http_service/Dockerfile.base deleted file mode 100644 index 78906e7695..0000000000 --- a/http_service/Dockerfile.base +++ /dev/null @@ -1,7 +0,0 @@ -FROM mozilla/bugbug-base:latest - -COPY requirements.txt /code/bugbug_http_service/ - -RUN pip install -r /code/bugbug_http_service/requirements.txt - -COPY . 
/code/bugbug_http_service/ diff --git a/http_service/check_models.py b/http_service/check_models.py index 08fa46ff04..e838b2c80f 100644 --- a/http_service/check_models.py +++ b/http_service/check_models.py @@ -36,6 +36,13 @@ def check_models(): if __name__ == "__main__": + + should_check_models = os.environ.get("CHECK_MODELS", "1") + + if should_check_models == "0": + print("Skipping checking models as instructed by env var $CHECK_MODELS") + sys.exit(0) + try: check_models() except Exception: diff --git a/http_service/docker-compose.yml b/http_service/docker-compose.yml deleted file mode 100644 index 56bd616694..0000000000 --- a/http_service/docker-compose.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: '3.2' -services: - bugbug-http-service: - build: . - image: mozilla/bugbug-http-service - environment: - - BUGBUG_BUGZILLA_TOKEN - ports: - - target: 8000 - published: 8000 - protocol: tcp - mode: host \ No newline at end of file diff --git a/infra/data-pipeline.yml b/infra/data-pipeline.yml index b22c639ab7..93be2726f7 100644 --- a/infra/data-pipeline.yml +++ b/infra/data-pipeline.yml @@ -178,7 +178,7 @@ tasks: capabilities: privileged: true maxRunTime: 3600 - image: mozilla/taskboot:0.1.0 + image: mozilla/taskboot:0.1.1 command: - "/bin/sh" - "-lcxe" @@ -218,7 +218,7 @@ tasks: taskclusterProxy: true maxRunTime: 3600 - image: mozilla/taskboot:0.1.0 + image: mozilla/taskboot:0.1.1 env: TASKCLUSTER_SECRET: project/relman/bugbug/deploy command:
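The diff above skips the model check during CI image builds (taskboot passes `--build-arg CHECK_MODELS=0`, the Dockerfile forwards it as an environment variable, and `check_models.py` exits early) and excludes the http-service image from the pushed artifacts. A small sketch of how that gate could be exercised locally (assumes a bugbug checkout with its dependencies installed; purely illustrative):

```python
import os
import subprocess

# With CHECK_MODELS=0 the script prints a skip message and exits 0 without
# needing any model files under http_service/models/.
env = dict(os.environ, CHECK_MODELS="0")
result = subprocess.run(["python", "http_service/check_models.py"], env=env)
assert result.returncode == 0
```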
unionai-oss__pandera-1591
Error Importing Pandera with Polars extra

**Describe the bug**

I get an error when importing pandera after installing the latest 0.19.0b2 version with the polars extra in a clean environment. I can import it successfully if I install without the polars extra.

- [x] I have checked that this issue has not already been reported.
- [x] I have confirmed this bug exists on the latest version of pandera.
- [ ] (optional) I have confirmed this bug exists on the main branch of pandera.

#### Code Sample, a copy-pastable example

I installed pandera 0.19.0b2 in a clean virtual environment using `pip install pandera[polars]==0.19.0b2` and attempted to import pandera:

```python
import pandera as pa
```

I got the following error message:

```
>>> import pandera as pa
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File ".venv/lib/python3.11/site-packages/pandera/__init__.py", line 6, in <module>
    from pandera import errors, external_config, typing
  File ".venv/lib/python3.11/site-packages/pandera/external_config.py", line 23, in <module>
    import pyspark.pandas
ModuleNotFoundError: No module named 'pyspark'
```

#### Versions:

- Pandera: 0.19.0b2
- Python: 3.11.7
- Ubuntu: 22.04
[ { "content": "\"\"\"Configuration for external packages.\"\"\"\n\nimport os\n\nis_spark_local_ip_dirty = False\nis_pyarrow_ignore_timezone_dirty = False\n\ntry:\n # try importing pyspark to see if it exists. This is important because the\n # pandera.typing module defines a Series type that inherits from\n # pandas.Series, and pyspark v1+ injects a __getitem__ method to pandas\n # Series and DataFrames to support type hinting:\n # https://spark.apache.org/docs/3.2.0/api/python/user_guide/pandas_on_spark/typehints.html#type-hinting-with-names\n # pylint: disable=unused-import\n if os.getenv(\"SPARK_LOCAL_IP\") is None:\n is_spark_local_ip_dirty = True\n os.environ[\"SPARK_LOCAL_IP\"] = \"127.0.0.1\"\n if os.getenv(\"PYARROW_IGNORE_TIMEZONE\") is None:\n is_pyarrow_ignore_timezone_dirty = True\n # This can be overriden by the user\n os.environ[\"PYARROW_IGNORE_TIMEZONE\"] = \"1\"\n\n import pyspark.pandas\nfinally:\n if is_spark_local_ip_dirty:\n os.environ.pop(\"SPARK_LOCAL_IP\")\n if is_pyarrow_ignore_timezone_dirty:\n os.environ.pop(\"PYARROW_IGNORE_TIMEZONE\")\n", "path": "pandera/external_config.py" } ]
[ { "content": "\"\"\"Configuration for external packages.\"\"\"\n\nimport os\n\nis_spark_local_ip_dirty = False\nis_pyarrow_ignore_timezone_dirty = False\n\ntry:\n # try importing pyspark to see if it exists. This is important because the\n # pandera.typing module defines a Series type that inherits from\n # pandas.Series, and pyspark v1+ injects a __getitem__ method to pandas\n # Series and DataFrames to support type hinting:\n # https://spark.apache.org/docs/3.2.0/api/python/user_guide/pandas_on_spark/typehints.html#type-hinting-with-names\n # pylint: disable=unused-import\n if os.getenv(\"SPARK_LOCAL_IP\") is None:\n is_spark_local_ip_dirty = True\n os.environ[\"SPARK_LOCAL_IP\"] = \"127.0.0.1\"\n if os.getenv(\"PYARROW_IGNORE_TIMEZONE\") is None:\n is_pyarrow_ignore_timezone_dirty = True\n # This can be overriden by the user\n os.environ[\"PYARROW_IGNORE_TIMEZONE\"] = \"1\"\n\n import pyspark.pandas\nexcept (ImportError, ModuleNotFoundError):\n pass\nfinally:\n if is_spark_local_ip_dirty:\n os.environ.pop(\"SPARK_LOCAL_IP\")\n if is_pyarrow_ignore_timezone_dirty:\n os.environ.pop(\"PYARROW_IGNORE_TIMEZONE\")\n", "path": "pandera/external_config.py" } ]
diff --git a/pandera/external_config.py b/pandera/external_config.py
index bd81a8d39..0e076e70e 100644
--- a/pandera/external_config.py
+++ b/pandera/external_config.py
@@ -21,6 +21,8 @@
         os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"

     import pyspark.pandas
+except (ImportError, ModuleNotFoundError):
+    pass
 finally:
     if is_spark_local_ip_dirty:
         os.environ.pop("SPARK_LOCAL_IP")
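The guarded import above is the whole fix: the optional `pyspark.pandas` import is wrapped so that environments without pyspark (e.g. `pip install pandera[polars]`) can still import pandera, while the `finally` block keeps restoring the temporary environment variables. A condensed sketch of the same pattern, reusing the variable names from the patched `external_config.py`:

```python
import os

is_spark_local_ip_dirty = False
try:
    if os.getenv("SPARK_LOCAL_IP") is None:
        is_spark_local_ip_dirty = True
        os.environ["SPARK_LOCAL_IP"] = "127.0.0.1"
    import pyspark.pandas  # noqa: F401  # optional dependency
except (ImportError, ModuleNotFoundError):
    # pyspark is not installed; importing the package should still succeed
    # (ModuleNotFoundError already subclasses ImportError, mirroring the patch)
    pass
finally:
    if is_spark_local_ip_dirty:
        os.environ.pop("SPARK_LOCAL_IP")
```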